From c9416a1d3b20a7de4db5cc1b42ed4a6c5880eefe Mon Sep 17 00:00:00 2001
From: Ismail Alidzhikov
Date: Wed, 30 Aug 2023 16:17:35 +0300
Subject: [PATCH] Add e2e tests (#30)

* Fix the PodDisruptionBudget `apiVersion`

`make extension-up` fails with:
```
Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest: resource mapping not found for name: "gardener-extension-registry-cache-admission" namespace: "garden" from "": no matches for kind "PodDisruptionBudget" in version "policy/v1beta1"
```

* Drop the unused client from the Shoot validator

* Prefix the server in `hosts.toml` with `https://`

It seems to work without the `https://` prefix as well, but to stay aligned with the upstream docs/examples (ref https://github.com/containerd/containerd/blob/main/docs/hosts.md) and with provider-local (ref https://github.com/gardener/gardener/blob/v1.77.0/pkg/provider-local/webhook/controlplane/ensurer.go#L167-L174), the prefix is added for the registry-cache as well (see the illustrative hosts.toml sketch after the diff).

* Add e2e tests
---
 .gitignore | 3 + Makefile | 24 +- .../templates/poddisruptionbudget.yaml | 2 +- example/shoot.yaml | 6 - go.mod | 7 + go.sum | 20 + hack/ci-e2e-kind.sh | 51 + hack/test-e2e-local.sh | 56 + pkg/admission/validator/shoot.go | 4 +- pkg/admission/validator/webhook.go | 4 +- pkg/controller/registrydeployer.go | 4 +- pkg/utils/registry/registry.go | 8 +- pkg/utils/registry/registry_test.go | 10 +- pkg/webhook/operatingsystemconfig/ensurer.go | 4 +- .../operatingsystemconfig/ensurer_test.go | 8 +- skaffold-admission.yaml | 23 + skaffold.yaml | 23 - test/e2e/common_test.go | 166 +++ test/e2e/create_enable_disable_delete_test.go | 68 + ...enabled_hibernate_reconcile_delete_test.go | 75 + test/e2e/e2e_suite_test.go | 27 + vendor/github.com/dsnet/compress/.travis.yml | 36 + vendor/github.com/dsnet/compress/LICENSE.md | 24 + vendor/github.com/dsnet/compress/README.md | 75 + vendor/github.com/dsnet/compress/api.go | 74 + vendor/github.com/dsnet/compress/bzip2/bwt.go | 110 ++ .../github.com/dsnet/compress/bzip2/common.go | 110 ++ .../dsnet/compress/bzip2/fuzz_off.go | 13 + .../dsnet/compress/bzip2/fuzz_on.go | 77 + .../compress/bzip2/internal/sais/common.go | 28 + .../compress/bzip2/internal/sais/sais_byte.go | 661 +++++++++ .../compress/bzip2/internal/sais/sais_int.go | 661 +++++++++ .../dsnet/compress/bzip2/mtf_rle2.go | 131 ++ .../github.com/dsnet/compress/bzip2/prefix.go | 374 +++++ .../github.com/dsnet/compress/bzip2/reader.go | 274 ++++ .../github.com/dsnet/compress/bzip2/rle1.go | 101 ++ .../github.com/dsnet/compress/bzip2/writer.go | 307 ++++ .../dsnet/compress/internal/common.go | 107 ++ .../dsnet/compress/internal/debug.go | 12 + .../dsnet/compress/internal/errors/errors.go | 120 ++ .../dsnet/compress/internal/gofuzz.go | 12 + .../dsnet/compress/internal/prefix/debug.go | 159 +++ .../dsnet/compress/internal/prefix/decoder.go | 136 ++ .../dsnet/compress/internal/prefix/encoder.go | 66 + .../dsnet/compress/internal/prefix/prefix.go | 400 ++++++ .../dsnet/compress/internal/prefix/range.go | 93 ++ .../dsnet/compress/internal/prefix/reader.go | 335 +++++ .../dsnet/compress/internal/prefix/wrap.go | 146 ++ .../dsnet/compress/internal/prefix/writer.go | 166 +++ .../dsnet/compress/internal/release.go | 21 + vendor/github.com/dsnet/compress/zbench.sh | 12 + vendor/github.com/dsnet/compress/zfuzz.sh | 10 + vendor/github.com/dsnet/compress/zprof.sh | 54 + vendor/github.com/dsnet/compress/ztest.sh | 54 + .../gardener/pkg/apis/authentication/doc.go | 21 + .../pkg/apis/authentication/register.go | 53 +
.../types_adminkubeconfigrequest.go | 54 + .../apis/authentication/v1alpha1/defaults.go | 33 + .../pkg/apis/authentication/v1alpha1/doc.go | 28 + .../authentication/v1alpha1/generated.pb.go | 813 +++++++++++ .../authentication/v1alpha1/generated.proto | 62 + .../apis/authentication/v1alpha1/register.go | 53 + .../v1alpha1/types_adminkubeconfigrequest.go | 55 + .../v1alpha1/zz_generated.conversion.go | 149 ++ .../v1alpha1/zz_generated.deepcopy.go | 97 ++ .../v1alpha1/zz_generated.defaults.go | 38 + .../authentication/zz_generated.deepcopy.go | 92 ++ .../gardener/test/framework/cleanup.go | 80 ++ .../gardener/test/framework/common.go | 55 + .../test/framework/commonframework.go | 150 ++ .../gardener/gardener/test/framework/dump.go | 416 ++++++ .../gardener/test/framework/errors.go | 28 + .../gardener/test/framework/gardener_utils.go | 522 +++++++ .../test/framework/gardenerframework.go | 165 +++ .../gardener/test/framework/gingko_utils.go | 66 + .../gardener/test/framework/gomega_utils.go | 24 + .../gardener/gardener/test/framework/helm.go | 247 ++++ .../gardener/test/framework/http_utils.go | 84 ++ .../gardener/test/framework/k8s_utils.go | 591 ++++++++ .../gardener/test/framework/pod_executor.go | 78 ++ .../test/framework/rootpod_executor.go | 125 ++ .../gardener/test/framework/shoot_utils.go | 393 ++++++ .../test/framework/shootcreationframework.go | 510 +++++++ .../gardener/test/framework/shootframework.go | 325 +++++ .../test/framework/shootmigrationtest.go | 491 +++++++ .../gardener/test/framework/template.go | 53 + .../test/framework/test_description.go | 120 ++ .../gardener/test/framework/test_options.go | 125 ++ .../gardener/gardener/test/framework/utils.go | 209 +++ .../gardener/test/framework/worker_utils.go | 169 +++ .../test/utils/access/adminkubeconfig.go | 54 + .../gardener/test/utils/access/csr.go | 147 ++ .../test/utils/access/serviceaccount.go | 175 +++ .../utils/access/statictokenkubeconfig.go | 30 + vendor/github.com/golang/snappy/.gitignore | 16 + vendor/github.com/golang/snappy/AUTHORS | 18 + vendor/github.com/golang/snappy/CONTRIBUTORS | 41 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/README | 107 ++ vendor/github.com/golang/snappy/decode.go | 264 ++++ .../github.com/golang/snappy/decode_amd64.s | 490 +++++++ .../github.com/golang/snappy/decode_arm64.s | 494 +++++++ vendor/github.com/golang/snappy/decode_asm.go | 15 + .../github.com/golang/snappy/decode_other.go | 115 ++ vendor/github.com/golang/snappy/encode.go | 289 ++++ .../github.com/golang/snappy/encode_amd64.s | 730 ++++++++++ .../github.com/golang/snappy/encode_arm64.s | 722 ++++++++++ vendor/github.com/golang/snappy/encode_asm.go | 30 + .../github.com/golang/snappy/encode_other.go | 238 ++++ vendor/github.com/golang/snappy/snappy.go | 98 ++ vendor/github.com/mholt/archiver/.gitignore | 5 + vendor/github.com/mholt/archiver/.travis.yml | 21 + vendor/github.com/mholt/archiver/LICENSE | 21 + vendor/github.com/mholt/archiver/README.md | 255 ++++ vendor/github.com/mholt/archiver/appveyor.yml | 31 + vendor/github.com/mholt/archiver/archiver.go | 498 +++++++ vendor/github.com/mholt/archiver/build.bash | 17 + vendor/github.com/mholt/archiver/bz2.go | 64 + .../mholt/archiver/filecompressor.go | 67 + vendor/github.com/mholt/archiver/gz.go | 61 + vendor/github.com/mholt/archiver/lz4.go | 56 + vendor/github.com/mholt/archiver/rar.go | 390 ++++++ vendor/github.com/mholt/archiver/sz.go | 51 + vendor/github.com/mholt/archiver/tar.go | 605 ++++++++ vendor/github.com/mholt/archiver/tarbz2.go | 
126 ++ vendor/github.com/mholt/archiver/targz.go | 124 ++ vendor/github.com/mholt/archiver/tarlz4.go | 122 ++ vendor/github.com/mholt/archiver/tarsz.go | 114 ++ vendor/github.com/mholt/archiver/tarxz.go | 119 ++ vendor/github.com/mholt/archiver/xz.go | 58 + vendor/github.com/mholt/archiver/zip.go | 575 ++++++++ vendor/github.com/nwaples/rardecode/LICENSE | 23 + vendor/github.com/nwaples/rardecode/README.md | 4 + .../github.com/nwaples/rardecode/archive.go | 342 +++++ .../github.com/nwaples/rardecode/archive15.go | 468 +++++++ .../github.com/nwaples/rardecode/archive50.go | 475 +++++++ .../nwaples/rardecode/bit_reader.go | 119 ++ .../github.com/nwaples/rardecode/decode29.go | 264 ++++ .../nwaples/rardecode/decode29_lz.go | 247 ++++ .../nwaples/rardecode/decode29_ppm.go | 132 ++ .../github.com/nwaples/rardecode/decode50.go | 294 ++++ .../nwaples/rardecode/decode_reader.go | 290 ++++ .../nwaples/rardecode/decrypt_reader.go | 126 ++ .../github.com/nwaples/rardecode/filters.go | 416 ++++++ .../github.com/nwaples/rardecode/huffman.go | 208 +++ .../github.com/nwaples/rardecode/ppm_model.go | 1096 +++++++++++++++ vendor/github.com/nwaples/rardecode/reader.go | 376 +++++ vendor/github.com/nwaples/rardecode/vm.go | 687 +++++++++ vendor/github.com/pierrec/lz4/.gitignore | 34 + vendor/github.com/pierrec/lz4/.travis.yml | 24 + vendor/github.com/pierrec/lz4/LICENSE | 28 + vendor/github.com/pierrec/lz4/README.md | 90 ++ vendor/github.com/pierrec/lz4/block.go | 413 ++++++ vendor/github.com/pierrec/lz4/debug.go | 23 + vendor/github.com/pierrec/lz4/debug_stub.go | 7 + vendor/github.com/pierrec/lz4/decode_amd64.go | 8 + vendor/github.com/pierrec/lz4/decode_amd64.s | 375 +++++ vendor/github.com/pierrec/lz4/decode_other.go | 98 ++ vendor/github.com/pierrec/lz4/errors.go | 30 + .../pierrec/lz4/internal/xxh32/xxh32zero.go | 223 +++ vendor/github.com/pierrec/lz4/lz4.go | 116 ++ vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 + .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 + vendor/github.com/pierrec/lz4/reader.go | 335 +++++ .../github.com/pierrec/lz4/reader_legacy.go | 207 +++ vendor/github.com/pierrec/lz4/writer.go | 422 ++++++ .../github.com/pierrec/lz4/writer_legacy.go | 182 +++ vendor/github.com/ulikunitz/xz/.gitignore | 25 + vendor/github.com/ulikunitz/xz/LICENSE | 26 + vendor/github.com/ulikunitz/xz/README.md | 73 + vendor/github.com/ulikunitz/xz/SECURITY.md | 10 + vendor/github.com/ulikunitz/xz/TODO.md | 363 +++++ vendor/github.com/ulikunitz/xz/bits.go | 79 ++ vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/format.go | 721 ++++++++++ .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 +++ .../ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + .../ulikunitz/xz/internal/hash/roller.go | 29 + .../ulikunitz/xz/internal/xlog/xlog.go | 457 ++++++ .../github.com/ulikunitz/xz/lzma/bintree.go | 522 +++++++ vendor/github.com/ulikunitz/xz/lzma/bitops.go | 47 + .../github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 +++ .../ulikunitz/xz/lzma/bytewriter.go | 37 + .../github.com/ulikunitz/xz/lzma/decoder.go | 277 ++++ .../ulikunitz/xz/lzma/decoderdict.go | 128 ++ .../ulikunitz/xz/lzma/directcodec.go | 38 + .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 ++ .../github.com/ulikunitz/xz/lzma/encoder.go | 268 ++++ .../ulikunitz/xz/lzma/encoderdict.go | 149 ++ 
vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 +++++ vendor/github.com/ulikunitz/xz/lzma/header.go | 167 +++ .../github.com/ulikunitz/xz/lzma/header2.go | 398 ++++++ .../ulikunitz/xz/lzma/lengthcodec.go | 116 ++ .../ulikunitz/xz/lzma/literalcodec.go | 125 ++ .../ulikunitz/xz/lzma/matchalgorithm.go | 52 + .../github.com/ulikunitz/xz/lzma/operation.go | 55 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + .../ulikunitz/xz/lzma/properties.go | 69 + .../ulikunitz/xz/lzma/rangecodec.go | 222 +++ vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 ++ .../github.com/ulikunitz/xz/lzma/reader2.go | 231 +++ vendor/github.com/ulikunitz/xz/lzma/state.go | 145 ++ .../ulikunitz/xz/lzma/treecodecs.go | 133 ++ vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 +++ .../github.com/ulikunitz/xz/lzma/writer2.go | 305 ++++ vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 ++ vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/none-check.go | 23 + vendor/github.com/ulikunitz/xz/reader.go | 359 +++++ vendor/github.com/ulikunitz/xz/writer.go | 399 ++++++ vendor/github.com/xi2/xz/AUTHORS | 8 + vendor/github.com/xi2/xz/LICENSE | 18 + vendor/github.com/xi2/xz/README.md | 10 + vendor/github.com/xi2/xz/dec_bcj.go | 461 ++++++ vendor/github.com/xi2/xz/dec_delta.go | 55 + vendor/github.com/xi2/xz/dec_lzma2.go | 1235 +++++++++++++++++ vendor/github.com/xi2/xz/dec_stream.go | 932 +++++++++++++ vendor/github.com/xi2/xz/dec_util.go | 52 + vendor/github.com/xi2/xz/dec_xz.go | 124 ++ vendor/github.com/xi2/xz/doc.go | 35 + vendor/github.com/xi2/xz/reader.go | 256 ++++ vendor/golang.org/x/crypto/cast5/cast5.go | 536 +++++++ .../x/crypto/openpgp/armor/armor.go | 232 ++++ .../x/crypto/openpgp/armor/encode.go | 161 +++ .../x/crypto/openpgp/canonical_text.go | 59 + .../x/crypto/openpgp/clearsign/clearsign.go | 424 ++++++ .../x/crypto/openpgp/elgamal/elgamal.go | 130 ++ .../x/crypto/openpgp/errors/errors.go | 78 ++ vendor/golang.org/x/crypto/openpgp/keys.go | 693 +++++++++ .../x/crypto/openpgp/packet/compressed.go | 123 ++ .../x/crypto/openpgp/packet/config.go | 91 ++ .../x/crypto/openpgp/packet/encrypted_key.go | 208 +++ .../x/crypto/openpgp/packet/literal.go | 89 ++ .../x/crypto/openpgp/packet/ocfb.go | 143 ++ .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 161 +++ .../x/crypto/openpgp/packet/packet.go | 590 ++++++++ .../x/crypto/openpgp/packet/private_key.go | 384 +++++ .../x/crypto/openpgp/packet/public_key.go | 753 ++++++++++ .../x/crypto/openpgp/packet/public_key_v3.go | 279 ++++ .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 ++++++++++ .../x/crypto/openpgp/packet/signature_v3.go | 146 ++ .../openpgp/packet/symmetric_key_encrypted.go | 155 +++ .../openpgp/packet/symmetrically_encrypted.go | 290 ++++ .../x/crypto/openpgp/packet/userattribute.go | 90 ++ .../x/crypto/openpgp/packet/userid.go | 159 +++ vendor/golang.org/x/crypto/openpgp/read.go | 448 ++++++ vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 ++++ vendor/golang.org/x/crypto/openpgp/write.go | 418 ++++++ .../client-go/tools/watch/informerwatcher.go | 150 ++ .../client-go/tools/watch/retrywatcher.go | 296 ++++ vendor/k8s.io/client-go/tools/watch/until.go | 169 +++ .../client-go/util/certificate/csr/csr.go | 364 +++++ .../helm/internal/third_party/dep/fs/fs.go | 373 +++++ .../internal/third_party/dep/fs/rename.go | 58 + .../third_party/dep/fs/rename_windows.go | 69 + 
.../helm/pkg/downloader/chart_downloader.go | 365 +++++ vendor/k8s.io/helm/pkg/downloader/doc.go | 23 + vendor/k8s.io/helm/pkg/downloader/manager.go | 752 ++++++++++ vendor/k8s.io/helm/pkg/getter/doc.go | 21 + vendor/k8s.io/helm/pkg/getter/getter.go | 98 ++ vendor/k8s.io/helm/pkg/getter/httpgetter.go | 97 ++ vendor/k8s.io/helm/pkg/getter/plugingetter.go | 99 ++ .../helm/pkg/helm/environment/environment.go | 168 +++ .../k8s.io/helm/pkg/helm/helmpath/helmhome.go | 103 ++ vendor/k8s.io/helm/pkg/plugin/hooks.go | 35 + vendor/k8s.io/helm/pkg/plugin/plugin.go | 200 +++ vendor/k8s.io/helm/pkg/provenance/doc.go | 37 + vendor/k8s.io/helm/pkg/provenance/sign.go | 411 ++++++ vendor/k8s.io/helm/pkg/repo/chartrepo.go | 284 ++++ vendor/k8s.io/helm/pkg/repo/doc.go | 93 ++ vendor/k8s.io/helm/pkg/repo/index.go | 342 +++++ vendor/k8s.io/helm/pkg/repo/local.go | 137 ++ vendor/k8s.io/helm/pkg/repo/repo.go | 156 +++ vendor/k8s.io/helm/pkg/resolver/resolver.go | 177 +++ vendor/k8s.io/helm/pkg/tlsutil/cfg.go | 89 ++ vendor/k8s.io/helm/pkg/tlsutil/tls.go | 97 ++ vendor/k8s.io/helm/pkg/urlutil/urlutil.go | 87 ++ vendor/modules.txt | 55 + 285 files changed, 51886 insertions(+), 56 deletions(-) create mode 100755 hack/ci-e2e-kind.sh create mode 100755 hack/test-e2e-local.sh create mode 100644 skaffold-admission.yaml create mode 100644 test/e2e/common_test.go create mode 100644 test/e2e/create_enable_disable_delete_test.go create mode 100644 test/e2e/create_enabled_hibernate_reconcile_delete_test.go create mode 100644 test/e2e/e2e_suite_test.go create mode 100644 vendor/github.com/dsnet/compress/.travis.yml create mode 100644 vendor/github.com/dsnet/compress/LICENSE.md create mode 100644 vendor/github.com/dsnet/compress/README.md create mode 100644 vendor/github.com/dsnet/compress/api.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/bwt.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/common.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/fuzz_off.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/fuzz_on.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/prefix.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/reader.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/rle1.go create mode 100644 vendor/github.com/dsnet/compress/bzip2/writer.go create mode 100644 vendor/github.com/dsnet/compress/internal/common.go create mode 100644 vendor/github.com/dsnet/compress/internal/debug.go create mode 100644 vendor/github.com/dsnet/compress/internal/errors/errors.go create mode 100644 vendor/github.com/dsnet/compress/internal/gofuzz.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/debug.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/decoder.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/encoder.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/prefix.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/range.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/reader.go create mode 100644 vendor/github.com/dsnet/compress/internal/prefix/wrap.go create mode 100644 
vendor/github.com/dsnet/compress/internal/prefix/writer.go create mode 100644 vendor/github.com/dsnet/compress/internal/release.go create mode 100644 vendor/github.com/dsnet/compress/zbench.sh create mode 100644 vendor/github.com/dsnet/compress/zfuzz.sh create mode 100644 vendor/github.com/dsnet/compress/zprof.sh create mode 100644 vendor/github.com/dsnet/compress/ztest.sh create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/types_adminkubeconfigrequest.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.pb.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.proto create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/types_adminkubeconfigrequest.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/authentication/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/cleanup.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/common.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/commonframework.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/dump.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/errors.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/gardener_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/gardenerframework.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/gingko_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/gomega_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/helm.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/http_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/k8s_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/pod_executor.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/rootpod_executor.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/shoot_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/shootcreationframework.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/shootframework.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/shootmigrationtest.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/template.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/test_description.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/test_options.go create mode 100644 
vendor/github.com/gardener/gardener/test/framework/utils.go create mode 100644 vendor/github.com/gardener/gardener/test/framework/worker_utils.go create mode 100644 vendor/github.com/gardener/gardener/test/utils/access/adminkubeconfig.go create mode 100644 vendor/github.com/gardener/gardener/test/utils/access/csr.go create mode 100644 vendor/github.com/gardener/gardener/test/utils/access/serviceaccount.go create mode 100644 vendor/github.com/gardener/gardener/test/utils/access/statictokenkubeconfig.go create mode 100644 vendor/github.com/golang/snappy/.gitignore create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/README create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_arm64.s create mode 100644 vendor/github.com/golang/snappy/decode_asm.go create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_arm64.s create mode 100644 vendor/github.com/golang/snappy/encode_asm.go create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 vendor/github.com/mholt/archiver/.gitignore create mode 100644 vendor/github.com/mholt/archiver/.travis.yml create mode 100644 vendor/github.com/mholt/archiver/LICENSE create mode 100644 vendor/github.com/mholt/archiver/README.md create mode 100644 vendor/github.com/mholt/archiver/appveyor.yml create mode 100644 vendor/github.com/mholt/archiver/archiver.go create mode 100644 vendor/github.com/mholt/archiver/build.bash create mode 100644 vendor/github.com/mholt/archiver/bz2.go create mode 100644 vendor/github.com/mholt/archiver/filecompressor.go create mode 100644 vendor/github.com/mholt/archiver/gz.go create mode 100644 vendor/github.com/mholt/archiver/lz4.go create mode 100644 vendor/github.com/mholt/archiver/rar.go create mode 100644 vendor/github.com/mholt/archiver/sz.go create mode 100644 vendor/github.com/mholt/archiver/tar.go create mode 100644 vendor/github.com/mholt/archiver/tarbz2.go create mode 100644 vendor/github.com/mholt/archiver/targz.go create mode 100644 vendor/github.com/mholt/archiver/tarlz4.go create mode 100644 vendor/github.com/mholt/archiver/tarsz.go create mode 100644 vendor/github.com/mholt/archiver/tarxz.go create mode 100644 vendor/github.com/mholt/archiver/xz.go create mode 100644 vendor/github.com/mholt/archiver/zip.go create mode 100644 vendor/github.com/nwaples/rardecode/LICENSE create mode 100644 vendor/github.com/nwaples/rardecode/README.md create mode 100644 vendor/github.com/nwaples/rardecode/archive.go create mode 100644 vendor/github.com/nwaples/rardecode/archive15.go create mode 100644 vendor/github.com/nwaples/rardecode/archive50.go create mode 100644 vendor/github.com/nwaples/rardecode/bit_reader.go create mode 100644 vendor/github.com/nwaples/rardecode/decode29.go create mode 100644 vendor/github.com/nwaples/rardecode/decode29_lz.go create mode 100644 vendor/github.com/nwaples/rardecode/decode29_ppm.go create mode 100644 vendor/github.com/nwaples/rardecode/decode50.go create mode 100644 vendor/github.com/nwaples/rardecode/decode_reader.go 
create mode 100644 vendor/github.com/nwaples/rardecode/decrypt_reader.go create mode 100644 vendor/github.com/nwaples/rardecode/filters.go create mode 100644 vendor/github.com/nwaples/rardecode/huffman.go create mode 100644 vendor/github.com/nwaples/rardecode/ppm_model.go create mode 100644 vendor/github.com/nwaples/rardecode/reader.go create mode 100644 vendor/github.com/nwaples/rardecode/vm.go create mode 100644 vendor/github.com/pierrec/lz4/.gitignore create mode 100644 vendor/github.com/pierrec/lz4/.travis.yml create mode 100644 vendor/github.com/pierrec/lz4/LICENSE create mode 100644 vendor/github.com/pierrec/lz4/README.md create mode 100644 vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/debug.go create mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s create mode 100644 vendor/github.com/pierrec/lz4/decode_other.go create mode 100644 vendor/github.com/pierrec/lz4/errors.go create mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/reader_legacy.go create mode 100644 vendor/github.com/pierrec/lz4/writer.go create mode 100644 vendor/github.com/pierrec/lz4/writer_legacy.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 
vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go create mode 100644 vendor/github.com/xi2/xz/AUTHORS create mode 100644 vendor/github.com/xi2/xz/LICENSE create mode 100644 vendor/github.com/xi2/xz/README.md create mode 100644 vendor/github.com/xi2/xz/dec_bcj.go create mode 100644 vendor/github.com/xi2/xz/dec_delta.go create mode 100644 vendor/github.com/xi2/xz/dec_lzma2.go create mode 100644 vendor/github.com/xi2/xz/dec_stream.go create mode 100644 vendor/github.com/xi2/xz/dec_util.go create mode 100644 vendor/github.com/xi2/xz/dec_xz.go create mode 100644 vendor/github.com/xi2/xz/doc.go create mode 100644 vendor/github.com/xi2/xz/reader.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 
vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/k8s.io/client-go/tools/watch/informerwatcher.go create mode 100644 vendor/k8s.io/client-go/tools/watch/retrywatcher.go create mode 100644 vendor/k8s.io/client-go/tools/watch/until.go create mode 100644 vendor/k8s.io/client-go/util/certificate/csr/csr.go create mode 100644 vendor/k8s.io/helm/internal/third_party/dep/fs/fs.go create mode 100644 vendor/k8s.io/helm/internal/third_party/dep/fs/rename.go create mode 100644 vendor/k8s.io/helm/internal/third_party/dep/fs/rename_windows.go create mode 100644 vendor/k8s.io/helm/pkg/downloader/chart_downloader.go create mode 100644 vendor/k8s.io/helm/pkg/downloader/doc.go create mode 100644 vendor/k8s.io/helm/pkg/downloader/manager.go create mode 100644 vendor/k8s.io/helm/pkg/getter/doc.go create mode 100644 vendor/k8s.io/helm/pkg/getter/getter.go create mode 100644 vendor/k8s.io/helm/pkg/getter/httpgetter.go create mode 100644 vendor/k8s.io/helm/pkg/getter/plugingetter.go create mode 100644 vendor/k8s.io/helm/pkg/helm/environment/environment.go create mode 100644 vendor/k8s.io/helm/pkg/helm/helmpath/helmhome.go create mode 100644 vendor/k8s.io/helm/pkg/plugin/hooks.go create mode 100644 vendor/k8s.io/helm/pkg/plugin/plugin.go create mode 100644 vendor/k8s.io/helm/pkg/provenance/doc.go create mode 100644 vendor/k8s.io/helm/pkg/provenance/sign.go create mode 100644 vendor/k8s.io/helm/pkg/repo/chartrepo.go create mode 100644 vendor/k8s.io/helm/pkg/repo/doc.go create mode 100644 vendor/k8s.io/helm/pkg/repo/index.go create mode 100644 vendor/k8s.io/helm/pkg/repo/local.go create mode 100644 vendor/k8s.io/helm/pkg/repo/repo.go create mode 100644 vendor/k8s.io/helm/pkg/resolver/resolver.go create mode 100644 vendor/k8s.io/helm/pkg/tlsutil/cfg.go create mode 100644 vendor/k8s.io/helm/pkg/tlsutil/tls.go create mode 100644 vendor/k8s.io/helm/pkg/urlutil/urlutil.go diff --git a/.gitignore b/.gitignore index 0c47a1f2..9695f1b4 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ /bin /hack/tools/bin +*.test *.coverprofile *.html .vscode @@ -20,3 +21,5 @@ TODO .fuse_hidden* .go-version + +/gardener diff --git a/Makefile b/Makefile index c0fdc71a..9dc2ed62 100644 --- a/Makefile +++ b/Makefile @@ -22,6 +22,7 @@ VERSION := $(shell cat "$(REPO_ROOT)/VERSION") LD_FLAGS := "-w -X github.com/gardener/$(EXTENSION_PREFIX)-$(NAME)/pkg/version.Version=$(IMAGE_TAG)" LEADER_ELECTION := false IGNORE_OPERATION_ANNOTATION := true +PARALLEL_E2E_TESTS := 2 WEBHOOK_CONFIG_PORT := 8444 @@ -122,7 +123,7 @@ check-generate: .PHONY: check check: $(GOIMPORTS) $(GOLANGCI_LINT) $(HELM) - @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/check.sh --golangci-lint-config=./.golangci.yaml ./cmd/... ./pkg/... + @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/check.sh --golangci-lint-config=./.golangci.yaml ./cmd/... ./pkg/... ./test/... 
@$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/check-charts.sh ./charts .PHONY: generate @@ -139,7 +140,7 @@ generate-in-docker: .PHONY: format format: $(GOIMPORTS) $(GOIMPORTSREVISER) - @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/format.sh ./cmd ./pkg + @$(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/format.sh ./cmd ./pkg ./test .PHONY: test test: @@ -159,6 +160,12 @@ verify: check format test .PHONY: verify-extended verify-extended: check-generate check format test-cov test-clean +test-e2e-local: $(GINKGO) + ./hack/test-e2e-local.sh --procs=$(PARALLEL_E2E_TESTS) ./test/e2e/... + +ci-e2e-kind: + ./hack/ci-e2e-kind.sh + # use static label for skaffold to prevent rolling all gardener components on every `skaffold` invocation extension-up extension-down: export SKAFFOLD_LABEL = skaffold.dev/run-id=extension-local @@ -170,3 +177,16 @@ extension-dev: $(SKAFFOLD) $(HELM) extension-down: $(SKAFFOLD) $(HELM) $(SKAFFOLD) delete + +# use static label for skaffold to prevent rolling all gardener components on every `skaffold` invocation +admission-up admission-down: export SKAFFOLD_LABEL = skaffold.dev/run-id=admission-local +admission-%: export SKAFFOLD_FILENAME = skaffold-admission.yaml + +admission-up: $(SKAFFOLD) $(HELM) + $(SKAFFOLD) run + +admission-dev: $(SKAFFOLD) $(HELM) + $(SKAFFOLD) dev --cleanup=false --trigger=manual + +admission-down: $(SKAFFOLD) $(HELM) + $(SKAFFOLD) delete diff --git a/charts/admission/charts/runtime/templates/poddisruptionbudget.yaml b/charts/admission/charts/runtime/templates/poddisruptionbudget.yaml index 4652800b..7f4c3ee2 100644 --- a/charts/admission/charts/runtime/templates/poddisruptionbudget.yaml +++ b/charts/admission/charts/runtime/templates/poddisruptionbudget.yaml @@ -1,4 +1,4 @@ -apiVersion: policy/v1beta1 +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "name" . 
}} diff --git a/example/shoot.yaml b/example/shoot.yaml index a929c6ef..38e7e82c 100644 --- a/example/shoot.yaml +++ b/example/shoot.yaml @@ -25,12 +25,6 @@ spec: garbageCollectionEnabled: false networking: type: calico - providerConfig: - apiVersion: calico.networking.extensions.gardener.cloud/v1alpha1 - kind: NetworkConfig - backend: none - typha: - enabled: false provider: type: local workers: diff --git a/go.mod b/go.mod index 7a61a5fc..f474118a 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cyphar/filepath-securejoin v0.2.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dsnet/compress v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect @@ -53,6 +54,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect @@ -71,6 +73,7 @@ require ( github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/mholt/archiver v3.1.1+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -78,6 +81,8 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nwaples/rardecode v1.1.2 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -86,6 +91,8 @@ require ( github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/afero v1.8.2 // indirect + github.com/ulikunitz/xz v0.5.10 // indirect + github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/mock v0.2.0 // indirect go.uber.org/multierr v1.7.0 // indirect diff --git a/go.sum b/go.sum index 5b6677f0..f1d6a396 100644 --- a/go.sum +++ b/go.sum @@ -107,6 +107,9 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy 
v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8= github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= @@ -132,6 +135,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fluent/fluent-operator/v2 v2.2.0 h1:97CiP6WKOHRM7zY/zCynX187Rg+T8hgx2JzD2iuJof8= github.com/fluent/fluent-operator/v2 v2.2.0/go.mod h1:v/q0zLEOWP6MKHP7xvrhtASZTwlrk4LcCne/kgPQ7J0= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -230,6 +235,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= @@ -311,6 +318,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -342,6 +351,8 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU= +github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/hashstructure/v2 v2.0.2 
h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= @@ -364,6 +375,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nwaples/rardecode v1.1.2 h1:Cj0yZY6T1Zx1R7AhTbyGSALm44/Mmq+BAPc4B/p/d3M= +github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -378,6 +391,8 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -445,6 +460,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/hack/ci-e2e-kind.sh b/hack/ci-e2e-kind.sh new file mode 100755 index 00000000..e94757ec --- /dev/null +++ b/hack/ci-e2e-kind.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o nounset +set -o pipefail +set -o errexit + +clamp_mss_to_pmtu() { + # https://github.com/kubernetes/test-infra/issues/23741 + if [[ "$OSTYPE" != "darwin"* ]]; then + iptables -t mangle -A POSTROUTING -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu + fi +} + +REPO_ROOT="$(readlink -f $(dirname ${0})/..)" +GARDENER_VERSION=$(go list -m -f '{{.Version}}' github.com/gardener/gardener) + +if [[ ! -d "$REPO_ROOT/gardener" ]]; then + git clone --branch $GARDENER_VERSION https://github.com/gardener/gardener.git +else + (cd "$REPO_ROOT/gardener" && git checkout $GARDENER_VERSION) +fi + +clamp_mss_to_pmtu + +make -C "$REPO_ROOT/gardener" kind-up +export KUBECONFIG=$REPO_ROOT/gardener/example/gardener-local/kind/local/kubeconfig + +trap '{ + make -C "$REPO_ROOT/gardener" kind-down +}' EXIT + +make -C "$REPO_ROOT/gardener" gardener-up +make extension-up +make test-e2e-local +# TODO: make extension-down currently fails. When we fix it, we can execute it during the tear down. +# make extension-down +make -C "$REPO_ROOT/gardener" gardener-down diff --git a/hack/test-e2e-local.sh b/hack/test-e2e-local.sh new file mode 100755 index 00000000..7066c56a --- /dev/null +++ b/hack/test-e2e-local.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +echo "> E2E Tests" + +# We have to make the shoot domains accessible. +seed_name="local" + +shoot_names=( + e2e-default.local + e2e-hib.local +) + +if [ -n "${CI:-}" ]; then + for shoot in "${shoot_names[@]}" ; do + printf "\n127.0.0.1 api.%s.external.local.gardener.cloud\n127.0.0.1 api.%s.internal.local.gardener.cloud\n" $shoot $shoot >>/etc/hosts + done +else + missing_entries=() + + for shoot in "${shoot_names[@]}"; do + for ip in internal external; do + if ! 
grep -q -x "127.0.0.1 api.$shoot.$ip.local.gardener.cloud" /etc/hosts; then + missing_entries+=("127.0.0.1 api.$shoot.$ip.local.gardener.cloud") + fi + done + done + + if [ ${#missing_entries[@]} -gt 0 ]; then + printf "Hostnames for the following Shoots are missing in /etc/hosts:\n" + for entry in "${missing_entries[@]}"; do + printf " - %s\n" "$entry" + done + printf "To access shoot clusters and run e2e tests, you have to extend your /etc/hosts file.\nPlease refer to https://github.com/gardener/gardener/blob/master/docs/deployment/getting_started_locally.md#accessing-the-shoot-cluster\n\n" + exit 1 + fi +fi + +GO111MODULE=on ginkgo run --timeout=1h --v --show-node-events "$@" diff --git a/pkg/admission/validator/shoot.go b/pkg/admission/validator/shoot.go index 50992a2c..b47049ca 100644 --- a/pkg/admission/validator/shoot.go +++ b/pkg/admission/validator/shoot.go @@ -30,14 +30,12 @@ import ( // shoot validates shoots type shoot struct { - client client.Client decoder runtime.Decoder } // NewShootValidator returns a new instance of a shoot validator. -func NewShootValidator(client client.Client, decoder runtime.Decoder) extensionswebhook.Validator { +func NewShootValidator(decoder runtime.Decoder) extensionswebhook.Validator { return &shoot{ - client: client, decoder: decoder, } } diff --git a/pkg/admission/validator/webhook.go b/pkg/admission/validator/webhook.go index 1ff75fb3..47d2721d 100644 --- a/pkg/admission/validator/webhook.go +++ b/pkg/admission/validator/webhook.go @@ -27,8 +27,6 @@ import ( const ( // Name is a name for a validation webhook. Name = "validator" - // SecretsValidatorName is the name of the secrets validator. - SecretsValidatorName = "secrets." + Name ) var logger = log.Log.WithName("registry-cache-validator-webhook") @@ -44,7 +42,7 @@ func New(mgr manager.Manager) (*extensionswebhook.Webhook, error) { Name: Name, Path: "/webhooks/validate", Validators: map[extensionswebhook.Validator][]extensionswebhook.Type{ - NewShootValidator(mgr.GetClient(), decoder): {{Obj: &core.Shoot{}}}, + NewShootValidator(decoder): {{Obj: &core.Shoot{}}}, }, }) } diff --git a/pkg/controller/registrydeployer.go b/pkg/controller/registrydeployer.go index f0d997a4..a770d053 100644 --- a/pkg/controller/registrydeployer.go +++ b/pkg/controller/registrydeployer.go @@ -67,8 +67,6 @@ func (c *registryCache) Ensure() ([]client.Object, error) { c.Labels[registryCacheServiceUpstreamLabel] = c.Upstream - upstreamURL := fmt.Sprintf("https://%s", registryutils.GetUpstreamServer(c.Upstream)) - var ( service = &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -119,7 +117,7 @@ func (c *registryCache) Ensure() ([]client.Object, error) { Env: []corev1.EnvVar{ { Name: environmentVarialbleNameRegistryURL, - Value: upstreamURL, + Value: registryutils.GetUpstreamURL(c.Upstream), }, { Name: environmentVarialbleNameRegistryDelete, diff --git a/pkg/utils/registry/registry.go b/pkg/utils/registry/registry.go index e9b781e5..19cbd663 100644 --- a/pkg/utils/registry/registry.go +++ b/pkg/utils/registry/registry.go @@ -14,11 +14,11 @@ package registry -// GetUpstreamServer returns the upstream server by given upstream. -func GetUpstreamServer(upstream string) string { +// GetUpstreamURL returns the upstream URL by given upstream. 
+func GetUpstreamURL(upstream string) string { if upstream == "docker.io" { - return "registry-1.docker.io" + return "https://registry-1.docker.io" } - return upstream + return "https://" + upstream } diff --git a/pkg/utils/registry/registry_test.go b/pkg/utils/registry/registry_test.go index 30ddfb68..e7e72c32 100644 --- a/pkg/utils/registry/registry_test.go +++ b/pkg/utils/registry/registry_test.go @@ -30,12 +30,12 @@ func TestRegistryUtils(t *testing.T) { var _ = Describe("Registry utils", func() { - DescribeTable("#GetUpstreamServer", + DescribeTable("#GetUpstreamURL", func(upstream, expected string) { - Expect(registryutils.GetUpstreamServer(upstream)).To(Equal(expected)) + Expect(registryutils.GetUpstreamURL(upstream)).To(Equal(expected)) }, - Entry("upstream is docker.io", "docker.io", "registry-1.docker.io"), - Entry("upstream is eu.gcr.io", "eu.gcr.io", "eu.gcr.io"), - Entry("upstream is quay.io", "quay.io", "quay.io"), + Entry("upstream is docker.io", "docker.io", "https://registry-1.docker.io"), + Entry("upstream is eu.gcr.io", "eu.gcr.io", "https://eu.gcr.io"), + Entry("upstream is quay.io", "quay.io", "https://quay.io"), ) }) diff --git a/pkg/webhook/operatingsystemconfig/ensurer.go b/pkg/webhook/operatingsystemconfig/ensurer.go index 58831bde..ad69436d 100644 --- a/pkg/webhook/operatingsystemconfig/ensurer.go +++ b/pkg/webhook/operatingsystemconfig/ensurer.go @@ -100,14 +100,14 @@ func (e *ensurer) EnsureAdditionalFiles(ctx context.Context, gctx gcontext.Garde } for _, cache := range registryStatus.Caches { - upstreamServer := registryutils.GetUpstreamServer(cache.Upstream) + upstreamURL := registryutils.GetUpstreamURL(cache.Upstream) appendUniqueFile(new, extensionsv1alpha1.File{ Path: filepath.Join(containerdRegistryHostsDirectory, cache.Upstream, "hosts.toml"), Permissions: pointer.Int32(0644), Content: extensionsv1alpha1.FileContent{ Inline: &extensionsv1alpha1.FileContentInline{ - Data: fmt.Sprintf(hostsTOMLTemplate, upstreamServer, cache.Endpoint), + Data: fmt.Sprintf(hostsTOMLTemplate, upstreamURL, cache.Endpoint), }, }, }) diff --git a/pkg/webhook/operatingsystemconfig/ensurer_test.go b/pkg/webhook/operatingsystemconfig/ensurer_test.go index 25b8f6cf..8de68dfd 100644 --- a/pkg/webhook/operatingsystemconfig/ensurer_test.go +++ b/pkg/webhook/operatingsystemconfig/ensurer_test.go @@ -231,8 +231,8 @@ var _ = Describe("Ensurer", func() { Expect(ensurer.EnsureAdditionalFiles(ctx, gctx, &files, nil)).To(Succeed()) Expect(files).To(ConsistOf(oldFile, - hostsTOMLFile("docker.io", "registry-1.docker.io", "http://10.0.0.1:5000"), - hostsTOMLFile("eu.gcr.io", "eu.gcr.io", "http://10.0.0.2:5000"), + hostsTOMLFile("docker.io", "https://registry-1.docker.io", "http://10.0.0.1:5000"), + hostsTOMLFile("eu.gcr.io", "https://eu.gcr.io", "http://10.0.0.2:5000"), )) }) @@ -278,8 +278,8 @@ var _ = Describe("Ensurer", func() { Expect(ensurer.EnsureAdditionalFiles(ctx, gctx, &files, nil)).To(Succeed()) Expect(files).To(ConsistOf(oldFile, - hostsTOMLFile("docker.io", "registry-1.docker.io", "http://10.0.0.1:5000"), - hostsTOMLFile("eu.gcr.io", "eu.gcr.io", "http://10.0.0.2:5000"), + hostsTOMLFile("docker.io", "https://registry-1.docker.io", "http://10.0.0.1:5000"), + hostsTOMLFile("eu.gcr.io", "https://eu.gcr.io", "http://10.0.0.2:5000"), )) }) }) diff --git a/skaffold-admission.yaml b/skaffold-admission.yaml new file mode 100644 index 00000000..278b76bc --- /dev/null +++ b/skaffold-admission.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: skaffold/v2beta29 +kind: Config +metadata: + name: admission 
+build: + artifacts: + - image: eu.gcr.io/gardener-project/gardener/extensions/registry-cache-admission + ko: + main: ./cmd/gardener-extension-registry-cache-admission +deploy: + helm: + releases: + - name: gardener-extension-registry-cache-admission + chartPath: charts/admission + namespace: garden + artifactOverrides: + global: + image: eu.gcr.io/gardener-project/gardener/extensions/registry-cache-admission + imageStrategy: + helm: {} + valuesFiles: + - example/admission/values.yaml diff --git a/skaffold.yaml b/skaffold.yaml index 8205f39a..c6645df3 100644 --- a/skaffold.yaml +++ b/skaffold.yaml @@ -18,26 +18,3 @@ deploy: image: eu.gcr.io/gardener-project/gardener/extensions/registry-cache imageStrategy: helm: {} ---- -apiVersion: skaffold/v2beta29 -kind: Config -metadata: - name: admission -build: - artifacts: - - image: eu.gcr.io/gardener-project/gardener/extensions/registry-cache-admission - ko: - main: ./cmd/gardener-extension-registry-cache-admission -deploy: - helm: - releases: - - name: gardener-extension-registry-cache-admission - chartPath: charts/admission - namespace: garden - artifactOverrides: - global: - image: eu.gcr.io/gardener-project/gardener/extensions/registry-cache-admission - imageStrategy: - helm: {} - valuesFiles: - - example/admission/values.yaml diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go new file mode 100644 index 00000000..d328bb38 --- /dev/null +++ b/test/e2e/common_test.go @@ -0,0 +1,166 @@ +// Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_test + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "time" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/test/framework" + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/pointer" + + "github.com/gardener/gardener-extension-registry-cache/pkg/apis/registry/v1alpha1" +) + +const ( + projectNamespace = "garden-local" + // nginxImageWithDigest corresponds to the nginx:1.13.0 image. 
+ nginxImageWithDigest = "library/nginx@sha256:12d30ce421ad530494d588f87b2328ddc3cae666e77ea1ae5ac3a6661e52cde6" +) + +func defaultShootCreationFramework() *framework.ShootCreationFramework { + kubeconfigPath := os.Getenv("KUBECONFIG") + return framework.NewShootCreationFramework(&framework.ShootCreationConfig{ + GardenerConfig: &framework.GardenerConfig{ + ProjectNamespace: projectNamespace, + GardenerKubeconfig: kubeconfigPath, + SkipAccessingShoot: false, + CommonConfig: &framework.CommonConfig{}, + }, + }) +} + +func defaultShoot(generateName string) *gardencorev1beta1.Shoot { + purpose := gardencorev1beta1.ShootPurposeTesting + + return &gardencorev1beta1.Shoot{ + ObjectMeta: metav1.ObjectMeta{ + Name: generateName, + Annotations: map[string]string{ + v1beta1constants.AnnotationShootInfrastructureCleanupWaitPeriodSeconds: "0", + v1beta1constants.AnnotationShootCloudConfigExecutionMaxDelaySeconds: "0", + }, + }, + Spec: gardencorev1beta1.ShootSpec{ + CloudProfileName: "local", + SecretBindingName: pointer.String("local"), + Region: "local", + Purpose: &purpose, + Kubernetes: gardencorev1beta1.Kubernetes{ + Version: "1.27.1", + Kubelet: &gardencorev1beta1.KubeletConfig{ + SerializeImagePulls: pointer.Bool(false), + RegistryPullQPS: pointer.Int32(10), + RegistryBurst: pointer.Int32(20), + }, + KubeAPIServer: &gardencorev1beta1.KubeAPIServerConfig{}, + }, + Networking: &gardencorev1beta1.Networking{ + Type: pointer.String("calico"), + }, + Provider: gardencorev1beta1.Provider{ + Type: "local", + Workers: []gardencorev1beta1.Worker{{ + Name: "local", + Machine: gardencorev1beta1.Machine{ + Type: "local", + }, + CRI: &gardencorev1beta1.CRI{ + Name: gardencorev1beta1.CRINameContainerD, + }, + Minimum: 1, + Maximum: 1, + }}, + }, + }, + } +} + +func registryCacheExtension(upstream string, size *resource.Quantity) gardencorev1beta1.Extension { + providerConfig := &v1alpha1.RegistryConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "RegistryConfig", + }, + Caches: []v1alpha1.RegistryCache{ + { + Upstream: upstream, + Size: size, + }, + }, + } + providerConfigJSON, err := json.Marshal(&providerConfig) + utilruntime.Must(err) + + extension := gardencorev1beta1.Extension{ + Type: "registry-cache", + ProviderConfig: &runtime.RawExtension{ + Raw: providerConfigJSON, + }, + } + + return extension +} + +func verifyRegistryCache(parentCtx context.Context, log logr.Logger, shootClient kubernetes.Interface, upstream, nginxImageWithDigest string) { + By("Create nginx Pod") + ctx, cancel := context.WithTimeout(parentCtx, 5*time.Minute) + defer cancel() + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: corev1.NamespaceDefault, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: nginxImageWithDigest, + }, + }, + }, + } + ExpectWithOffset(1, shootClient.Client().Create(ctx, pod)).To(Succeed()) + ExpectWithOffset(1, framework.WaitUntilPodIsRunning(ctx, log, pod.Name, pod.Namespace, shootClient)).To(Succeed()) + + By("Verify the registry cache pulled the nginx image") + ctx, cancel = context.WithTimeout(parentCtx, 30*time.Second) + defer cancel() + selector := labels.SelectorFromSet(labels.Set(map[string]string{"upstream-host": upstream})) + reader, err := framework.PodExecByLabel(ctx, selector, "registry-cache", "cat /var/lib/registry/scheduler-state.json", "registry-cache", shootClient) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Expected to successfully cat registry's 
scheduler-state.json file") + + schedulerStateFileContent, err := io.ReadAll(reader) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + schedulerStateMap := map[string]interface{}{} + ExpectWithOffset(1, json.Unmarshal(schedulerStateFileContent, &schedulerStateMap)).To(Succeed()) + ExpectWithOffset(1, schedulerStateMap).To(HaveKey(nginxImageWithDigest), fmt.Sprintf("Expected to find image %s in the registry's scheduler-state.json file", nginxImageWithDigest)) +} diff --git a/test/e2e/create_enable_disable_delete_test.go b/test/e2e/create_enable_disable_delete_test.go new file mode 100644 index 00000000..e66065c2 --- /dev/null +++ b/test/e2e/create_enable_disable_delete_test.go @@ -0,0 +1,68 @@ +// Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_test + +import ( + "context" + "time" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" +) + +var _ = Describe("Registry Cache Extension Tests", func() { + parentCtx := context.Background() + + f := defaultShootCreationFramework() + f.Shoot = defaultShoot("e2e-default") + + It("should create Shoot, enable and disable the registry-cache extension, delete Shoot", func() { + By("Create Shoot") + ctx, cancel := context.WithTimeout(parentCtx, 15*time.Minute) + defer cancel() + Expect(f.CreateShootAndWaitForCreation(ctx, false)).To(Succeed()) + f.Verify() + + By("Enable the registry-cache extension") + ctx, cancel = context.WithTimeout(parentCtx, 10*time.Minute) + defer cancel() + Expect(f.UpdateShoot(ctx, f.Shoot, func(shoot *gardencorev1beta1.Shoot) error { + size := resource.MustParse("2Gi") + extension := registryCacheExtension("docker.io", &size) + shoot.Spec.Extensions = []gardencorev1beta1.Extension{extension} + + return nil + })).To(Succeed()) + + By("Verify registry-cache works") + verifyRegistryCache(parentCtx, f.Logger, f.ShootFramework.ShootClient, "docker.io", nginxImageWithDigest) + + By("Disable the registry-cache extension") + ctx, cancel = context.WithTimeout(parentCtx, 10*time.Minute) + defer cancel() + Expect(f.UpdateShoot(ctx, f.Shoot, func(shoot *gardencorev1beta1.Shoot) error { + shoot.Spec.Extensions = nil + + return nil + })).To(Succeed()) + + By("Delete Shoot") + ctx, cancel = context.WithTimeout(parentCtx, 15*time.Minute) + defer cancel() + Expect(f.DeleteShootAndWaitForDeletion(ctx, f.Shoot)).To(Succeed()) + }) +}) diff --git a/test/e2e/create_enabled_hibernate_reconcile_delete_test.go b/test/e2e/create_enabled_hibernate_reconcile_delete_test.go new file mode 100644 index 00000000..09769e7f --- /dev/null +++ b/test/e2e/create_enabled_hibernate_reconcile_delete_test.go @@ -0,0 +1,75 @@ +// Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e_test
+
+import (
+	"context"
+	"time"
+
+	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ = Describe("Registry Cache Extension Tests", func() {
+	parentCtx := context.Background()
+
+	f := defaultShootCreationFramework()
+	shoot := defaultShoot("e2e-hib")
+	size := resource.MustParse("2Gi")
+	extension := registryCacheExtension("docker.io", &size)
+	shoot.Spec.Extensions = []gardencorev1beta1.Extension{extension}
+	f.Shoot = shoot
+
+	It("should create Shoot with registry-cache extension enabled, hibernate Shoot, reconcile Shoot, delete Shoot", func() {
+		By("Create Shoot")
+		ctx, cancel := context.WithTimeout(parentCtx, 15*time.Minute)
+		defer cancel()
+		Expect(f.CreateShootAndWaitForCreation(ctx, false)).To(Succeed())
+		f.Verify()
+
+		By("Verify registry-cache works")
+		verifyRegistryCache(parentCtx, f.Logger, f.ShootFramework.ShootClient, "docker.io", nginxImageWithDigest)
+
+		By("Hibernate Shoot")
+		ctx, cancel = context.WithTimeout(parentCtx, 10*time.Minute)
+		defer cancel()
+		Expect(f.HibernateShoot(ctx, f.Shoot)).To(Succeed())
+
+		By("Reconcile Shoot")
+		ctx, cancel = context.WithTimeout(parentCtx, 5*time.Minute)
+		defer cancel()
+		Expect(f.UpdateShoot(ctx, f.Shoot, func(shoot *gardencorev1beta1.Shoot) error {
+			metav1.SetMetaDataAnnotation(&shoot.ObjectMeta, "gardener.cloud/operation", "reconcile")
+
+			return nil
+		})).To(Succeed())
+		Expect(f.WaitForShootToBeReconciled(ctx, f.Shoot)).To(Succeed())
+
+		// We cannot properly test "Wake up Shoot": after a wake-up, the registry-cache Pod fails to be scheduled
+		// because its PVC is bound to an already-deleted Node.
+		//
+		// PersistentVolumeClaims in the local setup are provisioned by local-path-provisioner. local-path-provisioner
+		// creates a hostPath-based PersistentVolume on the Node, so the provisioned PV is tightly bound to that Node.
+		// When the Node is deleted, the PV's hostPath directory is deleted with it. local-path-provisioner does not
+		// support moving a PVC from one Node to another (see https://github.com/rancher/local-path-provisioner/issues/31#issuecomment-690772828),
+		// so a local-path-provisioner PVC cannot survive Node deletion or rollout.
+
+		By("Delete Shoot")
+		ctx, cancel = context.WithTimeout(parentCtx, 15*time.Minute)
+		defer cancel()
+		Expect(f.DeleteShootAndWaitForDeletion(ctx, f.Shoot)).To(Succeed())
+	})
+})
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
new file mode 100644
index 00000000..b98bfe73
--- /dev/null
+++ b/test/e2e/e2e_suite_test.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v.
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "E2E Test Suite") +} diff --git a/vendor/github.com/dsnet/compress/.travis.yml b/vendor/github.com/dsnet/compress/.travis.yml new file mode 100644 index 00000000..7e79820e --- /dev/null +++ b/vendor/github.com/dsnet/compress/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go +before_install: + - curl -L https://github.com/google/brotli/archive/v1.0.2.tar.gz | tar -zxv + - (cd brotli-1.0.2 && mkdir out && cd out && ../configure-cmake && make && sudo make install) + - rm -rf brotli-1.0.2 + - curl -L https://github.com/facebook/zstd/archive/v1.3.2.tar.gz | tar -zxv + - (cd zstd-1.3.2 && sudo make install) + - rm -rf zstd-1.3.2 + - sudo ldconfig + - mkdir /tmp/go1.12 + - curl -L -s https://dl.google.com/go/go1.12.linux-amd64.tar.gz | tar -zxf - -C /tmp/go1.12 --strip-components 1 + - unset GOROOT + - (GO111MODULE=on /tmp/go1.12/bin/go mod vendor) + - (cd /tmp && GO111MODULE=on /tmp/go1.12/bin/go get golang.org/x/lint/golint@8f45f776aaf18cebc8d65861cc70c33c60471952) + - (cd /tmp && GO111MODULE=on /tmp/go1.12/bin/go get honnef.co/go/tools/cmd/staticcheck@2019.1) +matrix: + include: + - go: 1.9.x + script: + - go test -v -race ./... + - go: 1.10.x + script: + - go test -v -race ./... + - go: 1.11.x + script: + - go test -v -race ./... + - go: 1.12.x + script: + - ./ztest.sh + - go: master + script: + - go test -v -race ./... + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/github.com/dsnet/compress/LICENSE.md b/vendor/github.com/dsnet/compress/LICENSE.md new file mode 100644 index 00000000..945b396c --- /dev/null +++ b/vendor/github.com/dsnet/compress/LICENSE.md @@ -0,0 +1,24 @@ +Copyright © 2015, Joe Tsai and The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. +* Neither the copyright holder nor the names of its contributors may be used to +endorse or promote products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dsnet/compress/README.md b/vendor/github.com/dsnet/compress/README.md new file mode 100644 index 00000000..63afb01c --- /dev/null +++ b/vendor/github.com/dsnet/compress/README.md @@ -0,0 +1,75 @@ +# Collection of compression libraries for Go # + +[![GoDoc](https://godoc.org/github.com/dsnet/compress/cmp?status.svg)](https://godoc.org/github.com/dsnet/compress) +[![Build Status](https://travis-ci.org/dsnet/compress.svg?branch=master)](https://travis-ci.org/dsnet/compress) +[![Report Card](https://goreportcard.com/badge/github.com/dsnet/compress)](https://goreportcard.com/report/github.com/dsnet/compress) + +## Introduction ## + +**NOTE: This library is in active development. As such, there are no guarantees about the stability of the API. The author reserves the right to arbitrarily break the API for any reason.** + +This repository hosts a collection of compression related libraries. The goal of this project is to provide pure Go implementations for popular compression algorithms beyond what the Go standard library provides. The goals for these packages are as follows: +* Maintainable: That the code remains well documented, well tested, readable, easy to maintain, and easy to verify that it conforms to the specification for the format being implemented. +* Performant: To be able to compress and decompress within at least 80% of the rates that the C implementations are able to achieve. +* Flexible: That the code provides low-level and fine granularity control over the compression streams similar to what the C APIs would provide. + +Of these three, the first objective is often at odds with the other two objectives and provides interesting challenges. Higher performance can often be achieved by muddling abstraction layers or using non-intuitive low-level primitives. Also, more features and functionality, while useful in some situations, often complicates the API. Thus, this package will attempt to satisfy all the goals, but will defer to favoring maintainability when the performance or flexibility benefits are not significant enough. + + +## Library Status ## + +For the packages available, only some features are currently implemented: + +| Package | Reader | Writer | +| ------- | :----: | :----: | +| brotli | :white_check_mark: | | +| bzip2 | :white_check_mark: | :white_check_mark: | +| flate | :white_check_mark: | | +| xflate | :white_check_mark: | :white_check_mark: | + +This library is in active development. As such, there are no guarantees about the stability of the API. The author reserves the right to arbitrarily break the API for any reason. When the library becomes more mature, it is planned to eventually conform to some strict versioning scheme like [Semantic Versioning](http://semver.org/). + +However, in the meanwhile, this library does provide some basic API guarantees. For the types defined below, the method signatures are guaranteed to not change. 
Note that the author still reserves the right to change the fields within each ```Reader``` and ```Writer``` structs. +```go +type ReaderConfig struct { ... } +type Reader struct { ... } + func NewReader(io.Reader, *ReaderConfig) (*Reader, error) { ... } + func (*Reader) Read([]byte) (int, error) { ... } + func (*Reader) Close() error { ... } + +type WriterConfig struct { ... } +type Writer struct { ... } + func NewWriter(io.Writer, *WriterConfig) (*Writer, error) { ... } + func (*Writer) Write([]byte) (int, error) { ... } + func (*Writer) Close() error { ... } +``` + +To see what work still remains, see the [Task List](https://github.com/dsnet/compress/wiki/Task-List). + +## Performance ## + +See [Performance Metrics](https://github.com/dsnet/compress/wiki/Performance-Metrics). + + +## Frequently Asked Questions ## + +See [Frequently Asked Questions](https://github.com/dsnet/compress/wiki/Frequently-Asked-Questions). + + +## Installation ## + +Run the command: + +```go get -u github.com/dsnet/compress``` + +This library requires `Go1.9` or higher in order to build. + + +## Packages ## + +| Package | Description | +| :------ | :---------- | +| [brotli](http://godoc.org/github.com/dsnet/compress/brotli) | Package brotli implements the Brotli format, described in RFC 7932. | +| [bzip2](http://godoc.org/github.com/dsnet/compress/bzip2) | Package bzip2 implements the BZip2 compressed data format. | +| [flate](http://godoc.org/github.com/dsnet/compress/flate) | Package flate implements the DEFLATE format, described in RFC 1951. | +| [xflate](http://godoc.org/github.com/dsnet/compress/xflate) | Package xflate implements the XFLATE format, an random-access extension to DEFLATE. | diff --git a/vendor/github.com/dsnet/compress/api.go b/vendor/github.com/dsnet/compress/api.go new file mode 100644 index 00000000..f80a9232 --- /dev/null +++ b/vendor/github.com/dsnet/compress/api.go @@ -0,0 +1,74 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package compress is a collection of compression libraries. +package compress + +import ( + "bufio" + "io" + + "github.com/dsnet/compress/internal/errors" +) + +// The Error interface identifies all compression related errors. +type Error interface { + error + CompressError() + + // IsDeprecated reports the use of a deprecated and unsupported feature. + IsDeprecated() bool + + // IsCorrupted reports whether the input stream was corrupted. + IsCorrupted() bool +} + +var _ Error = errors.Error{} + +// ByteReader is an interface accepted by all decompression Readers. +// It guarantees that the decompressor never reads more data than is necessary +// from the underlying io.Reader. +type ByteReader interface { + io.Reader + io.ByteReader +} + +var _ ByteReader = (*bufio.Reader)(nil) + +// BufferedReader is an interface accepted by all decompression Readers. +// It guarantees that the decompressor never reads more data than is necessary +// from the underlying io.Reader. Since BufferedReader allows a decompressor +// to peek at bytes further along in the stream without advancing the read +// pointer, decompression can experience a significant performance gain when +// provided a reader that satisfies this interface. Thus, a decompressor will +// prefer this interface over ByteReader for performance reasons. +// +// The bufio.Reader satisfies this interface. 
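The `BufferedReader` contract documented below is exactly what `*bufio.Reader` provides. A small self-contained sketch, with the interface reproduced under a local name purely for illustration (it is not imported from the vendored package), showing the compile-time assertion and the Peek/Discard behavior the comment describes:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// bufferedReader mirrors the documented BufferedReader interface so the
// sketch is self-contained.
type bufferedReader interface {
	io.Reader
	Buffered() int
	Peek(n int) ([]byte, error)
	Discard(n int) (int, error)
}

// Compile-time proof that *bufio.Reader satisfies it, as the doc comment claims.
var _ bufferedReader = (*bufio.Reader)(nil)

func main() {
	br := bufio.NewReader(bytes.NewReader([]byte("BZh91AY&SY")))

	// Peek at the magic bytes without advancing the read pointer...
	magic, err := br.Peek(2)
	fmt.Println(string(magic), err) // "BZ" <nil>

	// ...then discard them once the format is identified.
	n, _ := br.Discard(2)
	fmt.Println(n, br.Buffered()) // 2 8
}
```

Because Peek and Discard operate on already-buffered bytes, a decompressor handed such a reader can scan ahead without consuming more of the underlying stream than necessary, which is the performance point the comment makes.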
+type BufferedReader interface { + io.Reader + + // Buffered returns the number of bytes currently buffered. + // + // This value becomes invalid following the next Read/Discard operation. + Buffered() int + + // Peek returns the next n bytes without advancing the reader. + // + // If Peek returns fewer than n bytes, it also returns an error explaining + // why the peek is short. Peek must support peeking of at least 8 bytes. + // If 0 <= n <= Buffered(), Peek is guaranteed to succeed without reading + // from the underlying io.Reader. + // + // This result becomes invalid following the next Read/Discard operation. + Peek(n int) ([]byte, error) + + // Discard skips the next n bytes, returning the number of bytes discarded. + // + // If Discard skips fewer than n bytes, it also returns an error. + // If 0 <= n <= Buffered(), Discard is guaranteed to succeed without reading + // from the underlying io.Reader. + Discard(n int) (int, error) +} + +var _ BufferedReader = (*bufio.Reader)(nil) diff --git a/vendor/github.com/dsnet/compress/bzip2/bwt.go b/vendor/github.com/dsnet/compress/bzip2/bwt.go new file mode 100644 index 00000000..44a2541f --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/bwt.go @@ -0,0 +1,110 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/bzip2/internal/sais" + +// The Burrows-Wheeler Transform implementation used here is based on the +// Suffix Array by Induced Sorting (SA-IS) methodology by Nong, Zhang, and Chan. +// This implementation uses the sais algorithm originally written by Yuta Mori. +// +// The SA-IS algorithm runs in O(n) and outputs a Suffix Array. There is a +// mathematical relationship between Suffix Arrays and the Burrows-Wheeler +// Transform, such that a SA can be converted to a BWT in O(n) time. +// +// References: +// http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf +// https://github.com/cscott/compressjs/blob/master/lib/BWT.js +// https://www.quora.com/How-can-I-optimize-burrows-wheeler-transform-and-inverse-transform-to-work-in-O-n-time-O-n-space +type burrowsWheelerTransform struct { + buf []byte + sa []int + perm []uint32 +} + +func (bwt *burrowsWheelerTransform) Encode(buf []byte) (ptr int) { + if len(buf) == 0 { + return -1 + } + + // TODO(dsnet): Find a way to avoid the duplicate input string method. + // We only need to do this because suffix arrays (by definition) only + // operate non-wrapped suffixes of a string. On the other hand, + // the BWT specifically used in bzip2 operate on a strings that wrap-around + // when being sorted. + + // Step 1: Concatenate the input string to itself so that we can use the + // suffix array algorithm for bzip2's variant of BWT. + n := len(buf) + bwt.buf = append(append(bwt.buf[:0], buf...), buf...) + if cap(bwt.sa) < 2*n { + bwt.sa = make([]int, 2*n) + } + t := bwt.buf[:2*n] + sa := bwt.sa[:2*n] + + // Step 2: Compute the suffix array (SA). The input string, t, will not be + // modified, while the results will be written to the output, sa. + sais.ComputeSA(t, sa) + + // Step 3: Convert the SA to a BWT. Since ComputeSA does not mutate the + // input, we have two copies of the input; in buf and buf2. Thus, we write + // the transformation to buf, while using buf2. 
+ var j int + buf2 := t[n:] + for _, i := range sa { + if i < n { + if i == 0 { + ptr = j + i = n + } + buf[j] = buf2[i-1] + j++ + } + } + return ptr +} + +func (bwt *burrowsWheelerTransform) Decode(buf []byte, ptr int) { + if len(buf) == 0 { + return + } + + // Step 1: Compute cumm, where cumm[ch] reports the total number of + // characters that precede the character ch in the alphabet. + var cumm [256]int + for _, v := range buf { + cumm[v]++ + } + var sum int + for i, v := range cumm { + cumm[i] = sum + sum += v + } + + // Step 2: Compute perm, where perm[ptr] contains a pointer to the next + // byte in buf and the next pointer in perm itself. + if cap(bwt.perm) < len(buf) { + bwt.perm = make([]uint32, len(buf)) + } + perm := bwt.perm[:len(buf)] + for i, b := range buf { + perm[cumm[b]] = uint32(i) + cumm[b]++ + } + + // Step 3: Follow each pointer in perm to the next byte, starting with the + // origin pointer. + if cap(bwt.buf) < len(buf) { + bwt.buf = make([]byte, len(buf)) + } + buf2 := bwt.buf[:len(buf)] + i := perm[ptr] + for j := range buf2 { + buf2[j] = buf[i] + i = perm[i] + } + copy(buf, buf2) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/common.go b/vendor/github.com/dsnet/compress/bzip2/common.go new file mode 100644 index 00000000..c6339815 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/common.go @@ -0,0 +1,110 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package bzip2 implements the BZip2 compressed data format. +// +// Canonical C implementation: +// http://bzip.org +// +// Unofficial format specification: +// https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf +package bzip2 + +import ( + "fmt" + "hash/crc32" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" +) + +// There does not exist a formal specification of the BZip2 format. As such, +// much of this work is derived by either reverse engineering the original C +// source code or using secondary sources. +// +// Significant amounts of fuzz testing is done to ensure that outputs from +// this package is properly decoded by the C library. Furthermore, we test that +// both this package and the C library agree about what inputs are invalid. +// +// Compression stack: +// Run-length encoding 1 (RLE1) +// Burrows-Wheeler transform (BWT) +// Move-to-front transform (MTF) +// Run-length encoding 2 (RLE2) +// Prefix encoding (PE) +// +// References: +// http://bzip.org/ +// https://en.wikipedia.org/wiki/Bzip2 +// https://code.google.com/p/jbzip2/ + +const ( + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = 6 +) + +const ( + hdrMagic = 0x425a // Hex of "BZ" + blkMagic = 0x314159265359 // BCD of PI + endMagic = 0x177245385090 // BCD of sqrt(PI) + + blockSize = 100000 +) + +func errorf(c int, f string, a ...interface{}) error { + return errors.Error{Code: c, Pkg: "bzip2", Msg: fmt.Sprintf(f, a...)} +} + +func panicf(c int, f string, a ...interface{}) { + errors.Panic(errorf(c, f, a...)) +} + +// errWrap converts a lower-level errors.Error to be one from this package. +// The replaceCode passed in will be used to replace the code for any errors +// with the errors.Invalid code. +// +// For the Reader, set this to errors.Corrupted. +// For the Writer, set this to errors.Internal. 
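Before the error-wrapping helper below, a quick aside on the BWT machinery vendored above: the naive construction that `bwt.Encode` avoids is easy to state directly, and makes the suffix-array detour easier to follow. A sketch under the textbook definition of the transform (an independent illustration, not this package's API):

```go
package main

import (
	"fmt"
	"sort"
)

// naiveBWT derives the bzip2-style BWT (with origin pointer) by sorting all
// rotations directly; O(n^2 log n), for intuition only. sais.ComputeSA reaches
// the same result in O(n) via the doubled-string suffix array used above.
func naiveBWT(s []byte) (bwt []byte, ptr int) {
	n := len(s)
	rot := make([]int, n) // rot[j] is the start index of the j-th rotation
	for i := range rot {
		rot[i] = i
	}
	sort.Slice(rot, func(a, b int) bool {
		for k := 0; k < n; k++ {
			ca, cb := s[(rot[a]+k)%n], s[(rot[b]+k)%n]
			if ca != cb {
				return ca < cb
			}
		}
		return false
	})
	for j, i := range rot {
		if i == 0 {
			ptr = j // row holding the original string
		}
		bwt = append(bwt, s[(i+n-1)%n]) // last column of the sorted rotations
	}
	return bwt, ptr
}

func main() {
	b, p := naiveBWT([]byte("banana"))
	fmt.Printf("%s %d\n", b, p) // nnbaaa 3
}
```

The three steps of `bwt.Decode` above invert exactly this: count character frequencies, build the `perm` next-pointer table, then chase pointers starting from `ptr`, recovering "banana" from ("nnbaaa", 3).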
+func errWrap(err error, replaceCode int) error { + if cerr, ok := err.(errors.Error); ok { + if errors.IsInvalid(cerr) { + cerr.Code = replaceCode + } + err = errorf(cerr.Code, "%s", cerr.Msg) + } + return err +} + +var errClosed = errorf(errors.Closed, "") + +// crc computes the CRC-32 used by BZip2. +// +// The CRC-32 computation in bzip2 treats bytes as having bits in big-endian +// order. That is, the MSB is read before the LSB. Thus, we can use the +// standard library version of CRC-32 IEEE with some minor adjustments. +// +// The byte array is used as an intermediate buffer to swap the bits of every +// byte of the input. +type crc struct { + val uint32 + buf [256]byte +} + +// update computes the CRC-32 of appending buf to c. +func (c *crc) update(buf []byte) { + cval := internal.ReverseUint32(c.val) + for len(buf) > 0 { + n := len(buf) + if n > len(c.buf) { + n = len(c.buf) + } + for i, b := range buf[:n] { + c.buf[i] = internal.ReverseLUT[b] + } + cval = crc32.Update(cval, crc32.IEEETable, c.buf[:n]) + buf = buf[n:] + } + c.val = internal.ReverseUint32(cval) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go new file mode 100644 index 00000000..ddd32f50 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_off.go @@ -0,0 +1,13 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !gofuzz + +// This file exists to suppress fuzzing details from release builds. + +package bzip2 + +type fuzzReader struct{} + +func (*fuzzReader) updateChecksum(int64, uint32) {} diff --git a/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go new file mode 100644 index 00000000..54122351 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/fuzz_on.go @@ -0,0 +1,77 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build gofuzz + +// This file exists to export internal implementation details for fuzz testing. + +package bzip2 + +func ForwardBWT(buf []byte) (ptr int) { + var bwt burrowsWheelerTransform + return bwt.Encode(buf) +} + +func ReverseBWT(buf []byte, ptr int) { + var bwt burrowsWheelerTransform + bwt.Decode(buf, ptr) +} + +type fuzzReader struct { + Checksums Checksums +} + +// updateChecksum updates Checksums. +// +// If a valid pos is provided, it appends the (pos, val) pair to the slice. +// Otherwise, it will update the last record with the new value. +func (fr *fuzzReader) updateChecksum(pos int64, val uint32) { + if pos >= 0 { + fr.Checksums = append(fr.Checksums, Checksum{pos, val}) + } else { + fr.Checksums[len(fr.Checksums)-1].Value = val + } +} + +type Checksum struct { + Offset int64 // Bit offset of the checksum + Value uint32 // Checksum value +} + +type Checksums []Checksum + +// Apply overwrites all checksum fields in d with the ones in cs. +func (cs Checksums) Apply(d []byte) []byte { + d = append([]byte(nil), d...) + for _, c := range cs { + setU32(d, c.Offset, c.Value) + } + return d +} + +func setU32(d []byte, pos int64, val uint32) { + for i := uint(0); i < 32; i++ { + bpos := uint64(pos) + uint64(i) + d[bpos/8] &= ^byte(1 << (7 - bpos%8)) + d[bpos/8] |= byte(val>>(31-i)) << (7 - bpos%8) + } +} + +// Verify checks that all checksum fields in d matches those in cs. 
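The `crc` type above swaps bit order with the `internal.ReverseLUT` table; the same computation can be written with `math/bits`, which makes the big-endian trick in the doc comment explicit. A standalone sketch of the idea, not the package's internals verbatim:

```go
package main

import (
	"fmt"
	"hash/crc32"
	"math/bits"
)

// bzip2CRC updates a bzip2-style CRC-32 over buf: bit-reverse every input
// byte, run standard CRC-32/IEEE, and bit-reverse the running value at the
// boundaries so callers always see it in bzip2's bit order.
func bzip2CRC(val uint32, buf []byte) uint32 {
	cval := bits.Reverse32(val)
	rev := make([]byte, len(buf))
	for i, b := range buf {
		rev[i] = bits.Reverse8(b) // MSB-first byte, as bzip2 reads it
	}
	cval = crc32.Update(cval, crc32.IEEETable, rev)
	return bits.Reverse32(cval)
}

func main() {
	fmt.Printf("%08x\n", bzip2CRC(0, []byte("hello")))
}
```

This is the same "minor adjustment" the comment describes: the standard-library IEEE table does all the work once every byte, and the accumulated value, is mirrored.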
+func (cs Checksums) Verify(d []byte) bool { + for _, c := range cs { + if getU32(d, c.Offset) != c.Value { + return false + } + } + return true +} + +func getU32(d []byte, pos int64) (val uint32) { + for i := uint(0); i < 32; i++ { + bpos := uint64(pos) + uint64(i) + val |= (uint32(d[bpos/8] >> (7 - bpos%8))) << (31 - i) + } + return val +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go new file mode 100644 index 00000000..cd4eee82 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go @@ -0,0 +1,28 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package sais implements a linear time suffix array algorithm. +package sais + +//go:generate go run sais_gen.go byte sais_byte.go +//go:generate go run sais_gen.go int sais_int.go + +// This package ports the C sais implementation by Yuta Mori. The ports are +// located in sais_byte.go and sais_int.go, which are identical to each other +// except for the types. Since Go does not support generics, we use generators to +// create the two files. +// +// References: +// https://sites.google.com/site/yuta256/sais +// https://www.researchgate.net/publication/221313676_Linear_Time_Suffix_Array_Construction_Using_D-Critical_Substrings +// https://www.researchgate.net/publication/224176324_Two_Efficient_Algorithms_for_Linear_Time_Suffix_Array_Construction + +// ComputeSA computes the suffix array of t and places the result in sa. +// Both t and sa must be the same length. +func ComputeSA(t []byte, sa []int) { + if len(sa) != len(t) { + panic("mismatching sizes") + } + computeSA_byte(t, sa, 0, len(t), 256) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go new file mode 100644 index 00000000..01b8529b --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go @@ -0,0 +1,661 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Code generated by sais_gen.go. DO NOT EDIT. + +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+// ==================================================== + +package sais + +func getCounts_byte(T []byte, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_byte(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_byte(T []byte, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_byte(T []byte, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. + i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. + name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_byte(T []byte, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. 
+ getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_byte(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. + name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. + for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_byte(T []byte, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_byte(T, C, n, k) + } + getBuckets_byte(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_byte(T []byte, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. + getCounts_byte(T, C, n, k) + getBuckets_byte(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_byte(T, SA, C, B, D, n, k) + name = postProcLMS2_byte(SA, n, m) + } else { + sortLMS1_byte(T, SA, C, B, n, k) + name = postProcLMS1_byte(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. + if name < m { + newfs = n + fs - 2*m + if flags&(1|4|8) == 0 { + if k+name <= newfs { + newfs -= k + } else { + flags |= 8 + } + } + RA = SA[m+newfs:] + for i, j = m+(n>>1)-1, m-1; m <= i; i-- { + if SA[i] != 0 { + RA[j] = SA[i] - 1 + j-- + } + } + computeSA_int(RA, SA, newfs, m, name) + + i = n - 1 + j = m - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + RA[j] = i + 1 + j-- + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + for i = 0; i < m; i++ { + SA[i] = RA[SA[i]] + } + if flags&4 > 0 { + B = make([]int, k) + C = B + } + if flags&2 > 0 { + B = make([]int, k) + } + } + + // Stage 3: Induce the result for the original problem. + if flags&8 > 0 { + getCounts_byte(T, C, n, k) + } + // Put all left-most S characters into their buckets. 
+ if m > 1 { + getBuckets_byte(C, B, k, true) // Find ends of buckets + i = m - 1 + j = n + p = SA[m-1] + c1 = int(T[p]) + for { + c0 = c1 + q = B[c0] + for q < j { + j-- + SA[j] = 0 + } + for { + j-- + SA[j] = p + if i--; i < 0 { + break + } + p = SA[i] + if c1 = int(T[p]); c1 != c0 { + break + } + } + if i < 0 { + break + } + } + for j > 0 { + j-- + SA[j] = 0 + } + } + induceSA_byte(T, SA, C, B, n, k) +} diff --git a/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go new file mode 100644 index 00000000..280682f0 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go @@ -0,0 +1,661 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Code generated by sais_gen.go. DO NOT EDIT. + +// ==================================================== +// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved. +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// ==================================================== + +package sais + +func getCounts_int(T []int, C []int, n, k int) { + var i int + for i = 0; i < k; i++ { + C[i] = 0 + } + for i = 0; i < n; i++ { + C[T[i]]++ + } +} + +func getBuckets_int(C, B []int, k int, end bool) { + var i, sum int + if end { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum + } + } else { + for i = 0; i < k; i++ { + sum += C[i] + B[i] = sum - C[i] + } + } +} + +func sortLMS1_int(T []int, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + if int(T[j]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + + // Compute SAs. 
+ if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + b-- + if int(T[j]) > c1 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS1_int(T []int, SA []int, n, m int) int { + var i, j, p, q, plen, qlen, name int + var c0, c1 int + var diff bool + + // Compact all the sorted substrings into the first m items of SA. + // 2*m must be not larger than n (provable). + for i = 0; SA[i] < 0; i++ { + SA[i] = ^SA[i] + } + if i < m { + for j, i = i, i+1; ; i++ { + if p = SA[i]; p < 0 { + SA[j] = ^p + j++ + SA[i] = 0 + if j == m { + break + } + } + } + } + + // Store the length of all substrings. + i = n - 1 + j = n - 1 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + SA[m+((i+1)>>1)] = j - i + j = i + 1 + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + // Find the lexicographic names of all substrings. + name = 0 + qlen = 0 + for i, q = 0, n; i < m; i++ { + p = SA[i] + plen = SA[m+(p>>1)] + diff = true + if (plen == qlen) && ((q + plen) < n) { + for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ { + } + if j == plen { + diff = false + } + } + if diff { + name++ + q = p + qlen = plen + } + SA[m+(p>>1)] = name + } + return name +} + +func sortLMS2_int(T []int, SA, C, B, D []int, n, k int) { + var b, i, j, t, d int + var c0, c1 int + + // Compute SAl. + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + j-- + if int(T[j]) < c1 { + t = 1 + } else { + t = 0 + } + j += n + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i, d = 0, 0; i < n; i++ { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) < c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + if t&1 > 0 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + SA[i] = 0 + } else if j < 0 { + SA[i] = ^j + } + } + for i = n - 1; 0 <= i; i-- { + if SA[i] > 0 { + if SA[i] < n { + SA[i] += n + for j = i - 1; SA[j] < n; j-- { + } + SA[j] -= n + i = j + } + } + } + + // Compute SAs. + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i, d = n-1, d+1; i >= 0; i-- { + if j = SA[i]; j > 0 { + if n <= j { + d += 1 + j -= n + } + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + j-- + t = int(c0) << 1 + if int(T[j]) > c1 { + t |= 1 + } + if D[t] != d { + j += n + D[t] = d + } + b-- + if t&1 > 0 { + SA[b] = ^(j + 1) + } else { + SA[b] = j + } + SA[i] = 0 + } + } +} + +func postProcLMS2_int(SA []int, n, m int) int { + var i, j, d, name int + + // Compact all the sorted LMS substrings into the first m items of SA. + name = 0 + for i = 0; SA[i] < 0; i++ { + j = ^SA[i] + if n <= j { + name += 1 + } + SA[i] = j + } + if i < m { + for d, i = i, i+1; ; i++ { + if j = SA[i]; j < 0 { + j = ^j + if n <= j { + name += 1 + } + SA[d] = j + d++ + SA[i] = 0 + if d == m { + break + } + } + } + } + if name < m { + // Store the lexicographic names. 
+ for i, d = m-1, name+1; 0 <= i; i-- { + if j = SA[i]; n <= j { + j -= n + d-- + } + SA[m+(j>>1)] = d + } + } else { + // Unset flags. + for i = 0; i < m; i++ { + if j = SA[i]; n <= j { + j -= n + SA[i] = j + } + } + } + return name +} + +func induceSA_int(T []int, SA, C, B []int, n, k int) { + var b, i, j int + var c0, c1 int + + // Compute SAl. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, false) // Find starts of buckets + j = n - 1 + c1 = int(T[j]) + b = B[c1] + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + for i = 0; i < n; i++ { + j = SA[i] + SA[i] = ^j + if j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + if j > 0 && int(T[j-1]) < c1 { + SA[b] = ^j + } else { + SA[b] = j + } + b++ + } + } + + // Compute SAs. + if &C[0] == &B[0] { + getCounts_int(T, C, n, k) + } + getBuckets_int(C, B, k, true) // Find ends of buckets + c1 = 0 + b = B[c1] + for i = n - 1; i >= 0; i-- { + if j = SA[i]; j > 0 { + j-- + if c0 = int(T[j]); c0 != c1 { + B[c1] = b + c1 = c0 + b = B[c1] + } + b-- + if (j == 0) || (int(T[j-1]) > c1) { + SA[b] = ^j + } else { + SA[b] = j + } + } else { + SA[i] = ^j + } + } +} + +func computeSA_int(T []int, SA []int, fs, n, k int) { + const ( + minBucketSize = 512 + sortLMS2Limit = 0x3fffffff + ) + + var C, B, D, RA []int + var bo int // Offset of B relative to SA + var b, i, j, m, p, q, name, newfs int + var c0, c1 int + var flags uint + + if k <= minBucketSize { + C = make([]int, k) + if k <= fs { + bo = n + fs - k + B = SA[bo:] + flags = 1 + } else { + B = make([]int, k) + flags = 3 + } + } else if k <= fs { + C = SA[n+fs-k:] + if k <= fs-k { + bo = n + fs - 2*k + B = SA[bo:] + flags = 0 + } else if k <= 4*minBucketSize { + B = make([]int, k) + flags = 2 + } else { + B = C + flags = 8 + } + } else { + C = make([]int, k) + B = C + flags = 4 | 8 + } + if n <= sortLMS2Limit && 2 <= (n/k) { + if flags&1 > 0 { + if 2*k <= fs-k { + flags |= 32 + } else { + flags |= 16 + } + } else if flags == 0 && 2*k <= (fs-2*k) { + flags |= 32 + } + } + + // Stage 1: Reduce the problem by at least 1/2. + // Sort all the LMS-substrings. + getCounts_int(T, C, n, k) + getBuckets_int(C, B, k, true) // Find ends of buckets + for i = 0; i < n; i++ { + SA[i] = 0 + } + b = -1 + i = n - 1 + j = n + m = 0 + c0 = int(T[n-1]) + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + for i >= 0 { + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 > c1 { + break + } + } + if i >= 0 { + if b >= 0 { + SA[b] = j + } + B[c1]-- + b = B[c1] + j = i + m++ + for { + c1 = c0 + if i--; i < 0 { + break + } + if c0 = int(T[i]); c0 < c1 { + break + } + } + } + } + + if m > 1 { + if flags&(16|32) > 0 { + if flags&16 > 0 { + D = make([]int, 2*k) + } else { + D = SA[bo-2*k:] + } + B[T[j+1]]++ + for i, j = 0, 0; i < k; i++ { + j += C[i] + if B[i] != j { + SA[B[i]] += n + } + D[i] = 0 + D[i+k] = 0 + } + sortLMS2_int(T, SA, C, B, D, n, k) + name = postProcLMS2_int(SA, n, m) + } else { + sortLMS1_int(T, SA, C, B, n, k) + name = postProcLMS1_int(T, SA, n, m) + } + } else if m == 1 { + SA[b] = j + 1 + name = 1 + } else { + name = 0 + } + + // Stage 2: Solve the reduced problem. + // Recurse if names are not yet unique. 
+	if name < m {
+		newfs = n + fs - 2*m
+		if flags&(1|4|8) == 0 {
+			if k+name <= newfs {
+				newfs -= k
+			} else {
+				flags |= 8
+			}
+		}
+		RA = SA[m+newfs:]
+		for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
+			if SA[i] != 0 {
+				RA[j] = SA[i] - 1
+				j--
+			}
+		}
+		computeSA_int(RA, SA, newfs, m, name)
+
+		i = n - 1
+		j = m - 1
+		c0 = int(T[n-1])
+		for {
+			c1 = c0
+			if i--; i < 0 {
+				break
+			}
+			if c0 = int(T[i]); c0 < c1 {
+				break
+			}
+		}
+		for i >= 0 {
+			for {
+				c1 = c0
+				if i--; i < 0 {
+					break
+				}
+				if c0 = int(T[i]); c0 > c1 {
+					break
+				}
+			}
+			if i >= 0 {
+				RA[j] = i + 1
+				j--
+				for {
+					c1 = c0
+					if i--; i < 0 {
+						break
+					}
+					if c0 = int(T[i]); c0 < c1 {
+						break
+					}
+				}
+			}
+		}
+		for i = 0; i < m; i++ {
+			SA[i] = RA[SA[i]]
+		}
+		if flags&4 > 0 {
+			B = make([]int, k)
+			C = B
+		}
+		if flags&2 > 0 {
+			B = make([]int, k)
+		}
+	}
+
+	// Stage 3: Induce the result for the original problem.
+	if flags&8 > 0 {
+		getCounts_int(T, C, n, k)
+	}
+	// Put all left-most S characters into their buckets.
+	if m > 1 {
+		getBuckets_int(C, B, k, true) // Find ends of buckets
+		i = m - 1
+		j = n
+		p = SA[m-1]
+		c1 = int(T[p])
+		for {
+			c0 = c1
+			q = B[c0]
+			for q < j {
+				j--
+				SA[j] = 0
+			}
+			for {
+				j--
+				SA[j] = p
+				if i--; i < 0 {
+					break
+				}
+				p = SA[i]
+				if c1 = int(T[p]); c1 != c0 {
+					break
+				}
+			}
+			if i < 0 {
+				break
+			}
+		}
+		for j > 0 {
+			j--
+			SA[j] = 0
+		}
+	}
+	induceSA_int(T, SA, C, B, n, k)
+}
diff --git a/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go
new file mode 100644
index 00000000..5c71b343
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go
@@ -0,0 +1,131 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package bzip2
+
+import "github.com/dsnet/compress/internal/errors"
+
+// moveToFront implements both the MTF and RLE stages of bzip2 at the same time.
+// Any runs of zeros in the encoded output will be replaced by a sequence of
+// RUNA and RUNB symbols that encode the length of the run.
+//
+// The RLE encoding used can actually be encoded to and decoded from using
+// normal two's complement arithmetic. The methodology for doing so is below.
+//
+// Assuming the following:
+//	num: The value being encoded by RLE encoding.
+//	run: A sequence of RUNA and RUNB symbols represented as a binary integer,
+//	where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN
+//	symbols are at the least-significant bit positions.
+//	cnt: The number of RUNA and RUNB symbols.
+//
+// Then the RLE encoding used by bzip2 has this mathematical property:
+//	num+1 == (1<<cnt) | run
+type moveToFront struct {
+	dictBuf [256]byte
+	dictLen int
+
+	vals    []byte
+	syms    []uint16
+	blkSize int
+}
+
+func (mtf *moveToFront) Init(dict []byte, blkSize int) {
+	if len(dict) > len(mtf.dictBuf) {
+		panicf(errors.Internal, "alphabet too large")
+	}
+	copy(mtf.dictBuf[:], dict)
+	mtf.dictLen = len(dict)
+	mtf.blkSize = blkSize
+}
+
+func (mtf *moveToFront) Encode(vals []byte) (syms []uint16) {
+	dict := mtf.dictBuf[:mtf.dictLen]
+	syms = mtf.syms[:0]
+
+	if len(vals) > mtf.blkSize {
+		panicf(errors.Internal, "exceeded block size")
+	}
+
+	var lastNum uint32
+	for _, val := range vals {
+		// Normal move-to-front transform.
+		var idx uint8 // Reverse lookup idx in dict
+		for di, dv := range dict {
+			if dv == val {
+				idx = uint8(di)
+				break
+			}
+		}
+		copy(dict[1:], dict[:idx])
+		dict[0] = val
+
+		// Run-length encoding augmentation.
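As an aside before the run-emission logic continues below: the `num+1 == (1<<cnt) | run` identity from the comment above is easiest to see by peeling bits off `num+1`. A self-contained sketch, with the symbol names spelled out only for illustration:

```go
package main

import "fmt"

// encodeRunLength expands a zero-run length into bzip2 RUNA/RUNB symbols via
// the num+1 == (1<<cnt) | run identity: shift bits off num+1 until only the
// leading 1 remains, emitting the least-significant symbol first.
func encodeRunLength(num uint32) (syms []string) {
	for rc := num + 1; rc != 1; rc >>= 1 {
		if rc&1 == 0 {
			syms = append(syms, "RUNA") // 0 bit
		} else {
			syms = append(syms, "RUNB") // 1 bit
		}
	}
	return syms
}

func main() {
	// num=5 => num+1 = 0b110 => run = 0b10 over cnt = 2 symbols.
	fmt.Println(encodeRunLength(5)) // [RUNA RUNB]
}
```

The `for rc := lastNum + 1; rc != 1; rc >>= 1` loops in Encode are exactly this peeling, with `uint16(rc&1)` standing in for the symbol names, and Decode reassembles `run` and `cnt` to recover `num` through the same identity.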
+ if idx == 0 {
+ lastNum++
+ continue
+ }
+ if lastNum > 0 {
+ for rc := lastNum + 1; rc != 1; rc >>= 1 {
+ syms = append(syms, uint16(rc&1))
+ }
+ lastNum = 0
+ }
+ syms = append(syms, uint16(idx)+1)
+ }
+ if lastNum > 0 {
+ for rc := lastNum + 1; rc != 1; rc >>= 1 {
+ syms = append(syms, uint16(rc&1))
+ }
+ }
+ mtf.syms = syms
+ return syms
+}
+
+func (mtf *moveToFront) Decode(syms []uint16) (vals []byte) {
+ dict := mtf.dictBuf[:mtf.dictLen]
+ vals = mtf.vals[:0]
+
+ var lastCnt uint
+ var lastRun uint32
+ for _, sym := range syms {
+ // Run-length encoding augmentation.
+ if sym < 2 {
+ lastRun |= uint32(sym) << lastCnt
+ lastCnt++
+ continue
+ }
+ if lastCnt > 0 {
+ cnt := int((1<<lastCnt)|lastRun) - 1
+ if cnt > mtf.blkSize || lastCnt > 24 {
+ panicf(errors.Corrupted, "run-length decoding exceeded block size")
+ }
+ for i := cnt; i > 0; i-- {
+ vals = append(vals, dict[0])
+ }
+ lastCnt, lastRun = 0, 0
+ }
+
+ // Normal move-to-front transform.
+ val := dict[sym-1] // Forward lookup val in dict
+ copy(dict[1:], dict[:sym-1])
+ dict[0] = val
+
+ if len(vals) >= mtf.blkSize {
+ panicf(errors.Corrupted, "run-length decoding exceeded block size")
+ }
+ vals = append(vals, val)
+ }
+ if lastCnt > 0 {
+ cnt := int((1<<lastCnt)|lastRun) - 1
+ if cnt > mtf.blkSize || lastCnt > 24 {
+ panicf(errors.Corrupted, "run-length decoding exceeded block size")
+ }
+ for i := cnt; i > 0; i-- {
+ vals = append(vals, dict[0])
+ }
+ }
+ mtf.vals = vals
+ return vals
+}
diff --git a/vendor/github.com/dsnet/compress/bzip2/prefix.go b/vendor/github.com/dsnet/compress/bzip2/prefix.go
new file mode 100644
index 00000000..4847d809
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/bzip2/prefix.go
@@ -0,0 +1,374 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package bzip2
+
+import (
+ "io"
+
+ "github.com/dsnet/compress/internal"
+ "github.com/dsnet/compress/internal/errors"
+ "github.com/dsnet/compress/internal/prefix"
+)
+
+const (
+ minNumTrees = 2
+ maxNumTrees = 6
+
+ maxPrefixBits = 20 // Maximum bit-width of a prefix code
+ maxNumSyms = 256 + 2 // Maximum number of symbols in the alphabet
+ numBlockSyms = 50 // Number of bytes in a block
+)
+
+// encSel and decSel are used to handle the prefix encoding for tree selectors.
+// The prefix encoding is as follows: +// +// Code TreeIdx +// 0 <=> 0 +// 10 <=> 1 +// 110 <=> 2 +// 1110 <=> 3 +// 11110 <=> 4 +// 111110 <=> 5 +// 111111 <=> 6 Invalid tree index, so should fail +// +var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) { + var selCodes [maxNumTrees + 1]prefix.PrefixCode + for i := range selCodes { + selCodes[i] = prefix.PrefixCode{Sym: uint32(i), Len: uint32(i + 1)} + } + selCodes[maxNumTrees] = prefix.PrefixCode{Sym: maxNumTrees, Len: maxNumTrees} + prefix.GeneratePrefixes(selCodes[:]) + e.Init(selCodes[:]) + d.Init(selCodes[:]) + return +}() + +type prefixReader struct{ prefix.Reader } + +func (pr *prefixReader) Init(r io.Reader) { + pr.Reader.Init(r, true) +} + +func (pr *prefixReader) ReadBitsBE64(nb uint) uint64 { + if nb <= 32 { + v := uint32(pr.ReadBits(nb)) + return uint64(internal.ReverseUint32N(v, nb)) + } + v0 := internal.ReverseUint32(uint32(pr.ReadBits(32))) + v1 := internal.ReverseUint32(uint32(pr.ReadBits(nb - 32))) + v := uint64(v0)<<32 | uint64(v1) + return v >> (64 - nb) +} + +func (pr *prefixReader) ReadPrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Decoder) { + for i, pc := range codes { + clen := int(pr.ReadBitsBE64(5)) + sum := 1 << maxPrefixBits + for sym := range pc { + for { + if clen < 1 || clen > maxPrefixBits { + panicf(errors.Corrupted, "invalid prefix bit-length: %d", clen) + } + + b, ok := pr.TryReadBits(1) + if !ok { + b = pr.ReadBits(1) + } + if b == 0 { + break + } + + b, ok = pr.TryReadBits(1) + if !ok { + b = pr.ReadBits(1) + } + clen -= int(b*2) - 1 // +1 or -1 + } + pc[sym] = prefix.PrefixCode{Sym: uint32(sym), Len: uint32(clen)} + sum -= (1 << maxPrefixBits) >> uint(clen) + } + + if sum == 0 { + // Fast path, but only handles complete trees. + if err := prefix.GeneratePrefixes(pc); err != nil { + errors.Panic(err) // Using complete trees; should never fail + } + } else { + // Slow path, but handles anything. + pc = handleDegenerateCodes(pc) // Never fails, but may fail later + codes[i] = pc + } + trees[i].Init(pc) + } +} + +type prefixWriter struct{ prefix.Writer } + +func (pw *prefixWriter) Init(w io.Writer) { + pw.Writer.Init(w, true) +} + +func (pw *prefixWriter) WriteBitsBE64(v uint64, nb uint) { + if nb <= 32 { + v := internal.ReverseUint32N(uint32(v), nb) + pw.WriteBits(uint(v), nb) + return + } + v <<= (64 - nb) + v0 := internal.ReverseUint32(uint32(v >> 32)) + v1 := internal.ReverseUint32(uint32(v)) + pw.WriteBits(uint(v0), 32) + pw.WriteBits(uint(v1), nb-32) + return +} + +func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Encoder) { + for i, pc := range codes { + if err := prefix.GeneratePrefixes(pc); err != nil { + errors.Panic(err) // Using complete trees; should never fail + } + trees[i].Init(pc) + + clen := int(pc[0].Len) + pw.WriteBitsBE64(uint64(clen), 5) + for _, c := range pc { + for int(c.Len) < clen { + pw.WriteBits(3, 2) // 11 + clen-- + } + for int(c.Len) > clen { + pw.WriteBits(1, 2) // 10 + clen++ + } + pw.WriteBits(0, 1) + } + } +} + +// handleDegenerateCodes converts a degenerate tree into a canonical tree. 
+// +// For example, when the input is an under-subscribed tree: +// input: []PrefixCode{ +// {Sym: 0, Len: 3}, +// {Sym: 1, Len: 4}, +// {Sym: 2, Len: 3}, +// } +// output: []PrefixCode{ +// {Sym: 0, Len: 3, Val: 0}, // 000 +// {Sym: 1, Len: 4, Val: 2}, // 0010 +// {Sym: 2, Len: 3, Val: 4}, // 100 +// {Sym: 258, Len: 4, Val: 10}, // 1010 +// {Sym: 259, Len: 3, Val: 6}, // 110 +// {Sym: 260, Len: 1, Val: 1}, // 1 +// } +// +// For example, when the input is an over-subscribed tree: +// input: []PrefixCode{ +// {Sym: 0, Len: 1}, +// {Sym: 1, Len: 3}, +// {Sym: 2, Len: 4}, +// {Sym: 3, Len: 3}, +// {Sym: 4, Len: 2}, +// } +// output: []PrefixCode{ +// {Sym: 0, Len: 1, Val: 0}, // 0 +// {Sym: 1, Len: 3, Val: 3}, // 011 +// {Sym: 3, Len: 3, Val: 7}, // 111 +// {Sym: 4, Len: 2, Val: 1}, // 01 +// } +func handleDegenerateCodes(codes prefix.PrefixCodes) prefix.PrefixCodes { + // Since there is no formal definition for the BZip2 format, there is no + // specification that says that the code lengths must form a complete + // prefix tree (IE: it is neither over-subscribed nor under-subscribed). + // Thus, the original C implementation becomes the reference for how prefix + // decoding is done in these edge cases. Unfortunately, the C version does + // not error when an invalid tree is used, but rather allows decoding to + // continue and only errors if some bit pattern happens to cause an error. + // Thus, it is possible for an invalid tree to end up decoding an input + // "properly" so long as invalid bit patterns are not present. In order to + // replicate this non-specified behavior, we use a ported version of the + // C code to generate the codes as a valid canonical tree by substituting + // invalid nodes with invalid symbols. + // + // ==================================================== + // This program, "bzip2", the associated library "libbzip2", and all + // documentation, are copyright (C) 1996-2010 Julian R Seward. All + // rights reserved. + // + // Redistribution and use in source and binary forms, with or without + // modification, are permitted provided that the following conditions + // are met: + // + // 1. Redistributions of source code must retain the above copyright + // notice, this list of conditions and the following disclaimer. + // + // 2. The origin of this software must not be misrepresented; you must + // not claim that you wrote the original software. If you use this + // software in a product, an acknowledgment in the product + // documentation would be appreciated but is not required. + // + // 3. Altered source versions must be plainly marked as such, and must + // not be misrepresented as being the original software. + // + // 4. The name of the author may not be used to endorse or promote + // products derived from this software without specific prior written + // permission. + // + // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + // OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + // ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + // GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + // + // Julian Seward, jseward@bzip.org + // bzip2/libbzip2 version 1.0.6 of 6 September 2010 + // ==================================================== + var ( + limits [maxPrefixBits + 2]int32 + bases [maxPrefixBits + 2]int32 + perms [maxNumSyms]int32 + + minLen = uint32(maxPrefixBits) + maxLen = uint32(0) + ) + + const ( + statusOkay = iota + statusInvalid + statusNeedBits + statusMaxBits + ) + + // createTables is the BZ2_hbCreateDecodeTables function from the C code. + createTables := func(codes []prefix.PrefixCode) { + for _, c := range codes { + if c.Len > maxLen { + maxLen = c.Len + } + if c.Len < minLen { + minLen = c.Len + } + } + + var pp int + for i := minLen; i <= maxLen; i++ { + for j, c := range codes { + if c.Len == i { + perms[pp] = int32(j) + pp++ + } + } + } + + var vec int32 + for _, c := range codes { + bases[c.Len+1]++ + } + for i := 1; i < len(bases); i++ { + bases[i] += bases[i-1] + } + for i := minLen; i <= maxLen; i++ { + vec += bases[i+1] - bases[i] + limits[i] = vec - 1 + vec <<= 1 + } + for i := minLen + 1; i <= maxLen; i++ { + bases[i] = ((limits[i-1] + 1) << 1) - bases[i] + } + } + + // getSymbol is the GET_MTF_VAL macro from the C code. + getSymbol := func(c prefix.PrefixCode) (uint32, int) { + v := internal.ReverseUint32(c.Val) + n := c.Len + + zn := minLen + if zn > n { + return 0, statusNeedBits + } + zvec := int32(v >> (32 - zn)) + v <<= zn + for { + if zn > maxLen { + return 0, statusMaxBits + } + if zvec <= limits[zn] { + break + } + zn++ + if zn > n { + return 0, statusNeedBits + } + zvec = (zvec << 1) | int32(v>>31) + v <<= 1 + } + if zvec-bases[zn] < 0 || zvec-bases[zn] >= maxNumSyms { + return 0, statusInvalid + } + return uint32(perms[zvec-bases[zn]]), statusOkay + } + + // Step 1: Create the prefix trees using the C algorithm. + createTables(codes) + + // Step 2: Starting with the shortest bit pattern, explore the whole tree. + // If tree is under-subscribed, the worst-case runtime is O(1< 0 { + codes = append(codes, c) + } + } + return codes +} diff --git a/vendor/github.com/dsnet/compress/bzip2/reader.go b/vendor/github.com/dsnet/compress/bzip2/reader.go new file mode 100644 index 00000000..86d3f718 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/reader.go @@ -0,0 +1,274 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
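As an aside for readers following the prefix-code handling above: whether a set of code lengths forms a complete tree is exactly the Kraft-sum bookkeeping that ReadPrefixCodes performs with its fixed-point `sum` variable. The following standalone sketch (not part of the vendored package; the helper name `kraftComplete` is hypothetical) reproduces that check and evaluates the under- and over-subscribed examples from the handleDegenerateCodes documentation:

```
// kraftComplete reports whether prefix-code lengths form a complete tree:
// the Kraft sum of 2^-len over all codes must equal exactly 1. The
// fixed-point trick mirrors ReadPrefixCodes, where 1<<maxBits plays 1.0.
package main

import "fmt"

func kraftComplete(lens []uint, maxBits uint) bool {
	sum := 1 << maxBits // Fixed-point representation of 1.0
	for _, n := range lens {
		sum -= (1 << maxBits) >> n // Subtract 2^-n in fixed point
	}
	return sum == 0
}

func main() {
	fmt.Println(kraftComplete([]uint{1, 2, 2}, 20))       // true: complete tree
	fmt.Println(kraftComplete([]uint{3, 4, 3}, 20))       // false: under-subscribed
	fmt.Println(kraftComplete([]uint{1, 3, 4, 3, 2}, 20)) // false: over-subscribed
}
```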
+ +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +type Reader struct { + InputOffset int64 // Total number of bytes read from underlying io.Reader + OutputOffset int64 // Total number of bytes emitted from Read + + rd prefixReader + err error + level int // The current compression level + rdHdrFtr int // Number of times we read the stream header and footer + blkCRC uint32 // CRC-32 IEEE of each block (as stored) + endCRC uint32 // Checksum of all blocks using bzip2's custom method + + crc crc + mtf moveToFront + bwt burrowsWheelerTransform + rle runLengthEncoding + + // These fields are allocated with Reader and re-used later. + treeSels []uint8 + codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode + codes1D [maxNumTrees]prefix.PrefixCodes + trees1D [maxNumTrees]prefix.Decoder + syms []uint16 + + fuzzReader // Exported functionality when fuzz testing +} + +type ReaderConfig struct { + _ struct{} // Blank field to prevent unkeyed struct literals +} + +func NewReader(r io.Reader, conf *ReaderConfig) (*Reader, error) { + zr := new(Reader) + zr.Reset(r) + return zr, nil +} + +func (zr *Reader) Reset(r io.Reader) error { + *zr = Reader{ + rd: zr.rd, + + mtf: zr.mtf, + bwt: zr.bwt, + rle: zr.rle, + + treeSels: zr.treeSels, + trees1D: zr.trees1D, + syms: zr.syms, + } + zr.rd.Init(r) + return nil +} + +func (zr *Reader) Read(buf []byte) (int, error) { + for { + cnt, err := zr.rle.Read(buf) + if err != rleDone && zr.err == nil { + zr.err = err + } + if cnt > 0 { + zr.crc.update(buf[:cnt]) + zr.OutputOffset += int64(cnt) + return cnt, nil + } + if zr.err != nil || len(buf) == 0 { + return 0, zr.err + } + + // Read the next chunk. + zr.rd.Offset = zr.InputOffset + func() { + defer errors.Recover(&zr.err) + if zr.rdHdrFtr%2 == 0 { + // Check if we are already at EOF. + if err := zr.rd.PullBits(1); err != nil { + if err == io.ErrUnexpectedEOF && zr.rdHdrFtr > 0 { + err = io.EOF // EOF is okay if we read at least one stream + } + errors.Panic(err) + } + + // Read stream header. + if zr.rd.ReadBitsBE64(16) != hdrMagic { + panicf(errors.Corrupted, "invalid stream magic") + } + if ver := zr.rd.ReadBitsBE64(8); ver != 'h' { + if ver == '0' { + panicf(errors.Deprecated, "bzip1 format is not supported") + } + panicf(errors.Corrupted, "invalid version: %q", ver) + } + lvl := int(zr.rd.ReadBitsBE64(8)) - '0' + if lvl < BestSpeed || lvl > BestCompression { + panicf(errors.Corrupted, "invalid block size: %d", lvl*blockSize) + } + zr.level = lvl + zr.rdHdrFtr++ + } else { + // Check and update the CRC. 
+ if internal.GoFuzz { + zr.updateChecksum(-1, zr.crc.val) // Update with value + zr.blkCRC = zr.crc.val // Suppress CRC failures + } + if zr.blkCRC != zr.crc.val { + panicf(errors.Corrupted, "mismatching block checksum") + } + zr.endCRC = (zr.endCRC<<1 | zr.endCRC>>31) ^ zr.blkCRC + } + buf := zr.decodeBlock() + zr.rle.Init(buf) + }() + if zr.InputOffset, err = zr.rd.Flush(); zr.err == nil { + zr.err = err + } + if zr.err != nil { + zr.err = errWrap(zr.err, errors.Corrupted) + return 0, zr.err + } + } +} + +func (zr *Reader) Close() error { + if zr.err == io.EOF || zr.err == errClosed { + zr.rle.Init(nil) // Make sure future reads fail + zr.err = errClosed + return nil + } + return zr.err // Return the persistent error +} + +func (zr *Reader) decodeBlock() []byte { + if magic := zr.rd.ReadBitsBE64(48); magic != blkMagic { + if magic == endMagic { + endCRC := uint32(zr.rd.ReadBitsBE64(32)) + if internal.GoFuzz { + zr.updateChecksum(zr.rd.BitsRead()-32, zr.endCRC) + endCRC = zr.endCRC // Suppress CRC failures + } + if zr.endCRC != endCRC { + panicf(errors.Corrupted, "mismatching stream checksum") + } + zr.endCRC = 0 + zr.rd.ReadPads() + zr.rdHdrFtr++ + return nil + } + panicf(errors.Corrupted, "invalid block or footer magic") + } + + zr.crc.val = 0 + zr.blkCRC = uint32(zr.rd.ReadBitsBE64(32)) + if internal.GoFuzz { + zr.updateChecksum(zr.rd.BitsRead()-32, 0) // Record offset only + } + if zr.rd.ReadBitsBE64(1) != 0 { + panicf(errors.Deprecated, "block randomization is not supported") + } + + // Read BWT related fields. + ptr := int(zr.rd.ReadBitsBE64(24)) // BWT origin pointer + + // Read MTF related fields. + var dictArr [256]uint8 + dict := dictArr[:0] + bmapHi := uint16(zr.rd.ReadBits(16)) + for i := 0; i < 256; i, bmapHi = i+16, bmapHi>>1 { + if bmapHi&1 > 0 { + bmapLo := uint16(zr.rd.ReadBits(16)) + for j := 0; j < 16; j, bmapLo = j+1, bmapLo>>1 { + if bmapLo&1 > 0 { + dict = append(dict, uint8(i+j)) + } + } + } + } + + // Step 1: Prefix encoding. + syms := zr.decodePrefix(len(dict)) + + // Step 2: Move-to-front transform and run-length encoding. + zr.mtf.Init(dict, zr.level*blockSize) + buf := zr.mtf.Decode(syms) + + // Step 3: Burrows-Wheeler transformation. + if ptr >= len(buf) { + panicf(errors.Corrupted, "origin pointer (0x%06x) exceeds block size: %d", ptr, len(buf)) + } + zr.bwt.Decode(buf, ptr) + + return buf +} + +func (zr *Reader) decodePrefix(numSyms int) (syms []uint16) { + numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOF symbols + if numSyms < 3 { + panicf(errors.Corrupted, "not enough prefix symbols: %d", numSyms) + } + + // Read information about the trees and tree selectors. + var mtf internal.MoveToFront + numTrees := int(zr.rd.ReadBitsBE64(3)) + if numTrees < minNumTrees || numTrees > maxNumTrees { + panicf(errors.Corrupted, "invalid number of prefix trees: %d", numTrees) + } + numSels := int(zr.rd.ReadBitsBE64(15)) + if cap(zr.treeSels) < numSels { + zr.treeSels = make([]uint8, numSels) + } + treeSels := zr.treeSels[:numSels] + for i := range treeSels { + sym, ok := zr.rd.TryReadSymbol(&decSel) + if !ok { + sym = zr.rd.ReadSymbol(&decSel) + } + if int(sym) >= numTrees { + panicf(errors.Corrupted, "invalid prefix tree selector: %d", sym) + } + treeSels[i] = uint8(sym) + } + mtf.Decode(treeSels) + zr.treeSels = treeSels + + // Initialize prefix codes. + for i := range zr.codes2D[:numTrees] { + zr.codes1D[i] = zr.codes2D[i][:numSyms] + } + zr.rd.ReadPrefixCodes(zr.codes1D[:numTrees], zr.trees1D[:numTrees]) + + // Read prefix encoded symbols of compressed data. 
+ var tree *prefix.Decoder + var blkLen, selIdx int + syms = zr.syms[:0] + for { + if blkLen == 0 { + blkLen = numBlockSyms + if selIdx >= len(treeSels) { + panicf(errors.Corrupted, "not enough prefix tree selectors") + } + tree = &zr.trees1D[treeSels[selIdx]] + selIdx++ + } + blkLen-- + sym, ok := zr.rd.TryReadSymbol(tree) + if !ok { + sym = zr.rd.ReadSymbol(tree) + } + + if int(sym) == numSyms-1 { + break // EOF marker + } + if int(sym) >= numSyms { + panicf(errors.Corrupted, "invalid prefix symbol: %d", sym) + } + if len(syms) >= zr.level*blockSize { + panicf(errors.Corrupted, "number of prefix symbols exceeds block size") + } + syms = append(syms, uint16(sym)) + } + zr.syms = syms + return syms +} diff --git a/vendor/github.com/dsnet/compress/bzip2/rle1.go b/vendor/github.com/dsnet/compress/bzip2/rle1.go new file mode 100644 index 00000000..1d789f65 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/rle1.go @@ -0,0 +1,101 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import "github.com/dsnet/compress/internal/errors" + +// rleDone is a special "error" to indicate that the RLE stage is done. +var rleDone = errorf(errors.Unknown, "RLE1 stage is completed") + +// runLengthEncoding implements the first RLE stage of bzip2. Every sequence +// of 4..255 duplicated bytes is replaced by only the first 4 bytes, and a +// single byte representing the repeat length. Similar to the C bzip2 +// implementation, the encoder will always terminate repeat sequences with a +// count (even if it is the end of the buffer), and it will also never produce +// run lengths of 256..259. The decoder can handle the latter case. +// +// For example, if the input was: +// input: "AAAAAAABBBBCCCD" +// +// Then the output will be: +// output: "AAAA\x03BBBB\x00CCCD" +type runLengthEncoding struct { + buf []byte + idx int + lastVal byte + lastCnt int +} + +func (rle *runLengthEncoding) Init(buf []byte) { + *rle = runLengthEncoding{buf: buf} +} + +func (rle *runLengthEncoding) Write(buf []byte) (int, error) { + for i, b := range buf { + if rle.lastVal != b { + rle.lastCnt = 0 + } + rle.lastCnt++ + switch { + case rle.lastCnt < 4: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + rle.buf[rle.idx] = b + rle.idx++ + case rle.lastCnt == 4: + if rle.idx+1 >= len(rle.buf) { + return i, rleDone + } + rle.buf[rle.idx] = b + rle.idx++ + rle.buf[rle.idx] = 0 + rle.idx++ + case rle.lastCnt < 256: + rle.buf[rle.idx-1]++ + default: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + rle.lastCnt = 1 + rle.buf[rle.idx] = b + rle.idx++ + } + rle.lastVal = b + } + return len(buf), nil +} + +func (rle *runLengthEncoding) Read(buf []byte) (int, error) { + for i := range buf { + switch { + case rle.lastCnt == -4: + if rle.idx >= len(rle.buf) { + return i, errorf(errors.Corrupted, "missing terminating run-length repeater") + } + rle.lastCnt = int(rle.buf[rle.idx]) + rle.idx++ + if rle.lastCnt > 0 { + break // Break the switch + } + fallthrough // Count was zero, continue the work + case rle.lastCnt <= 0: + if rle.idx >= len(rle.buf) { + return i, rleDone + } + b := rle.buf[rle.idx] + rle.idx++ + if b != rle.lastVal { + rle.lastCnt = 0 + rle.lastVal = b + } + } + buf[i] = rle.lastVal + rle.lastCnt-- + } + return len(buf), nil +} + +func (rle *runLengthEncoding) Bytes() []byte { return rle.buf[:rle.idx] } diff --git a/vendor/github.com/dsnet/compress/bzip2/writer.go 
b/vendor/github.com/dsnet/compress/bzip2/writer.go new file mode 100644 index 00000000..5c1a4c66 --- /dev/null +++ b/vendor/github.com/dsnet/compress/bzip2/writer.go @@ -0,0 +1,307 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package bzip2 + +import ( + "io" + + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" + "github.com/dsnet/compress/internal/prefix" +) + +type Writer struct { + InputOffset int64 // Total number of bytes issued to Write + OutputOffset int64 // Total number of bytes written to underlying io.Writer + + wr prefixWriter + err error + level int // The current compression level + wrHdr bool // Have we written the stream header? + blkCRC uint32 // CRC-32 IEEE of each block + endCRC uint32 // Checksum of all blocks using bzip2's custom method + + crc crc + rle runLengthEncoding + bwt burrowsWheelerTransform + mtf moveToFront + + // These fields are allocated with Writer and re-used later. + buf []byte + treeSels []uint8 + treeSelsMTF []uint8 + codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode + codes1D [maxNumTrees]prefix.PrefixCodes + trees1D [maxNumTrees]prefix.Encoder +} + +type WriterConfig struct { + Level int + + _ struct{} // Blank field to prevent unkeyed struct literals +} + +func NewWriter(w io.Writer, conf *WriterConfig) (*Writer, error) { + var lvl int + if conf != nil { + lvl = conf.Level + } + if lvl == 0 { + lvl = DefaultCompression + } + if lvl < BestSpeed || lvl > BestCompression { + return nil, errorf(errors.Invalid, "compression level: %d", lvl) + } + zw := new(Writer) + zw.level = lvl + zw.Reset(w) + return zw, nil +} + +func (zw *Writer) Reset(w io.Writer) error { + *zw = Writer{ + wr: zw.wr, + level: zw.level, + + rle: zw.rle, + bwt: zw.bwt, + mtf: zw.mtf, + + buf: zw.buf, + treeSels: zw.treeSels, + treeSelsMTF: zw.treeSelsMTF, + trees1D: zw.trees1D, + } + zw.wr.Init(w) + if len(zw.buf) != zw.level*blockSize { + zw.buf = make([]byte, zw.level*blockSize) + } + zw.rle.Init(zw.buf) + return nil +} + +func (zw *Writer) Write(buf []byte) (int, error) { + if zw.err != nil { + return 0, zw.err + } + + cnt := len(buf) + for { + wrCnt, err := zw.rle.Write(buf) + if err != rleDone && zw.err == nil { + zw.err = err + } + zw.crc.update(buf[:wrCnt]) + buf = buf[wrCnt:] + if len(buf) == 0 { + zw.InputOffset += int64(cnt) + return cnt, nil + } + if zw.err = zw.flush(); zw.err != nil { + return 0, zw.err + } + } +} + +func (zw *Writer) flush() error { + vals := zw.rle.Bytes() + if len(vals) == 0 { + return nil + } + zw.wr.Offset = zw.OutputOffset + func() { + defer errors.Recover(&zw.err) + if !zw.wrHdr { + // Write stream header. + zw.wr.WriteBitsBE64(hdrMagic, 16) + zw.wr.WriteBitsBE64('h', 8) + zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8) + zw.wrHdr = true + } + zw.encodeBlock(vals) + }() + var err error + if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil { + zw.err = err + } + if zw.err != nil { + zw.err = errWrap(zw.err, errors.Internal) + return zw.err + } + zw.endCRC = (zw.endCRC<<1 | zw.endCRC>>31) ^ zw.blkCRC + zw.blkCRC = 0 + zw.rle.Init(zw.buf) + return nil +} + +func (zw *Writer) Close() error { + if zw.err == errClosed { + return nil + } + + // Flush RLE buffer if there is left-over data. + if zw.err = zw.flush(); zw.err != nil { + return zw.err + } + + // Write stream footer. + zw.wr.Offset = zw.OutputOffset + func() { + defer errors.Recover(&zw.err) + if !zw.wrHdr { + // Write stream header. 
+ zw.wr.WriteBitsBE64(hdrMagic, 16) + zw.wr.WriteBitsBE64('h', 8) + zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8) + zw.wrHdr = true + } + zw.wr.WriteBitsBE64(endMagic, 48) + zw.wr.WriteBitsBE64(uint64(zw.endCRC), 32) + zw.wr.WritePads(0) + }() + var err error + if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil { + zw.err = err + } + if zw.err != nil { + zw.err = errWrap(zw.err, errors.Internal) + return zw.err + } + + zw.err = errClosed + return nil +} + +func (zw *Writer) encodeBlock(buf []byte) { + zw.blkCRC = zw.crc.val + zw.wr.WriteBitsBE64(blkMagic, 48) + zw.wr.WriteBitsBE64(uint64(zw.blkCRC), 32) + zw.wr.WriteBitsBE64(0, 1) + zw.crc.val = 0 + + // Step 1: Burrows-Wheeler transformation. + ptr := zw.bwt.Encode(buf) + zw.wr.WriteBitsBE64(uint64(ptr), 24) + + // Step 2: Move-to-front transform and run-length encoding. + var dictMap [256]bool + for _, c := range buf { + dictMap[c] = true + } + + var dictArr [256]uint8 + var bmapLo [16]uint16 + dict := dictArr[:0] + bmapHi := uint16(0) + for i, b := range dictMap { + if b { + c := uint8(i) + dict = append(dict, c) + bmapHi |= 1 << (c >> 4) + bmapLo[c>>4] |= 1 << (c & 0xf) + } + } + + zw.wr.WriteBits(uint(bmapHi), 16) + for _, m := range bmapLo { + if m > 0 { + zw.wr.WriteBits(uint(m), 16) + } + } + + zw.mtf.Init(dict, len(buf)) + syms := zw.mtf.Encode(buf) + + // Step 3: Prefix encoding. + zw.encodePrefix(syms, len(dict)) +} + +func (zw *Writer) encodePrefix(syms []uint16, numSyms int) { + numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOB symbols + if numSyms < 3 { + panicf(errors.Internal, "unable to encode EOB marker") + } + syms = append(syms, uint16(numSyms-1)) // EOB marker + + // Compute number of prefix trees needed. + numTrees := maxNumTrees + for i, lim := range []int{200, 600, 1200, 2400} { + if len(syms) < lim { + numTrees = minNumTrees + i + break + } + } + + // Compute number of block selectors. + numSels := (len(syms) + numBlockSyms - 1) / numBlockSyms + if cap(zw.treeSels) < numSels { + zw.treeSels = make([]uint8, numSels) + } + treeSels := zw.treeSels[:numSels] + for i := range treeSels { + treeSels[i] = uint8(i % numTrees) + } + + // Initialize prefix codes. + for i := range zw.codes2D[:numTrees] { + pc := zw.codes2D[i][:numSyms] + for j := range pc { + pc[j] = prefix.PrefixCode{Sym: uint32(j)} + } + zw.codes1D[i] = pc + } + + // First cut at assigning prefix trees to each group. + var codes prefix.PrefixCodes + var blkLen, selIdx int + for _, sym := range syms { + if blkLen == 0 { + blkLen = numBlockSyms + codes = zw.codes2D[treeSels[selIdx]][:numSyms] + selIdx++ + } + blkLen-- + codes[sym].Cnt++ + } + + // TODO(dsnet): Use K-means to cluster groups to each prefix tree. + + // Generate lengths and prefixes based on symbol frequencies. + for i := range zw.trees1D[:numTrees] { + pc := prefix.PrefixCodes(zw.codes2D[i][:numSyms]) + pc.SortByCount() + if err := prefix.GenerateLengths(pc, maxPrefixBits); err != nil { + errors.Panic(err) + } + pc.SortBySymbol() + } + + // Write out information about the trees and tree selectors. + var mtf internal.MoveToFront + zw.wr.WriteBitsBE64(uint64(numTrees), 3) + zw.wr.WriteBitsBE64(uint64(numSels), 15) + zw.treeSelsMTF = append(zw.treeSelsMTF[:0], treeSels...) + mtf.Encode(zw.treeSelsMTF) + for _, sym := range zw.treeSelsMTF { + zw.wr.WriteSymbol(uint(sym), &encSel) + } + zw.wr.WritePrefixCodes(zw.codes1D[:numTrees], zw.trees1D[:numTrees]) + + // Write out prefix encoded symbols of compressed data. 
+ var tree *prefix.Encoder + blkLen, selIdx = 0, 0 + for _, sym := range syms { + if blkLen == 0 { + blkLen = numBlockSyms + tree = &zw.trees1D[treeSels[selIdx]] + selIdx++ + } + blkLen-- + ok := zw.wr.TryWriteSymbol(uint(sym), tree) + if !ok { + zw.wr.WriteSymbol(uint(sym), tree) + } + } +} diff --git a/vendor/github.com/dsnet/compress/internal/common.go b/vendor/github.com/dsnet/compress/internal/common.go new file mode 100644 index 00000000..da4e7034 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/common.go @@ -0,0 +1,107 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package internal is a collection of common compression algorithms. +// +// For performance reasons, these packages lack strong error checking and +// require that the caller to ensure that strict invariants are kept. +package internal + +var ( + // IdentityLUT returns the input key itself. + IdentityLUT = func() (lut [256]byte) { + for i := range lut { + lut[i] = uint8(i) + } + return lut + }() + + // ReverseLUT returns the input key with its bits reversed. + ReverseLUT = func() (lut [256]byte) { + for i := range lut { + b := uint8(i) + b = (b&0xaa)>>1 | (b&0x55)<<1 + b = (b&0xcc)>>2 | (b&0x33)<<2 + b = (b&0xf0)>>4 | (b&0x0f)<<4 + lut[i] = b + } + return lut + }() +) + +// ReverseUint32 reverses all bits of v. +func ReverseUint32(v uint32) (x uint32) { + x |= uint32(ReverseLUT[byte(v>>0)]) << 24 + x |= uint32(ReverseLUT[byte(v>>8)]) << 16 + x |= uint32(ReverseLUT[byte(v>>16)]) << 8 + x |= uint32(ReverseLUT[byte(v>>24)]) << 0 + return x +} + +// ReverseUint32N reverses the lower n bits of v. +func ReverseUint32N(v uint32, n uint) (x uint32) { + return ReverseUint32(v << (32 - n)) +} + +// ReverseUint64 reverses all bits of v. +func ReverseUint64(v uint64) (x uint64) { + x |= uint64(ReverseLUT[byte(v>>0)]) << 56 + x |= uint64(ReverseLUT[byte(v>>8)]) << 48 + x |= uint64(ReverseLUT[byte(v>>16)]) << 40 + x |= uint64(ReverseLUT[byte(v>>24)]) << 32 + x |= uint64(ReverseLUT[byte(v>>32)]) << 24 + x |= uint64(ReverseLUT[byte(v>>40)]) << 16 + x |= uint64(ReverseLUT[byte(v>>48)]) << 8 + x |= uint64(ReverseLUT[byte(v>>56)]) << 0 + return x +} + +// ReverseUint64N reverses the lower n bits of v. +func ReverseUint64N(v uint64, n uint) (x uint64) { + return ReverseUint64(v << (64 - n)) +} + +// MoveToFront is a data structure that allows for more efficient move-to-front +// transformations. This specific implementation assumes that the alphabet is +// densely packed within 0..255. 
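To make the transform concrete before the type definition that follows, here is a toy sketch (separate from the vendored implementation) of a move-to-front encode over a dense alphabet; note how a run of a repeated value collapses into a run of zeros, which is what makes MTF useful ahead of run-length and prefix coding:

```
// mtfEncode demonstrates the move-to-front transform: each value is replaced
// by its current index in the dictionary and is then moved to index 0.
package main

import "fmt"

func mtfEncode(dict []byte, vals []byte) []byte {
	out := make([]byte, len(vals))
	for i, v := range vals {
		var idx int
		for di, dv := range dict {
			if dv == v {
				idx = di
				break
			}
		}
		out[i] = byte(idx)
		copy(dict[1:], dict[:idx]) // Shift the leading entries down by one...
		dict[0] = v                // ...and move the value to the front
	}
	return out
}

func main() {
	fmt.Println(mtfEncode([]byte{0, 1, 2, 3}, []byte{2, 2, 2, 0, 0, 3}))
	// Output: [2 0 0 1 0 3] -- the run of 2s became a run of zeros
}
```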
+type MoveToFront struct { + dict [256]uint8 // Mapping from indexes to values + tail int // Number of tail bytes that are already ordered +} + +func (m *MoveToFront) Encode(vals []uint8) { + copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity + + var max int + for i, val := range vals { + var idx uint8 // Reverse lookup idx in dict + for di, dv := range m.dict { + if dv == val { + idx = uint8(di) + break + } + } + vals[i] = idx + + max |= int(idx) + copy(m.dict[1:], m.dict[:idx]) + m.dict[0] = val + } + m.tail = 256 - max - 1 +} + +func (m *MoveToFront) Decode(idxs []uint8) { + copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity + + var max int + for i, idx := range idxs { + val := m.dict[idx] // Forward lookup val in dict + idxs[i] = val + + max |= int(idx) + copy(m.dict[1:], m.dict[:idx]) + m.dict[0] = val + } + m.tail = 256 - max - 1 +} diff --git a/vendor/github.com/dsnet/compress/internal/debug.go b/vendor/github.com/dsnet/compress/internal/debug.go new file mode 100644 index 00000000..01df1f89 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/debug.go @@ -0,0 +1,12 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build debug,!gofuzz + +package internal + +const ( + Debug = true + GoFuzz = false +) diff --git a/vendor/github.com/dsnet/compress/internal/errors/errors.go b/vendor/github.com/dsnet/compress/internal/errors/errors.go new file mode 100644 index 00000000..c631afbd --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/errors/errors.go @@ -0,0 +1,120 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package errors implements functions to manipulate compression errors. +// +// In idiomatic Go, it is an anti-pattern to use panics as a form of error +// reporting in the API. Instead, the expected way to transmit errors is by +// returning an error value. Unfortunately, the checking of "err != nil" in +// tight loops commonly found in compression causes non-negligible performance +// degradation. While this may not be idiomatic, the internal packages of this +// repository rely on panics as a normal means to convey errors. In order to +// ensure that these panics do not leak across the public API, the public +// packages must recover from these panics and present an error value. +// +// The Panic and Recover functions in this package provide a safe way to +// recover from errors only generated from within this repository. +// +// Example usage: +// func Foo() (err error) { +// defer errors.Recover(&err) +// +// if rand.Intn(2) == 0 { +// // Unexpected panics will not be caught by Recover. +// io.Closer(nil).Close() +// } else { +// // Errors thrown by Panic will be caught by Recover. +// errors.Panic(errors.New("whoopsie")) +// } +// } +// +package errors + +import "strings" + +const ( + // Unknown indicates that there is no classification for this error. + Unknown = iota + + // Internal indicates that this error is due to an internal bug. + // Users should file a issue report if this type of error is encountered. + Internal + + // Invalid indicates that this error is due to the user misusing the API + // and is indicative of a bug on the user's part. + Invalid + + // Deprecated indicates the use of a deprecated and unsupported feature. 
+ Deprecated + + // Corrupted indicates that the input stream is corrupted. + Corrupted + + // Closed indicates that the handlers are closed. + Closed +) + +var codeMap = map[int]string{ + Unknown: "unknown error", + Internal: "internal error", + Invalid: "invalid argument", + Deprecated: "deprecated format", + Corrupted: "corrupted input", + Closed: "closed handler", +} + +type Error struct { + Code int // The error type + Pkg string // Name of the package where the error originated + Msg string // Descriptive message about the error (optional) +} + +func (e Error) Error() string { + var ss []string + for _, s := range []string{e.Pkg, codeMap[e.Code], e.Msg} { + if s != "" { + ss = append(ss, s) + } + } + return strings.Join(ss, ": ") +} + +func (e Error) CompressError() {} +func (e Error) IsInternal() bool { return e.Code == Internal } +func (e Error) IsInvalid() bool { return e.Code == Invalid } +func (e Error) IsDeprecated() bool { return e.Code == Deprecated } +func (e Error) IsCorrupted() bool { return e.Code == Corrupted } +func (e Error) IsClosed() bool { return e.Code == Closed } + +func IsInternal(err error) bool { return isCode(err, Internal) } +func IsInvalid(err error) bool { return isCode(err, Invalid) } +func IsDeprecated(err error) bool { return isCode(err, Deprecated) } +func IsCorrupted(err error) bool { return isCode(err, Corrupted) } +func IsClosed(err error) bool { return isCode(err, Closed) } + +func isCode(err error, code int) bool { + if cerr, ok := err.(Error); ok && cerr.Code == code { + return true + } + return false +} + +// errWrap is used by Panic and Recover to ensure that only errors raised by +// Panic are recovered by Recover. +type errWrap struct{ e *error } + +func Recover(err *error) { + switch ex := recover().(type) { + case nil: + // Do nothing. + case errWrap: + *err = *ex.e + default: + panic(ex) + } +} + +func Panic(err error) { + panic(errWrap{&err}) +} diff --git a/vendor/github.com/dsnet/compress/internal/gofuzz.go b/vendor/github.com/dsnet/compress/internal/gofuzz.go new file mode 100644 index 00000000..5035c9d6 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/gofuzz.go @@ -0,0 +1,12 @@ +// Copyright 2016, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build gofuzz + +package internal + +const ( + Debug = true + GoFuzz = true +) diff --git a/vendor/github.com/dsnet/compress/internal/prefix/debug.go b/vendor/github.com/dsnet/compress/internal/prefix/debug.go new file mode 100644 index 00000000..04fce70b --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/debug.go @@ -0,0 +1,159 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
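For orientation, the Panic/Recover discipline documented in the errors package above is what the panicf calls in the bzip2 reader and writer rely on. A minimal usage sketch, assuming only the API shown in this patch (the package is internal, so this is illustrative rather than importable, and the function name decodeAll is hypothetical):

```
package main

import (
	"fmt"

	"github.com/dsnet/compress/internal/errors"
)

// decodeAll converts a raised errors.Panic back into an ordinary error value.
func decodeAll() (err error) {
	defer errors.Recover(&err)
	errors.Panic(errors.Error{Code: errors.Corrupted, Pkg: "demo", Msg: "bad magic"})
	return nil // Unreachable; the panic above is recovered into err
}

func main() {
	err := decodeAll()
	fmt.Println(err)                     // demo: corrupted input: bad magic
	fmt.Println(errors.IsCorrupted(err)) // true
}
```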
+
+// +build debug
+
+package prefix
+
+import (
+ "fmt"
+ "math"
+ "strings"
+)
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func lenBase2(n uint) int {
+ return int(math.Ceil(math.Log2(float64(n + 1))))
+}
+func padBase2(v, n uint, m int) string {
+ s := fmt.Sprintf("%b", 1<<n|v)[1:]
+ if pad := m - len(s); pad > 0 {
+ return strings.Repeat(" ", pad) + s
+ }
+ return s
+}
+
+func lenBase10(n int) int {
+ return int(math.Ceil(math.Log10(float64(n + 1))))
+}
+func padBase10(n, m int) string {
+ s := fmt.Sprintf("%d", n)
+ if pad := m - len(s); pad > 0 {
+ return strings.Repeat(" ", pad) + s
+ }
+ return s
+}
+
+func (rc RangeCodes) String() string {
+ var maxLen, maxBase int
+ for _, c := range rc {
+ maxLen = max(maxLen, int(c.Len))
+ maxBase = max(maxBase, int(c.Base))
+ }
+
+ var ss []string
+ ss = append(ss, "{")
+ for i, c := range rc {
+ base := padBase10(int(c.Base), lenBase10(maxBase))
+ if c.Len > 0 {
+ base += fmt.Sprintf("-%d", c.End()-1)
+ }
+ ss = append(ss, fmt.Sprintf("\t%s: {len: %s, range: %s},",
+ padBase10(int(i), lenBase10(len(rc)-1)),
+ padBase10(int(c.Len), lenBase10(maxLen)),
+ base,
+ ))
+ }
+ ss = append(ss, "}")
+ return strings.Join(ss, "\n")
+}
+
+func (pc PrefixCodes) String() string {
+ var maxSym, maxLen, maxCnt int
+ for _, c := range pc {
+ maxSym = max(maxSym, int(c.Sym))
+ maxLen = max(maxLen, int(c.Len))
+ maxCnt = max(maxCnt, int(c.Cnt))
+ }
+
+ var ss []string
+ ss = append(ss, "{")
+ for _, c := range pc {
+ var cntStr string
+ if maxCnt > 0 {
+ cnt := int(32*float32(c.Cnt)/float32(maxCnt) + 0.5)
+ cntStr = fmt.Sprintf("%s |%s",
+ padBase10(int(c.Cnt), lenBase10(maxCnt)),
+ strings.Repeat("#", cnt),
+ )
+ }
+ ss = append(ss, fmt.Sprintf("\t%s: %s, %s",
+ padBase10(int(c.Sym), lenBase10(maxSym)),
+ padBase2(uint(c.Val), uint(c.Len), maxLen),
+ cntStr,
+ ))
+ }
+ ss = append(ss, "}")
+ return strings.Join(ss, "\n")
+}
+
+func (pd Decoder) String() string {
+ var ss []string
+ ss = append(ss, "{")
+ if len(pd.chunks) > 0 {
+ ss = append(ss, "\tchunks: {")
+ for i, c := range pd.chunks {
+ label := "sym"
+ if uint(c&countMask) > uint(pd.chunkBits) {
+ label = "idx"
+ }
+ ss = append(ss, fmt.Sprintf("\t\t%s: {%s: %s, len: %s}",
+ padBase2(uint(i), uint(pd.chunkBits), int(pd.chunkBits)),
+ label, padBase10(int(c>>countBits), 3),
+ padBase10(int(c&countMask), 2),
+ ))
+ }
+ ss = append(ss, "\t},")
+
+ for j, links := range pd.links {
+ ss = append(ss, fmt.Sprintf("\tlinks[%d]: {", j))
+ linkBits := lenBase2(uint(pd.linkMask))
+ for i, c := range links {
+ ss = append(ss, fmt.Sprintf("\t\t%s: {sym: %s, len: %s},",
+ padBase2(uint(i), uint(linkBits), int(linkBits)),
+ padBase10(int(c>>countBits), 3),
+ padBase10(int(c&countMask), 2),
+ ))
+ }
+ ss = append(ss, "\t},")
+ }
+ }
+ ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pd.chunkMask))
+ ss = append(ss, fmt.Sprintf("\tlinkMask: %b,", pd.linkMask))
+ ss = append(ss, fmt.Sprintf("\tchunkBits: %d,", pd.chunkBits))
+ ss = append(ss, fmt.Sprintf("\tMinBits: %d,", pd.MinBits))
+ ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pd.NumSyms))
+ ss = append(ss, "}")
+ return strings.Join(ss, "\n")
+}
+
+func (pe Encoder) String() string {
+ var maxLen int
+ for _, c := range pe.chunks {
+ maxLen = max(maxLen, int(c&countMask))
+ }
+
+ var ss []string
+ ss = append(ss, "{")
+ if len(pe.chunks) > 0 {
+ ss = append(ss, "\tchunks: {")
+ for i, c := range pe.chunks {
+ ss = append(ss, fmt.Sprintf("\t\t%s: %s,",
+ padBase10(i, 3),
+ padBase2(uint(c>>countBits), uint(c&countMask), maxLen),
+ ))
+ }
+ ss = append(ss, "\t},")
+ }
+ ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pe.chunkMask))
+ ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pe.NumSyms))
+ ss = append(ss, "}")
+ return strings.Join(ss, "\n")
+}
diff --git a/vendor/github.com/dsnet/compress/internal/prefix/decoder.go b/vendor/github.com/dsnet/compress/internal/prefix/decoder.go
new file mode 100644
index 00000000..a9bc2dcb
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/internal/prefix/decoder.go
@@ -0,0 +1,136 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package prefix
+
+import (
+ "sort"
+
+ "github.com/dsnet/compress/internal"
+)
+
+// The algorithm used to decode variable-length codes is based on the lookup
+// method in zlib. If the code is less-than-or-equal to maxChunkBits,
+// then the symbol can be decoded using a single lookup into the chunks table.
+// Otherwise, the links table will be used for a second-level lookup.
+//
+// The chunks slice is keyed by the contents of the bit buffer ANDed with
+// the chunkMask to avoid an out-of-bounds lookup. The value of chunks is a
+// tuple that is decoded as follows:
+//
+// var length = chunks[bitBuffer&chunkMask] & countMask
+// var symbol = chunks[bitBuffer&chunkMask] >> countBits
+//
+// If the decoded length is larger than chunkBits, then an overflow link table
+// must be used for further decoding. In this case, the symbol is actually the
+// index into the links tables. The second-level links table returned is
+// processed in the same way as the chunks table.
+//
+// if length > chunkBits {
+// var index = symbol // Previous symbol is index into links tables
+// length = links[index][bitBuffer>>chunkBits & linkMask] & countMask
+// symbol = links[index][bitBuffer>>chunkBits & linkMask] >> countBits
+// }
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+type Decoder struct {
+ chunks []uint32 // First-level lookup map
+ links [][]uint32 // Second-level lookup map
+ chunkMask uint32 // Mask the length of the chunks table
+ linkMask uint32 // Mask the length of the link table
+ chunkBits uint32 // Bit-length of the chunks table
+
+ MinBits uint32 // The minimum number of bits to safely make progress
+ NumSyms uint32 // Number of symbols
+}
+
+// Init initializes Decoder according to the codes provided.
+func (pd *Decoder) Init(codes PrefixCodes) {
+ // Handle special case trees.
+ if len(codes) <= 1 {
+ switch {
+ case len(codes) == 0: // Empty tree (should error if used later)
+ *pd = Decoder{chunks: pd.chunks[:0], links: pd.links[:0], NumSyms: 0}
+ case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero)
+ pd.chunks = append(pd.chunks[:0], codes[0].Sym<<countBits|0)
+ *pd = Decoder{chunks: pd.chunks[:1], links: pd.links[:0], NumSyms: 1}
+ }
+ return
+ }
+ if internal.Debug && !sort.IsSorted(prefixCodesBySymbol(codes)) {
+ panic("input codes are not sorted")
+ }
+
+ // Compute the minimum and maximum bit-lengths of the codes.
+ var minBits, maxBits uint32 = valueBits, 0
+ for _, c := range codes {
+ if minBits > c.Len {
+ minBits = c.Len
+ }
+ if maxBits < c.Len {
+ maxBits = c.Len
+ }
+ }
+
+ // Allocate chunks table as needed.
+ const maxChunkBits = 9 // This can be tuned for better performance
+ pd.NumSyms = uint32(len(codes))
+ pd.MinBits = minBits
+ pd.chunkBits = maxBits
+ if pd.chunkBits > maxChunkBits {
+ pd.chunkBits = maxChunkBits
+ }
+ numChunks := 1 << pd.chunkBits
+ pd.chunks = allocUint32s(pd.chunks, numChunks)
+ pd.chunkMask = uint32(numChunks - 1)
+
+ // Allocate links tables as needed.
+ pd.links = pd.links[:0]
+ pd.linkMask = 0
+ if pd.chunkBits < maxBits {
+ numLinks := 1 << (maxBits - pd.chunkBits)
+ pd.linkMask = uint32(numLinks - 1)
+
+ var linkIdx uint32
+ for i := range pd.chunks {
+ pd.chunks[i] = 0 // Logic below relies on zero value as uninitialized
+ }
+ for _, c := range codes {
+ if c.Len > pd.chunkBits && pd.chunks[c.Val&pd.chunkMask] == 0 {
+ pd.chunks[c.Val&pd.chunkMask] = (linkIdx << countBits) | (pd.chunkBits + 1)
+ linkIdx++
+ }
+ }
+
+ pd.links = extendSliceUint32s(pd.links, int(linkIdx))
+ linksFlat := allocUint32s(pd.links[0], numLinks*int(linkIdx))
+ for i, j := 0, 0; i < len(pd.links); i, j = i+1, j+numLinks {
+ pd.links[i] = linksFlat[j : j+numLinks]
+ }
+ }
+
+ // Fill out chunks and links tables with values.
+ for _, c := range codes {
+ chunk := c.Sym<<countBits | c.Len
+ if c.Len <= pd.chunkBits {
+ skip := 1 << uint(pd.chunkBits-c.Len)
+ for j := int(c.Val); j < len(pd.chunks); j += skip {
+ pd.chunks[j] = chunk
+ }
+ } else {
+ linkIdx := pd.chunks[c.Val&pd.chunkMask] >> countBits
+ links := pd.links[linkIdx]
+ skip := 1 << uint(c.Len-pd.chunkBits)
+ for j := int(c.Val >> pd.chunkBits); j < len(links); j += skip {
+ links[j] = chunk
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/dsnet/compress/internal/prefix/encoder.go b/vendor/github.com/dsnet/compress/internal/prefix/encoder.go
new file mode 100644
index 00000000..4424a011
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/internal/prefix/encoder.go
@@ -0,0 +1,66 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package prefix
+
+import (
+ "sort"
+
+ "github.com/dsnet/compress/internal"
+)
+
+type Encoder struct {
+ chunks []uint32 // First-level lookup map
+ chunkMask uint32 // Mask the length of the chunks table
+
+ NumSyms uint32 // Number of symbols
+}
+
+// Init initializes Encoder according to the codes provided.
+func (pe *Encoder) Init(codes PrefixCodes) {
+ // Handle special case trees.
+ if len(codes) <= 1 {
+ switch {
+ case len(codes) == 0: // Empty tree (should error if used later)
+ *pe = Encoder{chunks: pe.chunks[:0], NumSyms: 0}
+ case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero)
+ pe.chunks = append(pe.chunks[:0], codes[0].Val<<countBits|0)
+ *pe = Encoder{chunks: pe.chunks[:1], NumSyms: 1}
+ }
+ return
+ }
+ if internal.Debug && !sort.IsSorted(prefixCodesBySymbol(codes)) {
+ panic("input codes are not sorted")
+ }
+
+ // Allocate a chunks table large enough to hold all symbols.
+ numChunks := 1
+ for n := len(codes); n > 0; n >>= 1 {
+ numChunks <<= 1
+ }
+ pe.NumSyms = uint32(len(codes))
+
+retry:
+ // Allocate and reset chunks.
+ pe.chunks = allocUint32s(pe.chunks, numChunks)
+ pe.chunkMask = uint32(numChunks - 1)
+ for i := range pe.chunks {
+ pe.chunks[i] = 0 // Logic below relies on zero value as uninitialized
+ }
+
+ // Insert each symbol, checking that there are no conflicts.
+ for _, c := range codes {
+ if pe.chunks[c.Sym&pe.chunkMask] > 0 {
+ // Collision found in our "hash" table, so grow and try again.
+ numChunks <<= 1
+ goto retry
+ }
+ pe.chunks[c.Sym&pe.chunkMask] = c.Val<<countBits | c.Len
+ }
+}
diff --git a/vendor/github.com/dsnet/compress/internal/prefix/prefix.go b/vendor/github.com/dsnet/compress/internal/prefix/prefix.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/internal/prefix/prefix.go
@@ -0,0 +1,400 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package prefix
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/dsnet/compress/internal"
+ "github.com/dsnet/compress/internal/errors"
+)
+
+func errorf(c int, f string, a ...interface{}) error {
+ return errors.Error{Code: c, Pkg: "prefix", Msg: fmt.Sprintf(f, a...)}
+}
+
+const (
+ countBits = 5 // Number of bits to store the bit-length of the code
+ valueBits = 27 // Number of bits to store the code value
+
+ countMask = (1 << countBits) - 1
+)
+
+// PrefixCode is a representation of a prefix code, which is conceptually a
+// mapping from some arbitrary symbol to some bit-string.
+//
+// The Sym and Cnt fields are typically provided by the user, while the Len
+// and Val fields are populated by this package.
+type PrefixCode struct {
+ Sym uint32 // The symbol being mapped
+ Cnt uint32 // The number of times this symbol is used
+ Len uint32 // Bit-length of the prefix code
+ Val uint32 // Value of the prefix code (must be in 0..(1<<Len)-1)
+}
+type PrefixCodes []PrefixCode
+
+type prefixCodesBySymbol []PrefixCode
+
+func (c prefixCodesBySymbol) Len() int { return len(c) }
+func (c prefixCodesBySymbol) Less(i, j int) bool { return c[i].Sym < c[j].Sym }
+func (c prefixCodesBySymbol) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+
+type prefixCodesByCount []PrefixCode
+
+func (c prefixCodesByCount) Len() int { return len(c) }
+func (c prefixCodesByCount) Less(i, j int) bool { return c[i].Cnt < c[j].Cnt }
+func (c prefixCodesByCount) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+
+func (pc PrefixCodes) SortBySymbol() { sort.Sort(prefixCodesBySymbol(pc)) }
+func (pc PrefixCodes) SortByCount() { sort.Sort(prefixCodesByCount(pc)) }
+
+// checkLengths reports whether the codes form a complete prefix tree.
+func (pc PrefixCodes) checkLengths() bool {
+ sum := 1 << valueBits
+ for _, c := range pc {
+ sum -= (1 << valueBits) >> uint(c.Len)
+ }
+ return sum == 0 || len(pc) == 0
+}
+
+// checkPrefixes reports whether all codes have non-overlapping prefixes.
+func (pc PrefixCodes) checkPrefixes() bool {
+ for i, c1 := range pc {
+ for j, c2 := range pc {
+ mask := uint32(1)<<c1.Len - 1
+ if i != j && c1.Len <= c2.Len && c1.Val&mask == c2.Val&mask {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// checkCanonical reports whether all codes are canonical.
+// That is, they have the following properties:
+//
+// 1. All codes of a given bit-length are consecutive values.
+// 2. Shorter codes lexically precede longer codes.
+//
+// The codes must have unique symbols and be sorted by symbol.
+// The Len and Val fields in each code must be populated.
+func (pc PrefixCodes) checkCanonical() bool {
+ // Rule 1.
+ var vals [valueBits + 1]PrefixCode
+ for _, c := range pc {
+ if c.Len > 0 {
+ c.Val = internal.ReverseUint32N(c.Val, uint(c.Len))
+ if vals[c.Len].Cnt > 0 && vals[c.Len].Val+1 != c.Val {
+ return false
+ }
+ vals[c.Len].Val = c.Val
+ vals[c.Len].Cnt++
+ }
+ }
+
+ // Rule 2.
+ var last PrefixCode
+ for _, v := range vals {
+ if v.Cnt > 0 {
+ curVal := v.Val - v.Cnt + 1
+ if last.Cnt != 0 && last.Val >= curVal {
+ return false
+ }
+ last = v
+ }
+ }
+ return true
+}
+
+// GenerateLengths assigns non-zero bit-lengths to all codes. Codes with high
+// frequency counts will be assigned shorter codes to reduce bit entropy.
+// This function is used primarily by compressors.
+// +// The input codes must have the Cnt field populated, be sorted by count. +// Even if a code has a count of 0, a non-zero bit-length will be assigned. +// +// The result will have the Len field populated. The algorithm used guarantees +// that Len <= maxBits and that it is a complete prefix tree. The resulting +// codes will remain sorted by count. +func GenerateLengths(codes PrefixCodes, maxBits uint) error { + if len(codes) <= 1 { + if len(codes) == 1 { + codes[0].Len = 0 + } + return nil + } + + // Verify that the codes are in ascending order by count. + cntLast := codes[0].Cnt + for _, c := range codes[1:] { + if c.Cnt < cntLast { + return errorf(errors.Invalid, "non-monotonically increasing symbol counts") + } + cntLast = c.Cnt + } + + // Construct a Huffman tree used to generate the bit-lengths. + // + // The Huffman tree is a binary tree where each symbol lies as a leaf node + // on this tree. The length of the prefix code to assign is the depth of + // that leaf from the root. The Huffman algorithm, which runs in O(n), + // is used to generate the tree. It assumes that codes are sorted in + // increasing order of frequency. + // + // The algorithm is as follows: + // 1. Start with two queues, F and Q, where F contains all of the starting + // symbols sorted such that symbols with lowest counts come first. + // 2. While len(F)+len(Q) > 1: + // 2a. Dequeue the node from F or Q that has the lowest weight as N0. + // 2b. Dequeue the node from F or Q that has the lowest weight as N1. + // 2c. Create a new node N that has N0 and N1 as its children. + // 2d. Enqueue N into the back of Q. + // 3. The tree's root node is Q[0]. + type node struct { + cnt uint32 + + // n0 or c0 represent the left child of this node. + // Since Go does not have unions, only one of these will be set. + // Similarly, n1 or c1 represent the right child of this node. + // + // If n0 or n1 is set, then it represents a "pointer" to another + // node in the Huffman tree. Since Go's pointer analysis cannot reason + // that these node pointers do not escape (golang.org/issue/13493), + // we use an index to a node in the nodes slice as a pseudo-pointer. + // + // If c0 or c1 is set, then it represents a leaf "node" in the + // Huffman tree. The leaves are the PrefixCode values themselves. + n0, n1 int // Index to child nodes + c0, c1 *PrefixCode + } + var nodeIdx int + var nodeArr [1024]node // Large enough to handle most cases on the stack + nodes := nodeArr[:] + if len(nodes) < len(codes) { + nodes = make([]node, len(codes)) // Number of internal nodes < number of leaves + } + freqs, queue := codes, nodes[:0] + for len(freqs)+len(queue) > 1 { + // These are the two smallest nodes at the front of freqs and queue. + var n node + if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) { + n.c0, freqs = &freqs[0], freqs[1:] + n.cnt += n.c0.Cnt + } else { + n.cnt += queue[0].cnt + n.n0 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0] + nodeIdx++ + queue = queue[1:] + } + if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) { + n.c1, freqs = &freqs[0], freqs[1:] + n.cnt += n.c1.Cnt + } else { + n.cnt += queue[0].cnt + n.n1 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0] + nodeIdx++ + queue = queue[1:] + } + queue = append(queue, n) + } + rootIdx := nodeIdx + + // Search the whole binary tree, noting when we hit each leaf node. + // We do not care about the exact Huffman tree structure, but rather we only + // care about depth of each of the leaf nodes. 
That is, the depth determines + // how long each symbol is in bits. + // + // Since the number of leaves is n, there is at most n internal nodes. + // Thus, this algorithm runs in O(n). + var fixBits bool + var explore func(int, uint) + explore = func(rootIdx int, level uint) { + root := &nodes[rootIdx] + + // Explore left branch. + if root.c0 == nil { + explore(root.n0, level+1) + } else { + fixBits = fixBits || (level > maxBits) + root.c0.Len = uint32(level) + } + + // Explore right branch. + if root.c1 == nil { + explore(root.n1, level+1) + } else { + fixBits = fixBits || (level > maxBits) + root.c1.Len = uint32(level) + } + } + explore(rootIdx, 1) + + // Fix the bit-lengths if we violate the maxBits requirement. + if fixBits { + // Create histogram for number of symbols with each bit-length. + var symBitsArr [valueBits + 1]uint32 + symBits := symBitsArr[:] // symBits[nb] indicates number of symbols using nb bits + for _, c := range codes { + for int(c.Len) >= len(symBits) { + symBits = append(symBits, 0) + } + symBits[c.Len]++ + } + + // Fudge the tree such that the largest bit-length is <= maxBits. + // This is accomplish by effectively doing a tree rotation. That is, we + // increase the bit-length of some higher frequency code, so that the + // bit-lengths of lower frequency codes can be decreased. + // + // Visually, this looks like the following transform: + // + // Level Before After + // __ ___ + // / \ / \ + // n-1 X / \ /\ /\ + // n X /\ X X X X + // n+1 X X + // + var treeRotate func(uint) + treeRotate = func(nb uint) { + if symBits[nb-1] == 0 { + treeRotate(nb - 1) + } + symBits[nb-1] -= 1 // Push this node to the level below + symBits[nb] += 3 // This level gets one node from above, two from below + symBits[nb+1] -= 2 // Push two nodes to the level above + } + for i := uint(len(symBits)) - 1; i > maxBits; i-- { + for symBits[i] > 0 { + treeRotate(i - 1) + } + } + + // Assign bit-lengths to each code. Since codes is sorted in increasing + // order of frequency, that means that the most frequently used symbols + // should have the shortest bit-lengths. Thus, we copy symbols to codes + // from the back of codes first. + cs := codes + for nb, cnt := range symBits { + if cnt > 0 { + pos := len(cs) - int(cnt) + cs2 := cs[pos:] + for i := range cs2 { + cs2[i].Len = uint32(nb) + } + cs = cs[:pos] + } + } + if len(cs) != 0 { + panic("not all codes were used up") + } + } + + if internal.Debug && !codes.checkLengths() { + panic("incomplete prefix tree detected") + } + return nil +} + +// GeneratePrefixes assigns a prefix value to all codes according to the +// bit-lengths. This function is used by both compressors and decompressors. +// +// The input codes must have the Sym and Len fields populated and be +// sorted by symbol. The bit-lengths of each code must be properly allocated, +// such that it forms a complete tree. +// +// The result will have the Val field populated and will produce a canonical +// prefix tree. The resulting codes will remain sorted by symbol. +func GeneratePrefixes(codes PrefixCodes) error { + if len(codes) <= 1 { + if len(codes) == 1 { + if codes[0].Len != 0 { + return errorf(errors.Invalid, "degenerate prefix tree with one node") + } + codes[0].Val = 0 + } + return nil + } + + // Compute basic statistics on the symbols. 
+ var bitCnts [valueBits + 1]uint
+ c0 := codes[0]
+ bitCnts[c0.Len]++
+ minBits, maxBits, symLast := c0.Len, c0.Len, c0.Sym
+ for _, c := range codes[1:] {
+ if c.Sym <= symLast {
+ return errorf(errors.Invalid, "non-unique or non-monotonically increasing symbols")
+ }
+ if minBits > c.Len {
+ minBits = c.Len
+ }
+ if maxBits < c.Len {
+ maxBits = c.Len
+ }
+ bitCnts[c.Len]++ // Histogram of bit counts
+ symLast = c.Sym // Keep track of last symbol
+ }
+ if minBits == 0 {
+ return errorf(errors.Invalid, "invalid prefix bit-length")
+ }
+
+ // Compute the next code for a symbol of a given bit length.
+ var nextCodes [valueBits + 1]uint
+ var code uint
+ for i := minBits; i <= maxBits; i++ {
+ code <<= 1
+ nextCodes[i] = code
+ code += bitCnts[i]
+ }
+ if code != 1<<maxBits {
+ return errorf(errors.Invalid, "degenerate prefix tree")
+ }
+
+ // Assign the value to each symbol, in ascending order of bit-lengths.
+ for i, c := range codes {
+ codes[i].Val = internal.ReverseUint32N(uint32(nextCodes[c.Len]), uint(c.Len))
+ nextCodes[c.Len]++
+ }
+
+ if internal.Debug && !codes.checkPrefixes() {
+ panic("overlapping prefixes detected")
+ }
+ if internal.Debug && !codes.checkCanonical() {
+ panic("non-canonical prefixes detected")
+ }
+ return nil
+}
+
+func allocUint32s(s []uint32, n int) []uint32 {
+ if cap(s) >= n {
+ return s[:n]
+ }
+ return make([]uint32, n, n*3/2)
+}
+
+func extendSliceUint32s(s [][]uint32, n int) [][]uint32 {
+ if cap(s) >= n {
+ return s[:n]
+ }
+ ss := make([][]uint32, n, n*3/2)
+ copy(ss, s[:cap(s)])
+ return ss
+}
diff --git a/vendor/github.com/dsnet/compress/internal/prefix/range.go b/vendor/github.com/dsnet/compress/internal/prefix/range.go
new file mode 100644
index 00000000..b7eddad5
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/internal/prefix/range.go
@@ -0,0 +1,93 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package prefix
+
+type RangeCode struct {
+ Base uint32 // Starting base offset of the range
+ Len uint32 // Bit-length of a subsequent integer to add to base offset
+}
+type RangeCodes []RangeCode
+
+type RangeEncoder struct {
+ rcs RangeCodes
+ lut [1024]uint32
+ minBase uint
+}
+
+// End reports the non-inclusive ending range.
+func (rc RangeCode) End() uint32 { return rc.Base + (1 << rc.Len) }
+
+// MakeRangeCodes creates a RangeCodes, where each region is assumed to be
+// contiguously stacked, without any gaps, with bit-lengths taken from bits.
+func MakeRangeCodes(minBase uint, bits []uint) (rc RangeCodes) {
+ for _, nb := range bits {
+ rc = append(rc, RangeCode{Base: uint32(minBase), Len: uint32(nb)})
+ minBase += 1 << nb
+ }
+ return rc
+}
+
+// Base reports the inclusive starting range for all ranges.
+func (rcs RangeCodes) Base() uint32 { return rcs[0].Base }
+
+// End reports the non-inclusive ending range for all ranges.
+func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() }
+
+// checkValid reports whether the RangeCodes is valid. In order to be valid,
+// the following must hold true:
+// rcs[i-1].Base <= rcs[i].Base
+// rcs[i-1].End <= rcs[i].End
+// rcs[i-1].End >= rcs[i].Base
+//
+// Practically speaking, each range must be increasing and must not have any
+// gaps in between. It is okay for ranges to overlap.
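Since this contract is easiest to see with numbers, here is a standalone sketch (using a local copy of the MakeRangeCodes logic above, so it runs outside this internal package) showing how contiguously stacked ranges cover offsets:

```
package main

import "fmt"

type rangeCode struct {
	Base uint32 // Starting offset of the range
	Len  uint32 // Bit-length of the extra integer added to Base
}

func (rc rangeCode) End() uint32 { return rc.Base + (1 << rc.Len) }

// makeRangeCodes mirrors MakeRangeCodes from the vendored package.
func makeRangeCodes(minBase uint, bits []uint) (rcs []rangeCode) {
	for _, nb := range bits {
		rcs = append(rcs, rangeCode{Base: uint32(minBase), Len: uint32(nb)})
		minBase += 1 << nb
	}
	return rcs
}

func main() {
	for sym, rc := range makeRangeCodes(2, []uint{0, 0, 1, 2}) {
		fmt.Printf("sym %d covers offsets [%d, %d)\n", sym, rc.Base, rc.End())
	}
	// sym 0 covers offsets [2, 3)
	// sym 1 covers offsets [3, 4)
	// sym 2 covers offsets [4, 6)
	// sym 3 covers offsets [6, 10)
	// Contiguous and increasing, so checkValid would accept these ranges.
}
```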
+func (rcs RangeCodes) checkValid() bool { + if len(rcs) == 0 { + return false + } + pre := rcs[0] + for _, cur := range rcs[1:] { + preBase, preEnd := pre.Base, pre.End() + curBase, curEnd := cur.Base, cur.End() + if preBase > curBase || preEnd > curEnd || preEnd < curBase { + return false + } + pre = cur + } + return true +} + +func (re *RangeEncoder) Init(rcs RangeCodes) { + if !rcs.checkValid() { + panic("invalid range codes") + } + *re = RangeEncoder{rcs: rcs, minBase: uint(rcs.Base())} + for sym, rc := range rcs { + base := int(rc.Base) - int(re.minBase) + end := int(rc.End()) - int(re.minBase) + if base >= len(re.lut) { + break + } + if end > len(re.lut) { + end = len(re.lut) + } + for i := base; i < end; i++ { + re.lut[i] = uint32(sym) + } + } +} + +func (re *RangeEncoder) Encode(offset uint) (sym uint) { + if idx := int(offset - re.minBase); idx < len(re.lut) { + return uint(re.lut[idx]) + } + sym = uint(re.lut[len(re.lut)-1]) +retry: + if int(sym) >= len(re.rcs) || re.rcs[sym].Base > uint32(offset) { + return sym - 1 + } + sym++ + goto retry // Avoid for-loop so that this function can be inlined +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/reader.go b/vendor/github.com/dsnet/compress/internal/prefix/reader.go new file mode 100644 index 00000000..e6252c95 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/reader.go @@ -0,0 +1,335 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "bufio" + "bytes" + "encoding/binary" + "io" + "strings" + + "github.com/dsnet/compress" + "github.com/dsnet/compress/internal" + "github.com/dsnet/compress/internal/errors" +) + +// Reader implements a prefix decoder. If the input io.Reader satisfies the +// compress.ByteReader or compress.BufferedReader interface, then it also +// guarantees that it will never read more bytes than is necessary. +// +// For high performance, provide an io.Reader that satisfies the +// compress.BufferedReader interface. If the input does not satisfy either +// compress.ByteReader or compress.BufferedReader, then it will be internally +// wrapped with a bufio.Reader. +type Reader struct { + Offset int64 // Number of bytes read from the underlying io.Reader + + rd io.Reader + byteRd compress.ByteReader // Set if rd is a ByteReader + bufRd compress.BufferedReader // Set if rd is a BufferedReader + + bufBits uint64 // Buffer to hold some bits + numBits uint // Number of valid bits in bufBits + bigEndian bool // Do we treat input bytes as big endian? + + // These fields are only used if rd is a compress.BufferedReader. + bufPeek []byte // Buffer for the Peek data + discardBits int // Number of bits to discard from reader + fedBits uint // Number of bits fed in last call to PullBits + + // These fields are used to reduce allocations. + bb *buffer + br *bytesReader + sr *stringReader + bu *bufio.Reader +} + +// Init initializes the bit Reader to read from r. If bigEndian is true, then +// bits will be read starting from the most-significant bits of a byte +// (as done in bzip2), otherwise it will read starting from the +// least-significant bits of a byte (such as for deflate and brotli). 
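[Editor's aside] The RangeEncoder above resolves an offset to a symbol through a small lookup table, falling back to a linear scan once the offset runs past the table (the `goto` keeps Encode inlineable). A minimal sketch of that lookup strategy, under hypothetical names and with a toy table:

```go
package main

import "fmt"

// encodeOffset maps an offset to the index of the range containing it,
// mimicking RangeEncoder.Encode: a lookup table covers low offsets, and a
// linear scan over range bases handles everything past the table.
func encodeOffset(bases []uint32, lut []uint32, minBase, offset uint32) int {
	if idx := int(offset - minBase); idx < len(lut) {
		return int(lut[idx]) // fast path: direct table hit
	}
	sym := int(lut[len(lut)-1]) // start scanning from the last mapped symbol
	for sym+1 < len(bases) && bases[sym+1] <= offset {
		sym++
	}
	return sym
}

func main() {
	bases := []uint32{1, 2, 4, 8} // ranges [1,2), [2,4), [4,8), [8,16)
	lut := []uint32{0, 1, 1, 2}   // covers offsets 1..4 only
	for _, off := range []uint32{1, 3, 7, 12} {
		fmt.Printf("offset %d -> symbol %d\n", off, encodeOffset(bases, lut, 1, off))
	}
}
```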
+func (pr *Reader) Init(r io.Reader, bigEndian bool) {
+	*pr = Reader{
+		rd:        r,
+		bigEndian: bigEndian,
+
+		bb: pr.bb,
+		br: pr.br,
+		sr: pr.sr,
+		bu: pr.bu,
+	}
+	switch rr := r.(type) {
+	case *bytes.Buffer:
+		if pr.bb == nil {
+			pr.bb = new(buffer)
+		}
+		*pr.bb = buffer{Buffer: rr}
+		pr.bufRd = pr.bb
+	case *bytes.Reader:
+		if pr.br == nil {
+			pr.br = new(bytesReader)
+		}
+		*pr.br = bytesReader{Reader: rr}
+		pr.bufRd = pr.br
+	case *strings.Reader:
+		if pr.sr == nil {
+			pr.sr = new(stringReader)
+		}
+		*pr.sr = stringReader{Reader: rr}
+		pr.bufRd = pr.sr
+	case compress.BufferedReader:
+		pr.bufRd = rr
+	case compress.ByteReader:
+		pr.byteRd = rr
+	default:
+		if pr.bu == nil {
+			pr.bu = bufio.NewReader(nil)
+		}
+		pr.bu.Reset(r)
+		pr.rd, pr.bufRd = pr.bu, pr.bu
+	}
+}
+
+// BitsRead reports the total number of bits emitted from any Read method.
+func (pr *Reader) BitsRead() int64 {
+	offset := 8*pr.Offset - int64(pr.numBits)
+	if pr.bufRd != nil {
+		discardBits := pr.discardBits + int(pr.fedBits-pr.numBits)
+		offset = 8*pr.Offset + int64(discardBits)
+	}
+	return offset
+}
+
+// IsBufferedReader reports whether the underlying io.Reader is also a
+// compress.BufferedReader.
+func (pr *Reader) IsBufferedReader() bool {
+	return pr.bufRd != nil
+}
+
+// ReadPads reads 0-7 bits from the bit buffer to achieve byte-alignment.
+func (pr *Reader) ReadPads() uint {
+	nb := pr.numBits % 8
+	val := uint(pr.bufBits & uint64(1<<nb-1))
+	pr.bufBits >>= nb
+	pr.numBits -= nb
+	return val
+}
+
+// Read reads bytes into buf.
+// The bit-ordering mode does not affect this method.
+func (pr *Reader) Read(buf []byte) (cnt int, err error) {
+	if pr.numBits > 0 {
+		if pr.numBits%8 != 0 {
+			return 0, errorf(errors.Invalid, "non-aligned bit buffer")
+		}
+		for cnt = 0; len(buf) > cnt && pr.numBits > 0; cnt++ {
+			if pr.bigEndian {
+				buf[cnt] = internal.ReverseLUT[byte(pr.bufBits)]
+			} else {
+				buf[cnt] = byte(pr.bufBits)
+			}
+			pr.bufBits >>= 8
+			pr.numBits -= 8
+		}
+		return cnt, nil
+	}
+	if _, err := pr.Flush(); err != nil {
+		return 0, err
+	}
+	cnt, err = pr.rd.Read(buf)
+	pr.Offset += int64(cnt)
+	return cnt, err
+}
+
+// ReadOffset reads an offset value using the provided RangeCodes indexed by
+// the symbol read.
+func (pr *Reader) ReadOffset(pd *Decoder, rcs RangeCodes) uint {
+	rc := rcs[pr.ReadSymbol(pd)]
+	return uint(rc.Base) + pr.ReadBits(uint(rc.Len))
+}
+
+// TryReadBits attempts to read nb bits using the contents of the bit buffer
+// alone. It returns the value and whether it succeeded.
+//
+// This method is designed to be inlined for performance reasons.
+func (pr *Reader) TryReadBits(nb uint) (uint, bool) {
+	if pr.numBits < nb {
+		return 0, false
+	}
+	val := uint(pr.bufBits & uint64(1<<nb-1))
+	pr.bufBits >>= nb
+	pr.numBits -= nb
+	return val, true
+}
+
+// ReadBits reads nb bits in from the underlying reader.
+func (pr *Reader) ReadBits(nb uint) uint {
+	if err := pr.PullBits(nb); err != nil {
+		errors.Panic(err)
+	}
+	val := uint(pr.bufBits & uint64(1<<nb-1))
+	pr.bufBits >>= nb
+	pr.numBits -= nb
+	return val
+}
+
+// TryReadSymbol attempts to decode the next symbol using the contents of the
+// bit buffer alone. It returns the decoded symbol and whether it succeeded.
+//
+// This method is designed to be inlined for performance reasons.
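[Editor's aside] Before the symbol-decoding helpers below, the LSB-first bit-buffer discipline that ReadPads, ReadBits, and TryReadBits share is easier to see in isolation. A standalone sketch with a hypothetical bitReader type, not the package's API:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// bitReader is a stripped-down, LSB-first bit buffer in the spirit of
// prefix.Reader: bytes are pulled into a uint64 and bits are consumed
// from the bottom with a mask of 1<<nb - 1.
type bitReader struct {
	rd      io.ByteReader
	bufBits uint64 // buffer holding some bits
	numBits uint   // number of valid bits in bufBits
}

func (br *bitReader) readBits(nb uint) (uint, error) {
	for br.numBits < nb {
		c, err := br.rd.ReadByte()
		if err != nil {
			return 0, err
		}
		br.bufBits |= uint64(c) << br.numBits
		br.numBits += 8
	}
	val := uint(br.bufBits & (1<<nb - 1))
	br.bufBits >>= nb
	br.numBits -= nb
	return val, nil
}

func main() {
	// Reading 3+2+3 bits from 0b10110100 yields 100, 10, 101 (LSB first).
	br := &bitReader{rd: bytes.NewReader([]byte{0b10110100})}
	for _, nb := range []uint{3, 2, 3} {
		v, _ := br.readBits(nb)
		fmt.Printf("%d bits -> %0*b\n", nb, int(nb), v)
	}
}
```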
+func (pr *Reader) TryReadSymbol(pd *Decoder) (uint, bool) { + if pr.numBits < uint(pd.MinBits) || len(pd.chunks) == 0 { + return 0, false + } + chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask] + nb := uint(chunk & countMask) + if nb > pr.numBits || nb > uint(pd.chunkBits) { + return 0, false + } + pr.bufBits >>= nb + pr.numBits -= nb + return uint(chunk >> countBits), true +} + +// ReadSymbol reads the next symbol using the provided prefix Decoder. +func (pr *Reader) ReadSymbol(pd *Decoder) uint { + if len(pd.chunks) == 0 { + panicf(errors.Invalid, "decode with empty prefix tree") + } + + nb := uint(pd.MinBits) + for { + if err := pr.PullBits(nb); err != nil { + errors.Panic(err) + } + chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask] + nb = uint(chunk & countMask) + if nb > uint(pd.chunkBits) { + linkIdx := chunk >> countBits + chunk = pd.links[linkIdx][uint32(pr.bufBits>>pd.chunkBits)&pd.linkMask] + nb = uint(chunk & countMask) + } + if nb <= pr.numBits { + pr.bufBits >>= nb + pr.numBits -= nb + return uint(chunk >> countBits) + } + } +} + +// Flush updates the read offset of the underlying ByteReader. +// If reader is a compress.BufferedReader, then this calls Discard to update +// the read offset. +func (pr *Reader) Flush() (int64, error) { + if pr.bufRd == nil { + return pr.Offset, nil + } + + // Update the number of total bits to discard. + pr.discardBits += int(pr.fedBits - pr.numBits) + pr.fedBits = pr.numBits + + // Discard some bytes to update read offset. + var err error + nd := (pr.discardBits + 7) / 8 // Round up to nearest byte + nd, err = pr.bufRd.Discard(nd) + pr.discardBits -= nd * 8 // -7..0 + pr.Offset += int64(nd) + + // These are invalid after Discard. + pr.bufPeek = nil + return pr.Offset, err +} + +// PullBits ensures that at least nb bits exist in the bit buffer. +// If the underlying reader is a compress.BufferedReader, then this will fill +// the bit buffer with as many bits as possible, relying on Peek and Discard to +// properly advance the read offset. Otherwise, it will use ReadByte to fill the +// buffer with just the right number of bits. +func (pr *Reader) PullBits(nb uint) error { + if pr.bufRd != nil { + pr.discardBits += int(pr.fedBits - pr.numBits) + for { + if len(pr.bufPeek) == 0 { + pr.fedBits = pr.numBits // Don't discard bits just added + if _, err := pr.Flush(); err != nil { + return err + } + + // Peek no more bytes than necessary. + // The computation for cntPeek computes the minimum number of + // bytes to Peek to fill nb bits. + var err error + cntPeek := int(nb+(-nb&7)) / 8 + if cntPeek < pr.bufRd.Buffered() { + cntPeek = pr.bufRd.Buffered() + } + pr.bufPeek, err = pr.bufRd.Peek(cntPeek) + pr.bufPeek = pr.bufPeek[int(pr.numBits/8):] // Skip buffered bits + if len(pr.bufPeek) == 0 { + if pr.numBits >= nb { + break + } + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + + n := int(64-pr.numBits) / 8 // Number of bytes to copy to bit buffer + if len(pr.bufPeek) >= 8 { + // Starting with Go 1.7, the compiler should use a wide integer + // load here if the architecture supports it. + u := binary.LittleEndian.Uint64(pr.bufPeek) + if pr.bigEndian { + // Swap all the bits within each byte. 
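+				// Illustrative annotation (editorial, not upstream code):
+				// the three mask-and-shift steps below reverse the bit order
+				// of every byte in the word in parallel. For 0b10110100:
+				//	swap adjacent bits: 0b01111000
+				//	swap 2-bit pairs:   0b11010010
+				//	swap nibbles:       0b00101101 (the byte fully reversed)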
+ u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1 + u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2 + u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4 + } + + pr.bufBits |= u << pr.numBits + pr.numBits += uint(n * 8) + pr.bufPeek = pr.bufPeek[n:] + break + } else { + if n > len(pr.bufPeek) { + n = len(pr.bufPeek) + } + for _, c := range pr.bufPeek[:n] { + if pr.bigEndian { + c = internal.ReverseLUT[c] + } + pr.bufBits |= uint64(c) << pr.numBits + pr.numBits += 8 + } + pr.bufPeek = pr.bufPeek[n:] + if pr.numBits > 56 { + break + } + } + } + pr.fedBits = pr.numBits + } else { + for pr.numBits < nb { + c, err := pr.byteRd.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if pr.bigEndian { + c = internal.ReverseLUT[c] + } + pr.bufBits |= uint64(c) << pr.numBits + pr.numBits += 8 + pr.Offset++ + } + } + return nil +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/wrap.go b/vendor/github.com/dsnet/compress/internal/prefix/wrap.go new file mode 100644 index 00000000..49906d4a --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/wrap.go @@ -0,0 +1,146 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "bytes" + "io" + "strings" +) + +// For some of the common Readers, we wrap and extend them to satisfy the +// compress.BufferedReader interface to improve performance. + +type buffer struct { + *bytes.Buffer +} + +type bytesReader struct { + *bytes.Reader + pos int64 + buf []byte + arr [512]byte +} + +type stringReader struct { + *strings.Reader + pos int64 + buf []byte + arr [512]byte +} + +func (r *buffer) Buffered() int { + return r.Len() +} + +func (r *buffer) Peek(n int) ([]byte, error) { + b := r.Bytes() + if len(b) < n { + return b, io.EOF + } + return b[:n], nil +} + +func (r *buffer) Discard(n int) (int, error) { + b := r.Next(n) + if len(b) < n { + return len(b), io.EOF + } + return n, nil +} + +func (r *bytesReader) Buffered() int { + r.update() + if r.Len() > len(r.buf) { + return len(r.buf) + } + return r.Len() +} + +func (r *bytesReader) Peek(n int) ([]byte, error) { + if n > len(r.arr) { + return nil, io.ErrShortBuffer + } + + // Return sub-slice of local buffer if possible. + r.update() + if len(r.buf) >= n { + return r.buf[:n], nil + } + + // Fill entire local buffer, and return appropriate sub-slice. + cnt, err := r.ReadAt(r.arr[:], r.pos) + r.buf = r.arr[:cnt] + if cnt < n { + return r.arr[:cnt], err + } + return r.arr[:n], nil +} + +func (r *bytesReader) Discard(n int) (int, error) { + var err error + if n > r.Len() { + n, err = r.Len(), io.EOF + } + r.Seek(int64(n), io.SeekCurrent) + return n, err +} + +// update reslices the internal buffer to be consistent with the read offset. +func (r *bytesReader) update() { + pos, _ := r.Seek(0, io.SeekCurrent) + if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { + r.buf, r.pos = r.buf[off:], pos + } else { + r.buf, r.pos = nil, pos + } +} + +func (r *stringReader) Buffered() int { + r.update() + if r.Len() > len(r.buf) { + return len(r.buf) + } + return r.Len() +} + +func (r *stringReader) Peek(n int) ([]byte, error) { + if n > len(r.arr) { + return nil, io.ErrShortBuffer + } + + // Return sub-slice of local buffer if possible. + r.update() + if len(r.buf) >= n { + return r.buf[:n], nil + } + + // Fill entire local buffer, and return appropriate sub-slice. 
+ cnt, err := r.ReadAt(r.arr[:], r.pos) + r.buf = r.arr[:cnt] + if cnt < n { + return r.arr[:cnt], err + } + return r.arr[:n], nil +} + +func (r *stringReader) Discard(n int) (int, error) { + var err error + if n > r.Len() { + n, err = r.Len(), io.EOF + } + r.Seek(int64(n), io.SeekCurrent) + return n, err +} + +// update reslices the internal buffer to be consistent with the read offset. +func (r *stringReader) update() { + pos, _ := r.Seek(0, io.SeekCurrent) + if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) { + r.buf, r.pos = r.buf[off:], pos + } else { + r.buf, r.pos = nil, pos + } +} diff --git a/vendor/github.com/dsnet/compress/internal/prefix/writer.go b/vendor/github.com/dsnet/compress/internal/prefix/writer.go new file mode 100644 index 00000000..c9783905 --- /dev/null +++ b/vendor/github.com/dsnet/compress/internal/prefix/writer.go @@ -0,0 +1,166 @@ +// Copyright 2015, Joe Tsai. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package prefix + +import ( + "encoding/binary" + "io" + + "github.com/dsnet/compress/internal/errors" +) + +// Writer implements a prefix encoder. For performance reasons, Writer will not +// write bytes immediately to the underlying stream. +type Writer struct { + Offset int64 // Number of bytes written to the underlying io.Writer + + wr io.Writer + bufBits uint64 // Buffer to hold some bits + numBits uint // Number of valid bits in bufBits + bigEndian bool // Are bits written in big-endian order? + + buf [512]byte + cntBuf int +} + +// Init initializes the bit Writer to write to w. If bigEndian is true, then +// bits will be written starting from the most-significant bits of a byte +// (as done in bzip2), otherwise it will write starting from the +// least-significant bits of a byte (such as for deflate and brotli). +func (pw *Writer) Init(w io.Writer, bigEndian bool) { + *pw = Writer{wr: w, bigEndian: bigEndian} + return +} + +// BitsWritten reports the total number of bits issued to any Write method. +func (pw *Writer) BitsWritten() int64 { + return 8*pw.Offset + 8*int64(pw.cntBuf) + int64(pw.numBits) +} + +// WritePads writes 0-7 bits to the bit buffer to achieve byte-alignment. +func (pw *Writer) WritePads(v uint) { + nb := -pw.numBits & 7 + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb +} + +// Write writes bytes from buf. +// The bit-ordering mode does not affect this method. +func (pw *Writer) Write(buf []byte) (cnt int, err error) { + if pw.numBits > 0 || pw.cntBuf > 0 { + if pw.numBits%8 != 0 { + return 0, errorf(errors.Invalid, "non-aligned bit buffer") + } + if _, err := pw.Flush(); err != nil { + return 0, err + } + } + cnt, err = pw.wr.Write(buf) + pw.Offset += int64(cnt) + return cnt, err +} + +// WriteOffset writes ofs in a (sym, extra) fashion using the provided prefix +// Encoder and RangeEncoder. +func (pw *Writer) WriteOffset(ofs uint, pe *Encoder, re *RangeEncoder) { + sym := re.Encode(ofs) + pw.WriteSymbol(sym, pe) + rc := re.rcs[sym] + pw.WriteBits(ofs-uint(rc.Base), uint(rc.Len)) +} + +// TryWriteBits attempts to write nb bits using the contents of the bit buffer +// alone. It reports whether it succeeded. +// +// This method is designed to be inlined for performance reasons. +func (pw *Writer) TryWriteBits(v, nb uint) bool { + if 64-pw.numBits < nb { + return false + } + pw.bufBits |= uint64(v) << pw.numBits + pw.numBits += nb + return true +} + +// WriteBits writes nb bits of v to the underlying writer. 
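[Editor's aside] Before the remaining write-side methods, here is the mirror image of the earlier reader sketch: a minimal LSB-first bit writer under hypothetical names. It accumulates bits at the top of a word and flushes whole bytes, as Writer does with its larger internal buffer:

```go
package main

import (
	"bytes"
	"fmt"
)

// bitWriter is a minimal LSB-first counterpart to prefix.Writer.
type bitWriter struct {
	buf     bytes.Buffer
	bufBits uint64 // buffer holding some bits
	numBits uint   // number of valid bits in bufBits
}

func (bw *bitWriter) writeBits(v, nb uint) {
	bw.bufBits |= uint64(v) << bw.numBits
	bw.numBits += nb
	for bw.numBits >= 8 {
		bw.buf.WriteByte(byte(bw.bufBits))
		bw.bufBits >>= 8
		bw.numBits -= 8
	}
}

// writePads mirrors Writer.WritePads: pad with zero bits to a byte edge.
func (bw *bitWriter) writePads() {
	if bw.numBits > 0 {
		bw.buf.WriteByte(byte(bw.bufBits))
		bw.bufBits, bw.numBits = 0, 0
	}
}

func main() {
	bw := &bitWriter{}
	bw.writeBits(0b100, 3) // the reader sketch earlier recovers these
	bw.writeBits(0b10, 2)
	bw.writeBits(0b101, 3)
	bw.writePads()
	fmt.Printf("%08b\n", bw.buf.Bytes()) // [10110100]
}
```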
+func (pw *Writer) WriteBits(v, nb uint) {
+	if _, err := pw.PushBits(); err != nil {
+		errors.Panic(err)
+	}
+	pw.bufBits |= uint64(v) << pw.numBits
+	pw.numBits += nb
+}
+
+// TryWriteSymbol attempts to encode the next symbol using the contents of the
+// bit buffer alone. It reports whether it succeeded.
+//
+// This method is designed to be inlined for performance reasons.
+func (pw *Writer) TryWriteSymbol(sym uint, pe *Encoder) bool {
+	chunk := pe.chunks[uint32(sym)&pe.chunkMask]
+	nb := uint(chunk & countMask)
+	if 64-pw.numBits < nb {
+		return false
+	}
+	pw.bufBits |= uint64(chunk>>countBits) << pw.numBits
+	pw.numBits += nb
+	return true
+}
+
+// WriteSymbol writes the symbol using the provided prefix Encoder.
+func (pw *Writer) WriteSymbol(sym uint, pe *Encoder) {
+	if _, err := pw.PushBits(); err != nil {
+		errors.Panic(err)
+	}
+	chunk := pe.chunks[uint32(sym)&pe.chunkMask]
+	nb := uint(chunk & countMask)
+	pw.bufBits |= uint64(chunk>>countBits) << pw.numBits
+	pw.numBits += nb
+}
+
+// Flush flushes all complete bytes from the bit buffer to the byte buffer, and
+// then flushes all bytes in the byte buffer to the underlying writer.
+// After this call, the bit Writer will only withhold 7 bits at most.
+func (pw *Writer) Flush() (int64, error) {
+	if pw.numBits < 8 && pw.cntBuf == 0 {
+		return pw.Offset, nil
+	}
+	if _, err := pw.PushBits(); err != nil {
+		return pw.Offset, err
+	}
+	cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf])
+	pw.cntBuf -= cnt
+	pw.Offset += int64(cnt)
+	return pw.Offset, err
+}
+
+// PushBits pushes as many bytes as possible from the bit buffer to the byte
+// buffer, reporting the number of bits pushed.
+func (pw *Writer) PushBits() (uint, error) {
+	if pw.cntBuf >= len(pw.buf)-8 {
+		cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf])
+		pw.cntBuf -= cnt
+		pw.Offset += int64(cnt)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	u := pw.bufBits
+	if pw.bigEndian {
+		// Swap all the bits within each byte.
+		u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1
+		u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2
+		u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4
+	}
+	// Starting with Go 1.7, the compiler should use a wide integer
+	// store here if the architecture supports it.
+	binary.LittleEndian.PutUint64(pw.buf[pw.cntBuf:], u)
+
+	nb := pw.numBits / 8 // Number of bytes to copy from bit buffer
+	pw.cntBuf += int(nb)
+	pw.bufBits >>= 8 * nb
+	pw.numBits -= 8 * nb
+	return 8 * nb, nil
+}
diff --git a/vendor/github.com/dsnet/compress/internal/release.go b/vendor/github.com/dsnet/compress/internal/release.go
new file mode 100644
index 00000000..0990be1c
--- /dev/null
+++ b/vendor/github.com/dsnet/compress/internal/release.go
@@ -0,0 +1,21 @@
+// Copyright 2015, Joe Tsai. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !debug,!gofuzz
+
+package internal
+
+// Debug indicates whether the debug build tag was set.
+//
+// If set, programs may choose to print with more human-readable
+// debug information and also perform sanity checks that would otherwise be too
+// expensive to run in a release build.
+const Debug = false
+
+// GoFuzz indicates whether the gofuzz build tag was set.
+//
+// If set, programs may choose to disable certain checks (like checksums) that
+// would be nearly impossible for gofuzz to properly get right.
+// If GoFuzz is set, it implies that Debug is set as well.
+const GoFuzz = false diff --git a/vendor/github.com/dsnet/compress/zbench.sh b/vendor/github.com/dsnet/compress/zbench.sh new file mode 100644 index 00000000..0205920d --- /dev/null +++ b/vendor/github.com/dsnet/compress/zbench.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +# zbench wraps internal/tool/bench and is useful for comparing benchmarks from +# the implementations in this repository relative to other implementations. +# +# See internal/tool/bench/main.go for more details. +cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/bench +go run $(go list -f '{{ join .GoFiles "\n" }}') "$@" diff --git a/vendor/github.com/dsnet/compress/zfuzz.sh b/vendor/github.com/dsnet/compress/zfuzz.sh new file mode 100644 index 00000000..42958ed4 --- /dev/null +++ b/vendor/github.com/dsnet/compress/zfuzz.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +# zfuzz wraps internal/tool/fuzz and is useful for fuzz testing each of +# the implementations in this repository. +cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/fuzz +./fuzz.sh "$@" diff --git a/vendor/github.com/dsnet/compress/zprof.sh b/vendor/github.com/dsnet/compress/zprof.sh new file mode 100644 index 00000000..3cd535be --- /dev/null +++ b/vendor/github.com/dsnet/compress/zprof.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +if [ $# == 0 ]; then + echo "Usage: $0 PKG_PATH TEST_ARGS..." + echo "" + echo "Runs coverage and performance benchmarks for a given package." + echo "The results are stored in the _zprof_ directory." + echo "" + echo "Example:" + echo " $0 flate -test.bench=Decode/Twain/Default" + exit 1 +fi + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PKG_PATH=$1 +PKG_NAME=$(basename $PKG_PATH) +shift + +TMPDIR=$(mktemp -d) +trap "rm -rf $TMPDIR $PKG_PATH/$PKG_NAME.test" SIGINT SIGTERM EXIT + +( + cd $DIR/$PKG_PATH + + # Print the go version. + go version + + # Perform coverage profiling. + go test github.com/dsnet/compress/$PKG_PATH -coverprofile $TMPDIR/cover.profile + if [ $? != 0 ]; then exit 1; fi + go tool cover -html $TMPDIR/cover.profile -o cover.html + + # Perform performance profiling. + if [ $# != 0 ]; then + go test -c github.com/dsnet/compress/$PKG_PATH + if [ $? != 0 ]; then exit 1; fi + ./$PKG_NAME.test -test.cpuprofile $TMPDIR/cpu.profile -test.memprofile $TMPDIR/mem.profile -test.run - "$@" + PPROF="go tool pprof" + $PPROF -output=cpu.svg -web $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null + $PPROF -output=cpu.html -weblist=. $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null + $PPROF -output=mem_objects.svg -alloc_objects -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_objects.html -alloc_objects -weblist=. $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_space.svg -alloc_space -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + $PPROF -output=mem_space.html -alloc_space -weblist=. 
$PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null + fi + + rm -rf $DIR/_zprof_/$PKG_NAME + mkdir -p $DIR/_zprof_/$PKG_NAME + mv *.html *.svg $DIR/_zprof_/$PKG_NAME 2> /dev/null +) diff --git a/vendor/github.com/dsnet/compress/ztest.sh b/vendor/github.com/dsnet/compress/ztest.sh new file mode 100644 index 00000000..15c4c00b --- /dev/null +++ b/vendor/github.com/dsnet/compress/ztest.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Copyright 2017, Joe Tsai. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE.md file. + +cd $(go list -f '{{ .Dir }}' github.com/dsnet/compress) + +BOLD="\x1b[1mRunning: " +PASS="\x1b[32mPASS" +FAIL="\x1b[31mFAIL" +RESET="\x1b[0m" + +echo -e "${BOLD}fmt${RESET}" +RET_FMT=$(find . -name "*.go" | egrep -v "/(_.*_|\..*|testdata)/" | xargs gofmt -d) +if [[ ! -z "$RET_FMT" ]]; then echo "$RET_FMT"; echo; fi + +echo -e "${BOLD}test${RESET}" +RET_TEST=$(go test -race ./... | egrep -v "^(ok|[?])\s+") +if [[ ! -z "$RET_TEST" ]]; then echo "$RET_TEST"; echo; fi + +echo -e "${BOLD}staticcheck${RESET}" +RET_SCHK=$(staticcheck \ + -ignore " + github.com/dsnet/compress/brotli/*.go:SA4016 + github.com/dsnet/compress/brotli/*.go:S1023 + github.com/dsnet/compress/brotli/*.go:U1000 + github.com/dsnet/compress/bzip2/*.go:S1023 + github.com/dsnet/compress/flate/*.go:U1000 + github.com/dsnet/compress/internal/cgo/lzma/*.go:SA4000 + github.com/dsnet/compress/internal/prefix/*.go:S1004 + github.com/dsnet/compress/internal/prefix/*.go:S1023 + github.com/dsnet/compress/internal/prefix/*.go:SA4016 + github.com/dsnet/compress/internal/tool/bench/*.go:S1007 + github.com/dsnet/compress/xflate/internal/meta/*.go:S1023 + " ./... 2>&1) +if [[ ! -z "$RET_SCHK" ]]; then echo "$RET_SCHK"; echo; fi + +echo -e "${BOLD}lint${RESET}" +RET_LINT=$(golint ./... 2>&1 | + egrep -v "^vendor/" | + egrep -v "should have comment(.*)or be unexported" | + egrep -v "^(.*)type name will be used as(.*)by other packages" | + egrep -v "^brotli/transform.go:(.*)replace i [+]= 1 with i[+]{2}" | + egrep -v "^internal/prefix/prefix.go:(.*)replace symBits(.*) [-]= 1 with symBits(.*)[-]{2}" | + egrep -v "^xflate/common.go:(.*)NoCompression should be of the form" | + egrep -v "^exit status") +if [[ ! -z "$RET_LINT" ]]; then echo "$RET_LINT"; echo; fi + +if [[ ! -z "$RET_FMT" ]] || [ ! -z "$RET_TEST" ] || [[ ! -z "$RET_SCHK" ]] || [[ ! -z "$RET_LINT" ]]; then + echo -e "${FAIL}${RESET}"; exit 1 +else + echo -e "${PASS}${RESET}"; exit 0 +fi diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/doc.go new file mode 100644 index 00000000..0fd83905 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +// Package authentication is the internal version of the API. +// +groupName=authentication.gardener.cloud +package authentication diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/register.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/register.go new file mode 100644 index 00000000..35263a6e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the authentication API group. +const GroupName = "authentication.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdminKubeconfigRequest{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/types_adminkubeconfigrequest.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/types_adminkubeconfigrequest.go new file mode 100644 index 00000000..3ffa508a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/types_adminkubeconfigrequest.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdminKubeconfigRequest can be used to request a kubeconfig with admin credentials +// for a Shoot cluster. +type AdminKubeconfigRequest struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec is the specification of the AdminKubeconfigRequest. + Spec AdminKubeconfigRequestSpec + // Status is the status of the AdminKubeconfigRequest. + Status AdminKubeconfigRequestStatus +} + +// AdminKubeconfigRequestStatus is the status of the AdminKubeconfigRequest containing +// the kubeconfig and expiration of the credential. +type AdminKubeconfigRequestStatus struct { + // Kubeconfig contains the kubeconfig with cluster-admin privileges for the shoot cluster. + Kubeconfig []byte + // ExpirationTimestamp is the expiration timestamp of the returned credential. + ExpirationTimestamp metav1.Time +} + +// AdminKubeconfigRequestSpec contains the expiration time of the kubeconfig. +type AdminKubeconfigRequestSpec struct { + // ExpirationSeconds is the requested validity duration of the credential. The + // credential issuer may return a credential with a different validity duration so a + // client needs to check the 'expirationTimestamp' field in a response. + // Defaults to 1 hour. + ExpirationSeconds int64 +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/defaults.go new file mode 100644 index 00000000..71a21346 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/defaults.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_AdminKubeconfigRequestSpec sets default values for AdminKubeconfigRequestSpec objects. +func SetDefaults_AdminKubeconfigRequestSpec(obj *AdminKubeconfigRequestSpec) { + if obj.ExpirationSeconds == nil { + hour := int64(60 * 60) + obj.ExpirationSeconds = &hour + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/doc.go new file mode 100644 index 00000000..fdcd2869 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 is the v1alpha1 version of the API. +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/authentication +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +// +k8s:protobuf-gen=package + +//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/authentication-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../docs/api-reference/authentication.md + +// Package v1alpha1 is a version of the API. +// +groupName=authentication.gardener.cloud +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.pb.go new file mode 100644 index 00000000..4b9e2908 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.pb.go @@ -0,0 +1,813 @@ +/* +Copyright SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
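[Editor's aside] Setting the generated protobuf code aside for a moment: the vendored test utilities under test/utils/access build AdminKubeconfigRequest objects like the one sketched below and submit them to a Shoot's adminkubeconfig subresource to obtain short-lived admin kubeconfigs for the e2e tests. The snippet is illustrative only; error handling is elided and the two-hour expiry is an arbitrary choice:

```go
package main

import (
	"encoding/json"
	"fmt"

	authenticationv1alpha1 "github.com/gardener/gardener/pkg/apis/authentication/v1alpha1"
)

func main() {
	// Request a kubeconfig valid for two hours. ExpirationSeconds is a
	// pointer in v1alpha1 so that an omitted value can be defaulted
	// (to one hour, per SetDefaults_AdminKubeconfigRequestSpec).
	expiration := int64(2 * 60 * 60)
	req := &authenticationv1alpha1.AdminKubeconfigRequest{
		Spec: authenticationv1alpha1.AdminKubeconfigRequestSpec{
			ExpirationSeconds: &expiration,
		},
	}
	// The server fills Status.Kubeconfig and Status.ExpirationTimestamp
	// in its response; here we only show the request wire form.
	out, _ := json.Marshal(req)
	fmt.Println(string(out))
}
```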
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdminKubeconfigRequest) Reset() { *m = AdminKubeconfigRequest{} } +func (*AdminKubeconfigRequest) ProtoMessage() {} +func (*AdminKubeconfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4ad0cb10cdbf25b8, []int{0} +} +func (m *AdminKubeconfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdminKubeconfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdminKubeconfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdminKubeconfigRequest.Merge(m, src) +} +func (m *AdminKubeconfigRequest) XXX_Size() int { + return m.Size() +} +func (m *AdminKubeconfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdminKubeconfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdminKubeconfigRequest proto.InternalMessageInfo + +func (m *AdminKubeconfigRequestSpec) Reset() { *m = AdminKubeconfigRequestSpec{} } +func (*AdminKubeconfigRequestSpec) ProtoMessage() {} +func (*AdminKubeconfigRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_4ad0cb10cdbf25b8, []int{1} +} +func (m *AdminKubeconfigRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdminKubeconfigRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdminKubeconfigRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdminKubeconfigRequestSpec.Merge(m, src) +} +func (m *AdminKubeconfigRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *AdminKubeconfigRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AdminKubeconfigRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_AdminKubeconfigRequestSpec proto.InternalMessageInfo + +func (m *AdminKubeconfigRequestStatus) Reset() { *m = AdminKubeconfigRequestStatus{} } +func (*AdminKubeconfigRequestStatus) ProtoMessage() {} +func (*AdminKubeconfigRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_4ad0cb10cdbf25b8, []int{2} +} +func (m *AdminKubeconfigRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdminKubeconfigRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdminKubeconfigRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdminKubeconfigRequestStatus.Merge(m, src) +} +func (m *AdminKubeconfigRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *AdminKubeconfigRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AdminKubeconfigRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AdminKubeconfigRequestStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdminKubeconfigRequest)(nil), "github.com.gardener.gardener.pkg.apis.authentication.v1alpha1.AdminKubeconfigRequest") + proto.RegisterType((*AdminKubeconfigRequestSpec)(nil), "github.com.gardener.gardener.pkg.apis.authentication.v1alpha1.AdminKubeconfigRequestSpec") + proto.RegisterType((*AdminKubeconfigRequestStatus)(nil), "github.com.gardener.gardener.pkg.apis.authentication.v1alpha1.AdminKubeconfigRequestStatus") +} + +func init() { + 
proto.RegisterFile("github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.proto", fileDescriptor_4ad0cb10cdbf25b8) +} + +var fileDescriptor_4ad0cb10cdbf25b8 = []byte{ + // 469 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x41, 0x6b, 0xd4, 0x40, + 0x14, 0xc7, 0x33, 0x6d, 0x29, 0x32, 0x96, 0x42, 0x53, 0x94, 0x65, 0x95, 0x59, 0xd9, 0x93, 0x08, + 0x4e, 0x5c, 0x11, 0xf1, 0xd2, 0x83, 0x91, 0x9e, 0xa4, 0x08, 0xa9, 0x17, 0xf5, 0xe2, 0xdb, 0xc9, + 0x6b, 0x32, 0xae, 0x49, 0xc6, 0xcc, 0x64, 0xb1, 0x78, 0x11, 0xfc, 0x02, 0x7e, 0xac, 0xf5, 0xd6, + 0x63, 0x4f, 0x8b, 0x1b, 0x3f, 0x87, 0x20, 0x33, 0x9b, 0x36, 0x6b, 0xb7, 0x55, 0x61, 0xf1, 0xf6, + 0x5e, 0xe6, 0xfd, 0xff, 0xbf, 0x7f, 0xe6, 0x85, 0xd0, 0x83, 0x44, 0x9a, 0xb4, 0x1a, 0x72, 0x51, + 0x64, 0x41, 0x02, 0x65, 0x8c, 0x39, 0x96, 0x6d, 0xa1, 0x46, 0x49, 0x00, 0x4a, 0xea, 0x00, 0x2a, + 0x93, 0x62, 0x6e, 0xa4, 0x00, 0x23, 0x8b, 0x3c, 0x18, 0x0f, 0xe0, 0xbd, 0x4a, 0x61, 0x10, 0x24, + 0x76, 0x0c, 0x0c, 0xc6, 0x5c, 0x95, 0x85, 0x29, 0xfc, 0xbd, 0xd6, 0x8e, 0x9f, 0xb9, 0xb4, 0x85, + 0x1a, 0x25, 0xdc, 0xda, 0xf1, 0xdf, 0xed, 0xf8, 0x99, 0x5d, 0xf7, 0xfe, 0x62, 0x9a, 0x22, 0x29, + 0x02, 0xe7, 0x3a, 0xac, 0x8e, 0x5c, 0xe7, 0x1a, 0x57, 0xcd, 0x69, 0xdd, 0x47, 0xa3, 0x27, 0x9a, + 0xcb, 0xc2, 0x46, 0xcc, 0x40, 0xa4, 0x32, 0xc7, 0xf2, 0xb8, 0xcd, 0x9c, 0xa1, 0x81, 0x60, 0xbc, + 0x94, 0xb1, 0x1b, 0x5c, 0xa5, 0x2a, 0xab, 0xdc, 0xc8, 0x0c, 0x97, 0x04, 0x8f, 0xff, 0x26, 0xd0, + 0x22, 0xc5, 0x0c, 0x2e, 0xea, 0xfa, 0x3f, 0xd7, 0xe8, 0xcd, 0xa7, 0x71, 0x26, 0xf3, 0xe7, 0xd5, + 0x10, 0x45, 0x91, 0x1f, 0xc9, 0x24, 0xc2, 0x0f, 0x15, 0x6a, 0xe3, 0xbf, 0xa5, 0xd7, 0x6c, 0xbc, + 0x18, 0x0c, 0x74, 0xc8, 0x1d, 0x72, 0xf7, 0xfa, 0xc3, 0x07, 0x7c, 0x4e, 0xe1, 0x8b, 0x94, 0xf6, + 0xc6, 0xec, 0x34, 0x1f, 0x0f, 0xf8, 0x8b, 0xe1, 0x3b, 0x14, 0xe6, 0x00, 0x0d, 0x84, 0xfe, 0x64, + 0xda, 0xf3, 0xea, 0x69, 0x8f, 0xb6, 0xcf, 0xa2, 0x73, 0x57, 0xff, 0x13, 0xdd, 0xd0, 0x0a, 0x45, + 0x67, 0xcd, 0xb9, 0xbf, 0xe2, 0x2b, 0x2d, 0x86, 0x5f, 0xfe, 0x1a, 0x87, 0x0a, 0x45, 0xb8, 0xd5, + 0xc4, 0xd8, 0xb0, 0x5d, 0xe4, 0xa0, 0xfe, 0x17, 0x42, 0x37, 0xb5, 0x01, 0x53, 0xe9, 0xce, 0xba, + 0xe3, 0xbf, 0xf9, 0x3f, 0x7c, 0x87, 0x08, 0xb7, 0x9b, 0x04, 0x9b, 0xf3, 0x3e, 0x6a, 0xd0, 0x7d, + 0xa0, 0xdd, 0xab, 0x73, 0xfb, 0xcf, 0xe8, 0x0e, 0x7e, 0x54, 0xb2, 0x74, 0xa4, 0x43, 0x3b, 0x10, + 0x6b, 0xb7, 0x8b, 0xf5, 0xf0, 0x46, 0x3d, 0xed, 0xed, 0xec, 0x5f, 0x3c, 0x8c, 0x96, 0xe7, 0xfb, + 0xdf, 0x08, 0xbd, 0xfd, 0xa7, 0x6c, 0x3e, 0xa7, 0x74, 0x74, 0x7e, 0xe4, 0xec, 0xb7, 0xc2, 0x6d, + 0xbb, 0xb4, 0x05, 0xc1, 0xc2, 0x84, 0x7f, 0x4c, 0x77, 0x5b, 0xca, 0x4b, 0x99, 0xa1, 0x36, 0x90, + 0xa9, 0x66, 0x8b, 0xf7, 0xfe, 0xed, 0x1b, 0xb1, 0xb2, 0xf0, 0x56, 0x73, 0x29, 0xbb, 0xfb, 0xcb, + 0x76, 0xd1, 0x65, 0x8c, 0x50, 0x4c, 0x66, 0xcc, 0x3b, 0x99, 0x31, 0xef, 0x74, 0xc6, 0xbc, 0xcf, + 0x35, 0x23, 0x93, 0x9a, 0x91, 0x93, 0x9a, 0x91, 0xd3, 0x9a, 0x91, 0xef, 0x35, 0x23, 0x5f, 0x7f, + 0x30, 0xef, 0xf5, 0xde, 0x4a, 0x3f, 0x8c, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0xfa, 0xde, + 0x47, 0x70, 0x04, 0x00, 0x00, +} + +func (m *AdminKubeconfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdminKubeconfigRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdminKubeconfigRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdminKubeconfigRequestSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdminKubeconfigRequestSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdminKubeconfigRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AdminKubeconfigRequestStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdminKubeconfigRequestStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdminKubeconfigRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Kubeconfig != nil { + i -= len(m.Kubeconfig) + copy(dAtA[i:], m.Kubeconfig) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kubeconfig))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdminKubeconfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdminKubeconfigRequestSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + +func (m *AdminKubeconfigRequestStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kubeconfig != nil { + l = len(m.Kubeconfig) + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ExpirationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdminKubeconfigRequest) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&AdminKubeconfigRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AdminKubeconfigRequestSpec", "AdminKubeconfigRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "AdminKubeconfigRequestStatus", "AdminKubeconfigRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AdminKubeconfigRequestSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdminKubeconfigRequestSpec{`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *AdminKubeconfigRequestStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdminKubeconfigRequestStatus{`, + `Kubeconfig:` + valueToStringGenerated(this.Kubeconfig) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdminKubeconfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdminKubeconfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdminKubeconfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdminKubeconfigRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdminKubeconfigRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdminKubeconfigRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdminKubeconfigRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdminKubeconfigRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdminKubeconfigRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubeconfig", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kubeconfig = append(m.Kubeconfig[:0], dAtA[iNdEx:postIndex]...) + if m.Kubeconfig == nil { + m.Kubeconfig = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExpirationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.proto new file mode 100644 index 00000000..be4e0c73 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/generated.proto @@ -0,0 +1,62 @@ +/* +Copyright SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.gardener.gardener.pkg.apis.authentication.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/gardener/gardener/pkg/apis/authentication/v1alpha1"; + +// AdminKubeconfigRequest can be used to request a kubeconfig with admin credentials +// for a Shoot cluster. +message AdminKubeconfigRequest { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the specification of the AdminKubeconfigRequest. + optional AdminKubeconfigRequestSpec spec = 2; + + // Status is the status of the AdminKubeconfigRequest. + optional AdminKubeconfigRequestStatus status = 3; +} + +// AdminKubeconfigRequestSpec contains the expiration time of the kubeconfig. +message AdminKubeconfigRequestSpec { + // ExpirationSeconds is the requested validity duration of the credential. The + // credential issuer may return a credential with a different validity duration so a + // client needs to check the 'expirationTimestamp' field in a response. + // Defaults to 1 hour. + // +optional + optional int64 expirationSeconds = 1; +} + +// AdminKubeconfigRequestStatus is the status of the AdminKubeconfigRequest containing +// the kubeconfig and expiration of the credential. +message AdminKubeconfigRequestStatus { + // Kubeconfig contains the kubeconfig with cluster-admin privileges for the shoot cluster. + optional bytes kubeconfig = 1; + + // ExpirationTimestamp is the expiration timestamp of the returned credential. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; +} + diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/register.go new file mode 100644 index 00000000..3d51bc13 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the authentication API group. +const GroupName = "authentication.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdminKubeconfigRequest{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/types_adminkubeconfigrequest.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/types_adminkubeconfigrequest.go new file mode 100644 index 00000000..5df13609 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/types_adminkubeconfigrequest.go @@ -0,0 +1,55 @@ +/* +Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdminKubeconfigRequest can be used to request a kubeconfig with admin credentials +// for a Shoot cluster. +type AdminKubeconfigRequest struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec is the specification of the AdminKubeconfigRequest. + Spec AdminKubeconfigRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Status is the status of the AdminKubeconfigRequest. + Status AdminKubeconfigRequestStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// AdminKubeconfigRequestStatus is the status of the AdminKubeconfigRequest containing +// the kubeconfig and expiration of the credential. +type AdminKubeconfigRequestStatus struct { + // Kubeconfig contains the kubeconfig with cluster-admin privileges for the shoot cluster. 
+ Kubeconfig []byte `json:"kubeconfig" protobuf:"bytes,1,name=kubeconfig"` + // ExpirationTimestamp is the expiration timestamp of the returned credential. + ExpirationTimestamp metav1.Time `json:"expirationTimestamp" protobuf:"bytes,2,name=expirationTimestamp"` +} + +// AdminKubeconfigRequestSpec contains the expiration time of the kubeconfig. +type AdminKubeconfigRequestSpec struct { + // ExpirationSeconds is the requested validity duration of the credential. The + // credential issuer may return a credential with a different validity duration so a + // client needs to check the 'expirationTimestamp' field in a response. + // Defaults to 1 hour. + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,1,opt,name=expirationSeconds"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.conversion.go new file mode 100644 index 00000000..0b4cbd3e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,149 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + authentication "github.com/gardener/gardener/pkg/apis/authentication" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AdminKubeconfigRequest)(nil), (*authentication.AdminKubeconfigRequest)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdminKubeconfigRequest_To_authentication_AdminKubeconfigRequest(a.(*AdminKubeconfigRequest), b.(*authentication.AdminKubeconfigRequest), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*authentication.AdminKubeconfigRequest)(nil), (*AdminKubeconfigRequest)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_authentication_AdminKubeconfigRequest_To_v1alpha1_AdminKubeconfigRequest(a.(*authentication.AdminKubeconfigRequest), b.(*AdminKubeconfigRequest), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdminKubeconfigRequestSpec)(nil), (*authentication.AdminKubeconfigRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdminKubeconfigRequestSpec_To_authentication_AdminKubeconfigRequestSpec(a.(*AdminKubeconfigRequestSpec), b.(*authentication.AdminKubeconfigRequestSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*authentication.AdminKubeconfigRequestSpec)(nil), (*AdminKubeconfigRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_authentication_AdminKubeconfigRequestSpec_To_v1alpha1_AdminKubeconfigRequestSpec(a.(*authentication.AdminKubeconfigRequestSpec), b.(*AdminKubeconfigRequestSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdminKubeconfigRequestStatus)(nil), (*authentication.AdminKubeconfigRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdminKubeconfigRequestStatus_To_authentication_AdminKubeconfigRequestStatus(a.(*AdminKubeconfigRequestStatus), b.(*authentication.AdminKubeconfigRequestStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*authentication.AdminKubeconfigRequestStatus)(nil), (*AdminKubeconfigRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_authentication_AdminKubeconfigRequestStatus_To_v1alpha1_AdminKubeconfigRequestStatus(a.(*authentication.AdminKubeconfigRequestStatus), b.(*AdminKubeconfigRequestStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_AdminKubeconfigRequest_To_authentication_AdminKubeconfigRequest(in *AdminKubeconfigRequest, out *authentication.AdminKubeconfigRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_AdminKubeconfigRequestSpec_To_authentication_AdminKubeconfigRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_AdminKubeconfigRequestStatus_To_authentication_AdminKubeconfigRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_AdminKubeconfigRequest_To_authentication_AdminKubeconfigRequest is an autogenerated conversion function. 
+func Convert_v1alpha1_AdminKubeconfigRequest_To_authentication_AdminKubeconfigRequest(in *AdminKubeconfigRequest, out *authentication.AdminKubeconfigRequest, s conversion.Scope) error { + return autoConvert_v1alpha1_AdminKubeconfigRequest_To_authentication_AdminKubeconfigRequest(in, out, s) +} + +func autoConvert_authentication_AdminKubeconfigRequest_To_v1alpha1_AdminKubeconfigRequest(in *authentication.AdminKubeconfigRequest, out *AdminKubeconfigRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authentication_AdminKubeconfigRequestSpec_To_v1alpha1_AdminKubeconfigRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authentication_AdminKubeconfigRequestStatus_To_v1alpha1_AdminKubeconfigRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_authentication_AdminKubeconfigRequest_To_v1alpha1_AdminKubeconfigRequest is an autogenerated conversion function. +func Convert_authentication_AdminKubeconfigRequest_To_v1alpha1_AdminKubeconfigRequest(in *authentication.AdminKubeconfigRequest, out *AdminKubeconfigRequest, s conversion.Scope) error { + return autoConvert_authentication_AdminKubeconfigRequest_To_v1alpha1_AdminKubeconfigRequest(in, out, s) +} + +func autoConvert_v1alpha1_AdminKubeconfigRequestSpec_To_authentication_AdminKubeconfigRequestSpec(in *AdminKubeconfigRequestSpec, out *authentication.AdminKubeconfigRequestSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int64_To_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_AdminKubeconfigRequestSpec_To_authentication_AdminKubeconfigRequestSpec is an autogenerated conversion function. +func Convert_v1alpha1_AdminKubeconfigRequestSpec_To_authentication_AdminKubeconfigRequestSpec(in *AdminKubeconfigRequestSpec, out *authentication.AdminKubeconfigRequestSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_AdminKubeconfigRequestSpec_To_authentication_AdminKubeconfigRequestSpec(in, out, s) +} + +func autoConvert_authentication_AdminKubeconfigRequestSpec_To_v1alpha1_AdminKubeconfigRequestSpec(in *authentication.AdminKubeconfigRequestSpec, out *AdminKubeconfigRequestSpec, s conversion.Scope) error { + if err := v1.Convert_int64_To_Pointer_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { + return err + } + return nil +} + +// Convert_authentication_AdminKubeconfigRequestSpec_To_v1alpha1_AdminKubeconfigRequestSpec is an autogenerated conversion function. +func Convert_authentication_AdminKubeconfigRequestSpec_To_v1alpha1_AdminKubeconfigRequestSpec(in *authentication.AdminKubeconfigRequestSpec, out *AdminKubeconfigRequestSpec, s conversion.Scope) error { + return autoConvert_authentication_AdminKubeconfigRequestSpec_To_v1alpha1_AdminKubeconfigRequestSpec(in, out, s) +} + +func autoConvert_v1alpha1_AdminKubeconfigRequestStatus_To_authentication_AdminKubeconfigRequestStatus(in *AdminKubeconfigRequestStatus, out *authentication.AdminKubeconfigRequestStatus, s conversion.Scope) error { + out.Kubeconfig = *(*[]byte)(unsafe.Pointer(&in.Kubeconfig)) + out.ExpirationTimestamp = in.ExpirationTimestamp + return nil +} + +// Convert_v1alpha1_AdminKubeconfigRequestStatus_To_authentication_AdminKubeconfigRequestStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_AdminKubeconfigRequestStatus_To_authentication_AdminKubeconfigRequestStatus(in *AdminKubeconfigRequestStatus, out *authentication.AdminKubeconfigRequestStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_AdminKubeconfigRequestStatus_To_authentication_AdminKubeconfigRequestStatus(in, out, s) +} + +func autoConvert_authentication_AdminKubeconfigRequestStatus_To_v1alpha1_AdminKubeconfigRequestStatus(in *authentication.AdminKubeconfigRequestStatus, out *AdminKubeconfigRequestStatus, s conversion.Scope) error { + out.Kubeconfig = *(*[]byte)(unsafe.Pointer(&in.Kubeconfig)) + out.ExpirationTimestamp = in.ExpirationTimestamp + return nil +} + +// Convert_authentication_AdminKubeconfigRequestStatus_To_v1alpha1_AdminKubeconfigRequestStatus is an autogenerated conversion function. +func Convert_authentication_AdminKubeconfigRequestStatus_To_v1alpha1_AdminKubeconfigRequestStatus(in *authentication.AdminKubeconfigRequestStatus, out *AdminKubeconfigRequestStatus, s conversion.Scope) error { + return autoConvert_authentication_AdminKubeconfigRequestStatus_To_v1alpha1_AdminKubeconfigRequestStatus(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..65cdee80 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,97 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminKubeconfigRequest) DeepCopyInto(out *AdminKubeconfigRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminKubeconfigRequest. +func (in *AdminKubeconfigRequest) DeepCopy() *AdminKubeconfigRequest { + if in == nil { + return nil + } + out := new(AdminKubeconfigRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdminKubeconfigRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdminKubeconfigRequestSpec) DeepCopyInto(out *AdminKubeconfigRequestSpec) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminKubeconfigRequestSpec. +func (in *AdminKubeconfigRequestSpec) DeepCopy() *AdminKubeconfigRequestSpec { + if in == nil { + return nil + } + out := new(AdminKubeconfigRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminKubeconfigRequestStatus) DeepCopyInto(out *AdminKubeconfigRequestStatus) { + *out = *in + if in.Kubeconfig != nil { + in, out := &in.Kubeconfig, &out.Kubeconfig + *out = make([]byte, len(*in)) + copy(*out, *in) + } + in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminKubeconfigRequestStatus. +func (in *AdminKubeconfigRequestStatus) DeepCopy() *AdminKubeconfigRequestStatus { + if in == nil { + return nil + } + out := new(AdminKubeconfigRequestStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.defaults.go new file mode 100644 index 00000000..e90796cb --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,38 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AdminKubeconfigRequest{}, func(obj interface{}) { SetObjectDefaults_AdminKubeconfigRequest(obj.(*AdminKubeconfigRequest)) }) + return nil +} + +func SetObjectDefaults_AdminKubeconfigRequest(in *AdminKubeconfigRequest) { + SetDefaults_AdminKubeconfigRequestSpec(&in.Spec) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/authentication/zz_generated.deepcopy.go new file mode 100644 index 00000000..f4f5f2ff --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/authentication/zz_generated.deepcopy.go @@ -0,0 +1,92 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package authentication + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminKubeconfigRequest) DeepCopyInto(out *AdminKubeconfigRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminKubeconfigRequest. +func (in *AdminKubeconfigRequest) DeepCopy() *AdminKubeconfigRequest { + if in == nil { + return nil + } + out := new(AdminKubeconfigRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdminKubeconfigRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminKubeconfigRequestSpec) DeepCopyInto(out *AdminKubeconfigRequestSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminKubeconfigRequestSpec. +func (in *AdminKubeconfigRequestSpec) DeepCopy() *AdminKubeconfigRequestSpec { + if in == nil { + return nil + } + out := new(AdminKubeconfigRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdminKubeconfigRequestStatus) DeepCopyInto(out *AdminKubeconfigRequestStatus) { + *out = *in + if in.Kubeconfig != nil { + in, out := &in.Kubeconfig, &out.Kubeconfig + *out = make([]byte, len(*in)) + copy(*out, *in) + } + in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminKubeconfigRequestStatus. +func (in *AdminKubeconfigRequestStatus) DeepCopy() *AdminKubeconfigRequestStatus { + if in == nil { + return nil + } + out := new(AdminKubeconfigRequestStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/test/framework/cleanup.go b/vendor/github.com/gardener/gardener/test/framework/cleanup.go new file mode 100644 index 00000000..091dc5c0 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/cleanup.go @@ -0,0 +1,80 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file was copied from the kubernetes/kubernetes project +https://github.com/kubernetes/kubernetes/blob/v1.19.0-rc.4/test/e2e/framework/cleanup.go + +Modifications Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. +*/ + +package framework + +import ( + "sync" +) + +// CleanupActionHandle is an integer pointer type for handling cleanup action +type CleanupActionHandle *int +type cleanupFuncHandle struct { + actionHandle CleanupActionHandle + actionHook func() +} + +var cleanupActionsLock sync.Mutex +var cleanupHookList = []cleanupFuncHandle{} + +// AddCleanupAction installs a function that will be called in the event of the +// whole test being terminated. This allows arbitrary pieces of the overall +// test to hook into SynchronizedAfterSuite(). +// The hooks are called in last-in-first-out order. +func AddCleanupAction(fn func()) CleanupActionHandle { + p := CleanupActionHandle(new(int)) + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + c := cleanupFuncHandle{actionHandle: p, actionHook: fn} + cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...) + return p +} + +// RemoveCleanupAction removes a function that was installed by +// AddCleanupAction. +func RemoveCleanupAction(p CleanupActionHandle) { + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + for i, item := range cleanupHookList { + if item.actionHandle == p { + cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...) + break + } + } +} + +// RunCleanupActions runs all functions installed by AddCleanupAction. It does +// not remove them (see RemoveCleanupAction) but it does run unlocked, so they +// may remove themselves. +func RunCleanupActions() { + list := []func(){} + func() { + cleanupActionsLock.Lock() + defer cleanupActionsLock.Unlock() + for _, p := range cleanupHookList { + list = append(list, p.actionHook) + } + }() + // Run unlocked. 
+ for _, fn := range list { + fn() + } +} diff --git a/vendor/github.com/gardener/gardener/test/framework/common.go b/vendor/github.com/gardener/gardener/test/framework/common.go new file mode 100644 index 00000000..fb3d9cef --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/common.go @@ -0,0 +1,55 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import "time" + +const ( + k8sClientInitPollInterval = 20 * time.Second + k8sClientInitTimeout = 5 * time.Minute + defaultPollInterval = 5 * time.Second + + // KubeconfigSecretKeyName is the name of the key in a secret that holds the kubeconfig of a shoot + KubeconfigSecretKeyName = "kubeconfig" + + // valiLogging and valiPort identify the vali logging instance of a shoot + valiLogging = "vali" + valiPort = 3100 + + // IntegrationTestPrefix is the default prefix that will be used for test shoots if none other is specified + IntegrationTestPrefix = "itest-" + + // WorkerNamePrefix is the default prefix that will be used for Shoot workers + WorkerNamePrefix = "worker-" + + // TestMachineryKubeconfigsPathEnvVarName is the name of the environment variable that holds the path to the + // testmachinery provided kubeconfigs. + TestMachineryKubeconfigsPathEnvVarName = "TM_KUBECONFIG_PATH" + + // TestMachineryTestRunIDEnvVarName is the name of the environment variable that holds the testrun ID. + TestMachineryTestRunIDEnvVarName = "TM_TESTRUN_ID" + + // SeedTaintTestRun is the taint used to limit shoots that can be scheduled on a seed to shoots created by the same testrun. + SeedTaintTestRun = "test.gardener.cloud/test-run" +) + +// SearchResponse represents the response from a search query to vali +type SearchResponse struct { + Data struct { + Result []struct { + Value []interface{} `json:"value"` + } `json:"result"` + } `json:"data"` +} diff --git a/vendor/github.com/gardener/gardener/test/framework/commonframework.go b/vendor/github.com/gardener/gardener/test/framework/commonframework.go new file mode 100644 index 00000000..c88dca58 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/commonframework.go @@ -0,0 +1,150 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "flag" + "os" + "path/filepath" + + "github.com/go-logr/logr" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/gardener/gardener/pkg/logger" +) + +var commonCfg *CommonConfig + +// CommonConfig is the configuration for a common framework +type CommonConfig struct { + LogLevel string + DisableStateDump bool + ResourceDir string + ChartDir string +} + +// CommonFramework represents the common gardener test framework that consolidates all +// shared features of the specific test frameworks (system, gardener, shoot) +type CommonFramework struct { + Config *CommonConfig + Logger logr.Logger + DisableStateDump bool + + // ResourcesDir is the absolute path to the resources directory + ResourcesDir string + + // TemplatesDir is the absolute path to the templates directory + TemplatesDir string + + // ChartDir is the absolute path to the helm chart directory + ChartDir string +} + +// NewCommonFramework creates a new common framework and registers its ginkgo BeforeEach setup +func NewCommonFramework(cfg *CommonConfig) *CommonFramework { + f := newCommonFrameworkFromConfig(cfg) + ginkgo.BeforeEach(f.BeforeEach) + return f +} + +// newCommonFrameworkFromConfig creates a new common framework without registering its ginkgo BeforeEach setup +func newCommonFrameworkFromConfig(cfg *CommonConfig) *CommonFramework { + f := &CommonFramework{ + Config: cfg, + } + return f +} + +// BeforeEach should be called in ginkgo's BeforeEach. +// It sets up the common framework. +func (f *CommonFramework) BeforeEach() { + f.Config = mergeCommonConfigs(f.Config, commonCfg) + f.DisableStateDump = f.Config.DisableStateDump + + logf.SetLogger(logger.MustNewZapLogger(f.Config.LogLevel, logger.FormatJSON, zap.WriteTo(ginkgo.GinkgoWriter))) + f.Logger = logf.Log.WithName("test") + + if f.ResourcesDir == "" { + var err error + if f.Config.ResourceDir != "" { + f.ResourcesDir, err = filepath.Abs(f.Config.ResourceDir) + } else { + // This is the default location if the framework is running in one of the gardener/shoot suites.
+ // Otherwise the resource dir has to be adjusted + f.ResourcesDir, err = filepath.Abs(filepath.Join("..", "..", "..", "framework", "resources")) + } + ExpectNoError(err) + } + FileExists(f.ResourcesDir) + + f.TemplatesDir = filepath.Join(f.ResourcesDir, "templates") + + f.ChartDir = filepath.Join(f.ResourcesDir, "charts") + if f.Config.ChartDir != "" { + f.ChartDir = f.Config.ChartDir + } +} + +// CommonAfterSuite performs necessary common steps after all tests of a suite are run +func CommonAfterSuite() { + + // run all registered cleanup functions + RunCleanupActions() + + resourcesDir, err := filepath.Abs(filepath.Join("..", "..", "..", "framework", "resources")) + ExpectNoError(err) + err = os.RemoveAll(filepath.Join(resourcesDir, "charts")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = os.RemoveAll(filepath.Join(resourcesDir, "repository", "cache")) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +func mergeCommonConfigs(base, overwrite *CommonConfig) *CommonConfig { + if base == nil { + return overwrite + } + if overwrite == nil { + return base + } + + if StringSet(overwrite.LogLevel) { + base.LogLevel = overwrite.LogLevel + } + if StringSet(overwrite.ResourceDir) { + base.ResourceDir = overwrite.ResourceDir + } + if StringSet(overwrite.ChartDir) { + base.ChartDir = overwrite.ChartDir + } + if overwrite.DisableStateDump { + base.DisableStateDump = overwrite.DisableStateDump + } + return base +} + +// RegisterCommonFrameworkFlags adds all flags that are needed to configure a common framework to the default flagset. +func RegisterCommonFrameworkFlags() *CommonConfig { + newCfg := &CommonConfig{} + + flag.StringVar(&newCfg.LogLevel, "verbose", logger.InfoLevel, "verbosity level (defaults to info)") + flag.BoolVar(&newCfg.DisableStateDump, "disable-dump", false, "Disable the state dump if a test fails") + + commonCfg = newCfg + return commonCfg +} diff --git a/vendor/github.com/gardener/gardener/test/framework/dump.go b/vendor/github.com/gardener/gardener/test/framework/dump.go new file mode 100644 index 00000000..220a8ab4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/dump.go @@ -0,0 +1,416 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
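[Editorial sketch, not part of the vendored patch: the common framework above is wired into a suite at the package level — RegisterCommonFrameworkFlags registers the -verbose and -disable-dump flags before flag parsing, and CommonAfterSuite is hooked into Ginkgo's AfterSuite. A minimal sketch follows; the package and suite names here are hypothetical, not taken from this patch.]

package e2e_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	"github.com/gardener/gardener/test/framework"
)

// Register the common framework flags (-verbose, -disable-dump) with the default flagset.
var _ = framework.RegisterCommonFrameworkFlags()

// Run all registered cleanup actions and remove downloaded charts once all specs finished.
var _ = ginkgo.AfterSuite(framework.CommonAfterSuite)

func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example E2E Suite")
}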
+ +package framework + +import ( + "bufio" + "bytes" + "context" + "fmt" + "sort" + + "github.com/go-logr/logr" + "github.com/hashicorp/go-multierror" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gardener/gardener/pkg/api/extensions" + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/utils/kubernetes/health" +) + +// DumpDefaultResourcesInAllNamespaces dumps all default k8s resources of all namespaces +func (f *CommonFramework) DumpDefaultResourcesInAllNamespaces(ctx context.Context, k8sClient kubernetes.Interface) error { + namespaces := &corev1.NamespaceList{} + if err := k8sClient.Client().List(ctx, namespaces); err != nil { + return err + } + + var result error + + for _, ns := range namespaces.Items { + if err := f.DumpDefaultResourcesInNamespace(ctx, k8sClient, ns.Name); err != nil { + result = multierror.Append(result, err) + } + } + return result +} + +// DumpDefaultResourcesInNamespace dumps all default K8s resources of a namespace. +func (f *CommonFramework) DumpDefaultResourcesInNamespace(ctx context.Context, k8sClient kubernetes.Interface, namespace string) error { + log := f.Logger.WithValues("namespace", namespace) + + var result error + if err := f.dumpEventsInNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch Events from namespace %s: %s", namespace, err.Error())) + } + if err := f.dumpPodInfoForNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of Pods from namespace %s: %s", namespace, err.Error())) + } + if err := f.dumpDeploymentInfoForNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of Deployments from namespace %s: %s", namespace, err.Error())) + } + if err := f.dumpStatefulSetInfoForNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of StatefulSets from namespace %s: %s", namespace, err.Error())) + } + if err := f.dumpDaemonSetInfoForNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of DaemonSets from namespace %s: %s", namespace, err.Error())) + } + if err := f.dumpServiceInfoForNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of Services from namespace %s: %s", namespace, err.Error())) + } + if err := f.dumpVolumeInfoForNamespace(ctx, log, k8sClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of Volumes from namespace %s: %s", namespace, err.Error())) + } + return result +} + +func (f *GardenerFramework) dumpControlplaneInSeed(ctx context.Context, seed *gardencorev1beta1.Seed, namespace string) error { + log := f.Logger.WithValues("seedName", seed.GetName(), "namespace", namespace) + log.Info("Dumping control plane resources") + + _, seedClient, err := f.GetSeed(ctx, seed.GetName()) + if err != nil { + return err + } + + var result error +
if err := f.dumpGardenerExtensionsInNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump Extensions from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpEventsInNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump Events from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpPodInfoForNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump information of Pods from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpDeploymentInfoForNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump information of Deployments from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpStatefulSetInfoForNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump information of StatefulSets from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpDaemonSetInfoForNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump information of DaemonSets from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpServiceInfoForNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to dump information of Services from namespace %s in seed %s: %w", namespace, seed.Name, err)) + } + if err := f.dumpVolumeInfoForNamespace(ctx, log, seedClient, namespace); err != nil { + result = multierror.Append(result, fmt.Errorf("unable to fetch information of Volumes from namespace %s: %w", namespace, err)) + } + + return result +} + +// dumpGardenerExtensionsInNamespace prints all gardener extension resources in the shoot namespace +func (f *GardenerFramework) dumpGardenerExtensionsInNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + var result *multierror.Error + + for kind, objList := range map[string]client.ObjectList{ + "Infrastructure": &extensionsv1alpha1.InfrastructureList{}, + "ControlPlane": &extensionsv1alpha1.ControlPlaneList{}, + "OperatingSystemConfig": &extensionsv1alpha1.OperatingSystemConfigList{}, + "Worker": &extensionsv1alpha1.WorkerList{}, + "BackupBucket": &extensionsv1alpha1.BackupBucketList{}, + "BackupEntry": &extensionsv1alpha1.BackupEntryList{}, + "Bastion": &extensionsv1alpha1.BastionList{}, + "Network": &extensionsv1alpha1.NetworkList{}, + } { + extensionLog := log.WithValues("kind", kind) + extensionLog.Info("Dumping extensions.gardener.cloud/v1alpha1 resources") + + if err := k8sClient.Client().List(ctx, objList, client.InNamespace(namespace)); err != nil { + result = multierror.Append(result, err) + } + + if err := meta.EachListItem(objList, func(o runtime.Object) error { + obj, err := extensions.Accessor(o) + if err != nil { + return err + } + f.dumpGardenerExtension(extensionLog, obj) + return nil + }); err != nil { + result = multierror.Append(result, err) + } + } + + return result.ErrorOrNil() +} + +// dumpGardenerExtension prints information about a single gardener extension object +func (f *GardenerFramework) dumpGardenerExtension(log logr.Logger, extension extensionsv1alpha1.Object) { + log = log.WithValues("name", extension.GetName(),
"type", extension.GetExtensionSpec().GetExtensionType()) + + if err := health.CheckExtensionObject(extension); err != nil { + log.Info("Found unhealthy extension object", "reason", err.Error()) + } else { + log.Info("Found healthy extension object") + } + + log.Info("Extension object has last operation", "lastOperation", extension.GetExtensionStatus().GetLastOperation()) + if extension.GetExtensionStatus().GetLastError() != nil { + log.Info("Extension object has last error", "lastError", extension.GetExtensionStatus().GetLastError()) + } +} + +// DumpLogsForPodsWithLabelsInNamespace prints the logs of pods in the given namespace selected by the given list options. +func (f *CommonFramework) DumpLogsForPodsWithLabelsInNamespace(ctx context.Context, k8sClient kubernetes.Interface, namespace string, opts ...client.ListOption) error { + pods := &corev1.PodList{} + opts = append(opts, client.InNamespace(namespace)) + if err := k8sClient.Client().List(ctx, pods, opts...); err != nil { + return err + } + + var result error + for _, pod := range pods.Items { + if err := f.DumpLogsForPodInNamespace(ctx, k8sClient, namespace, pod.Name, &corev1.PodLogOptions{}); err != nil { + result = multierror.Append(result, err) + } + } + return result +} + +// DumpLogsForPodInNamespace prints the logs of the pod with the given namespace and name. +func (f *CommonFramework) DumpLogsForPodInNamespace(ctx context.Context, k8sClient kubernetes.Interface, namespace, name string, options *corev1.PodLogOptions) error { + log := f.Logger.WithValues("pod", client.ObjectKey{Namespace: namespace, Name: name}) + log.Info("Dumping logs for corev1.Pod") + + podIf := k8sClient.Kubernetes().CoreV1().Pods(namespace) + logs, err := kubernetes.GetPodLogs(ctx, podIf, name, options) + if err != nil { + return err + } + scanner := bufio.NewScanner(bytes.NewReader(logs)) + for scanner.Scan() { + log.Info(scanner.Text()) //nolint:logcheck + } + + return nil +} + +// dumpDeploymentInfoForNamespace prints information about all Deployments of a namespace +func (f *CommonFramework) dumpDeploymentInfoForNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping appsv1.Deployment resources") + + deployments := &appsv1.DeploymentList{} + if err := k8sClient.Client().List(ctx, deployments, client.InNamespace(namespace)); err != nil { + return err + } + for _, deployment := range deployments.Items { + log = log.WithValues("name", deployment.Name, "replicas", deployment.Status.Replicas, "availableReplicas", deployment.Status.AvailableReplicas) + + if err := health.CheckDeployment(&deployment); err != nil { + log.Info("Found unhealthy Deployment", "reason", err.Error(), "conditions", deployment.Status.Conditions) + continue + } + log.Info("Found healthy Deployment") + } + return nil +} + +// dumpStatefulSetInfoForNamespace prints information about all StatefulSets of a namespace +func (f *CommonFramework) dumpStatefulSetInfoForNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping appsv1.StatefulSet resources") + + statefulSets := &appsv1.StatefulSetList{} + if err := k8sClient.Client().List(ctx, statefulSets, client.InNamespace(namespace)); err != nil { + return err + } + for _, statefulSet := range statefulSets.Items { + log = log.WithValues("name", statefulSet.Name, "replicas", statefulSet.Status.Replicas, "readyReplicas", statefulSet.Status.ReadyReplicas) + + if err := health.CheckStatefulSet(&statefulSet); 
err != nil { + log.Info("Found unhealthy StatefulSet", "reason", err.Error(), "conditions", statefulSet.Status.Conditions) + continue + } + log.Info("Found healthy StatefulSet") + } + return nil +} + +// dumpDaemonSetInfoForNamespace prints information about all DaemonSets of a namespace +func (f *CommonFramework) dumpDaemonSetInfoForNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping appsv1.DaemonSet resources") + + daemonSets := &appsv1.DaemonSetList{} + if err := k8sClient.Client().List(ctx, daemonSets, client.InNamespace(namespace)); err != nil { + return err + } + for _, ds := range daemonSets.Items { + log = log.WithValues("name", ds.Name, "currentNumberScheduled", ds.Status.CurrentNumberScheduled, "desiredNumberScheduled", ds.Status.DesiredNumberScheduled) + + if err := health.CheckDaemonSet(&ds); err != nil { + log.Info("Found unhealthy DaemonSet", "reason", err.Error(), "conditions", ds.Status.Conditions) + continue + } + log.Info("Found healthy DaemonSet") + } + return nil +} + +// dumpNamespaceResource prints information about the Namespace itself +func (f *CommonFramework) dumpNamespaceResource(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping corev1.Namespace resources") + + ns := &corev1.Namespace{} + if err := k8sClient.Client().Get(ctx, client.ObjectKey{Name: namespace}, ns); err != nil { + return err + } + log.Info("Found Namespace", "namespace", ns) + return nil +} + +// dumpServiceInfoForNamespace prints information about all Services of a namespace +func (f *CommonFramework) dumpServiceInfoForNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping corev1.Service resources") + + services := &corev1.ServiceList{} + if err := k8sClient.Client().List(ctx, services, client.InNamespace(namespace)); err != nil { + return err + } + for _, service := range services.Items { + log.Info("Found Service", "service", service) + } + return nil +} + +// dumpVolumeInfoForNamespace prints information about all PVs and PVCs of a namespace +func (f *CommonFramework) dumpVolumeInfoForNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping corev1.PersistentVolumeClaim resources") + + pvcs := &corev1.PersistentVolumeClaimList{} + if err := k8sClient.Client().List(ctx, pvcs, client.InNamespace(namespace)); err != nil { + return err + } + for _, pvc := range pvcs.Items { + log.Info("Found PersistentVolumeClaim", "persistentVolumeClaim", pvc) + } + + log.Info("Dumping corev1.PersistentVolume resources") + + pvs := &corev1.PersistentVolumeList{} + if err := k8sClient.Client().List(ctx, pvs, client.InNamespace(namespace)); err != nil { + return err + } + for _, pv := range pvs.Items { + log.Info("Found PersistentVolume", "persistentVolume", pv) + } + return nil +} + +// dumpNodes prints information about all nodes +func (f *CommonFramework) dumpNodes(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface) error { + log.Info("Dumping corev1.Node resources") + + nodes := &corev1.NodeList{} + if err := k8sClient.Client().List(ctx, nodes); err != nil { + return err + } + for _, node := range nodes.Items { + log = log.WithValues("nodeName", node.Name) + if err := health.CheckNode(&node); err != nil { + log.Info("Found unhealthy Node", "phase", node.Status.Phase, "reason", err.Error(), "conditions", 
node.Status.Conditions) + } else { + log.Info("Found healthy Node", "phase", node.Status.Phase) + } + log.Info("Node resource capacity", "cpu", node.Status.Capacity.Cpu().String(), "memory", node.Status.Capacity.Memory().String()) + + nodeMetric := &metricsv1beta1.NodeMetrics{} + if err := k8sClient.Client().Get(ctx, client.ObjectKey{Name: node.Name}, nodeMetric); err != nil { + log.Error(err, "Unable to receive metrics for node") + continue + } + log.Info("Node resource usage", "cpu", nodeMetric.Usage.Cpu().String(), "memory", nodeMetric.Usage.Memory().String()) + } + return nil +} + +// dumpPodInfoForNamespace prints summary information (name, phase, node) of all pods in a namespace +func (f *CommonFramework) dumpPodInfoForNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string) error { + log.Info("Dumping corev1.Pod resources") + + pods := &corev1.PodList{} + if err := k8sClient.Client().List(ctx, pods, client.InNamespace(namespace)); err != nil { + return err + } + for _, pod := range pods.Items { + log.Info("Found pod", + "podName", pod.Name, + "phase", pod.Status.Phase, + "nodeName", pod.Spec.NodeName, + ) + } + return nil +} + +// dumpEventsInNamespace prints all events of a namespace +func (f *CommonFramework) dumpEventsInNamespace(ctx context.Context, log logr.Logger, k8sClient kubernetes.Interface, namespace string, filters ...EventFilterFunc) error { + log.Info("Dumping corev1.Event resources") + + events := &corev1.EventList{} + if err := k8sClient.Client().List(ctx, events, client.InNamespace(namespace)); err != nil { + return err + } + + if len(events.Items) > 1 { + sort.Sort(eventByFirstTimestamp(events.Items)) + } + for _, event := range events.Items { + if ApplyFilters(event, filters...) { + log.Info("Found event", + "firstTimestamp", event.FirstTimestamp, + "involvedObjectName", event.InvolvedObject.Name, + "source", event.Source, + "reason", event.Reason, + "message", event.Message, + ) + } + } + return nil +} + +// EventFilterFunc is a function to filter events +type EventFilterFunc func(event corev1.Event) bool + +// ApplyFilters returns true only if the event passes all given filters +func ApplyFilters(event corev1.Event, filters ...EventFilterFunc) bool { + for _, filter := range filters { + if !filter(event) { + return false + } + } + return true +} + +// eventByFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker. +type eventByFirstTimestamp []corev1.Event + +func (o eventByFirstTimestamp) Len() int { return len(o) } + +func (o eventByFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o eventByFirstTimestamp) Less(i, j int) bool { + if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) { + return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name + } + return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp) +} diff --git a/vendor/github.com/gardener/gardener/test/framework/errors.go b/vendor/github.com/gardener/gardener/test/framework/errors.go new file mode 100644 index 00000000..c27519db --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/errors.go @@ -0,0 +1,28 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import "errors" + +var ( + // ErrNoRepositoriesFound no repositories found in repository file + ErrNoRepositoriesFound = errors.New("no repositories found in repository file") + + // ErrNoInternalIPsForNodeWasFound no internal IPs were found for node + ErrNoInternalIPsForNodeWasFound = errors.New("no internal IPs were found for node") + + // ErrNoRunningPodsFound no running pods were found + ErrNoRunningPodsFound = errors.New("no running pods were found") +) diff --git a/vendor/github.com/gardener/gardener/test/framework/gardener_utils.go b/vendor/github.com/gardener/gardener/test/framework/gardener_utils.go new file mode 100644 index 00000000..da57050e --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/gardener_utils.go @@ -0,0 +1,522 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
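[Editorial sketch, not part of the vendored patch: the cleanup helpers vendored earlier (cleanup.go) pair with CommonAfterSuite — a test registers a hook via AddCleanupAction, and any hooks still registered run in last-in-first-out order via RunCleanupActions. A small illustrative sketch; deleteTestResources is a made-up helper.]

// Register a cleanup hook that runs if the suite is torn down before the test cleaned up itself.
handle := framework.AddCleanupAction(func() {
	deleteTestResources() // hypothetical helper removing objects the test created
})

// If the test finishes its own cleanup, the hook can be deregistered again.
framework.RemoveCleanupAction(handle)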
+ +package framework + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" + "github.com/gardener/gardener/pkg/client/kubernetes" + gardenerutils "github.com/gardener/gardener/pkg/utils/gardener" + kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes" + "github.com/gardener/gardener/pkg/utils/kubernetes/health" + "github.com/gardener/gardener/pkg/utils/retry" +) + +// GetSeeds returns all registered seeds +func (f *GardenerFramework) GetSeeds(ctx context.Context) ([]gardencorev1beta1.Seed, error) { + seeds := &gardencorev1beta1.SeedList{} + err := f.GardenClient.Client().List(ctx, seeds) + if err != nil { + return nil, fmt.Errorf("could not get Seeds from Garden cluster: %w", err) + } + + return seeds.Items, nil +} + +// GetSeed returns the seed and its k8s client +func (f *GardenerFramework) GetSeed(ctx context.Context, seedName string) (*gardencorev1beta1.Seed, kubernetes.Interface, error) { + seed := &gardencorev1beta1.Seed{} + err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Name: seedName}, seed) + if err != nil { + return nil, nil, fmt.Errorf("could not get Seed from Shoot in Garden cluster: %w", err) + } + + seedSecretRef := seed.Spec.SecretRef + if seedSecretRef == nil { + f.Logger.Info("Seed does not have secretRef set, skip constructing seed client") + return seed, nil, nil + } + + seedClient, err := kubernetes.NewClientFromSecret(ctx, f.GardenClient.Client(), seedSecretRef.Namespace, seedSecretRef.Name, + kubernetes.WithClientOptions(client.Options{Scheme: kubernetes.SeedScheme}), + kubernetes.WithDisabledCachedClient(), + ) + if err != nil { + return nil, nil, fmt.Errorf("could not construct Seed client: %w", err) + } + return seed, seedClient, nil +} + +// GetShoot gets the test shoot +func (f *GardenerFramework) GetShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + return f.GardenClient.Client().Get(ctx, kubernetesutils.Key(shoot.Namespace, shoot.Name), shoot) +} + +// GetShootProject returns the project of a shoot +func (f *GardenerFramework) GetShootProject(ctx context.Context, shootNamespace string) (*gardencorev1beta1.Project, error) { + var ( + project = &gardencorev1beta1.Project{} + ns = &corev1.Namespace{} + ) + if err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Name: shootNamespace}, ns); err != nil { + return nil, fmt.Errorf("could not get the Shoot namespace in Garden cluster: %w", err) + } + + if ns.Labels == nil { + return nil, fmt.Errorf("namespace %q does not have any labels", ns.Name) + } + projectName, ok := ns.Labels[v1beta1constants.ProjectName] + if !ok { + return nil, fmt.Errorf("namespace %q did not contain a project label", ns.Name) + } + + if err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Name: projectName}, project); err != nil { + return nil, fmt.Errorf("could not get Project in Garden cluster: %w", err) + } + return project, nil +} + +// createShootResource creates a shoot from a shoot Object +func (f *GardenerFramework) createShootResource(ctx context.Context, shoot *gardencorev1beta1.Shoot) (*gardencorev1beta1.Shoot, error) { + if err := f.GardenClient.Client().Create(ctx, shoot); err != nil { + return 
nil, err + } + f.Logger.Info("Shoot resource was created", "shoot", client.ObjectKeyFromObject(shoot)) + return shoot, nil +} + +// CreateShoot Creates a shoot from a shoot Object and waits until it is successfully reconciled +func (f *GardenerFramework) CreateShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + err := retry.UntilTimeout(ctx, 20*time.Second, 5*time.Minute, func(ctx context.Context) (done bool, err error) { + _, err = f.createShootResource(ctx, shoot) + if apierrors.IsInvalid(err) || apierrors.IsForbidden(err) || apierrors.IsAlreadyExists(err) { + return retry.SevereError(err) + } + if err != nil { + log.Error(err, "Unable to create shoot") + return retry.MinorError(err) + } + return retry.Ok() + }) + if err != nil { + return err + } + + // Then we wait for the shoot to be created + err = f.WaitForShootToBeCreated(ctx, shoot) + if err != nil { + return err + } + + log.Info("Shoot was created") + return nil +} + +// DeleteShootAndWaitForDeletion deletes the test shoot and waits until it cannot be found any more +func (f *GardenerFramework) DeleteShootAndWaitForDeletion(ctx context.Context, shoot *gardencorev1beta1.Shoot) (rErr error) { + if f.Config.ExistingShootName != "" { + f.Logger.Info("Skip deletion of existing shoot", "shoot", client.ObjectKey{Name: f.Config.ExistingShootName, Namespace: f.ProjectNamespace}) + return nil + } + + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + defer func() { + if rErr != nil { + dumpCtx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + if shootFramework, err := f.NewShootFramework(dumpCtx, shoot); err != nil { + log.Error(err, "Cannot dump shoot state") + } else { + shootFramework.DumpState(dumpCtx) + } + } + }() + + err := f.DeleteShoot(ctx, shoot) + if err != nil { + return err + } + + err = f.WaitForShootToBeDeleted(ctx, shoot) + if err != nil { + return err + } + + log.Info("Shoot was deleted successfully") + return nil +} + +// DeleteShoot deletes the test shoot +func (f *GardenerFramework) DeleteShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + err := retry.UntilTimeout(ctx, 20*time.Second, 5*time.Minute, func(ctx context.Context) (done bool, err error) { + err = f.RemoveShootAnnotation(ctx, shoot, v1beta1constants.ShootIgnore) + if err != nil { + if apierrors.IsNotFound(err) { + return retry.Ok() + } + return retry.MinorError(err) + } + + // First we annotate the shoot to be deleted. 
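+		// Gardener refuses the subsequent delete request unless the shoot carries
+		// the deletion confirmation annotation (gardenerutils.ConfirmationDeletion),
+		// which is what the AnnotateShoot call below sets.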
+ err = f.AnnotateShoot(ctx, shoot, map[string]string{ + gardenerutils.ConfirmationDeletion: "true", + }) + if err != nil { + return retry.MinorError(err) + } + + err = f.GardenClient.Client().Delete(ctx, shoot) + if err != nil { + return retry.MinorError(err) + } + + return retry.Ok() + }) + if err != nil { + return err + } + return nil +} + +// UpdateShoot Updates a shoot from a shoot Object and waits for its reconciliation +func (f *GardenerFramework) UpdateShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot, update func(shoot *gardencorev1beta1.Shoot) error) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + err := retry.UntilTimeout(ctx, 20*time.Second, 5*time.Minute, func(ctx context.Context) (done bool, err error) { + updatedShoot := &gardencorev1beta1.Shoot{} + if err := f.GardenClient.Client().Get(ctx, client.ObjectKeyFromObject(shoot), updatedShoot); err != nil { + return retry.MinorError(err) + } + + if err := update(updatedShoot); err != nil { + return retry.MinorError(err) + } + + if err := f.GardenClient.Client().Update(ctx, updatedShoot); err != nil { + log.Error(err, "Unable to update shoot") + return retry.MinorError(err) + } + *shoot = *updatedShoot + return retry.Ok() + }) + if err != nil { + return err + } + + // Then we wait for the shoot to be created + err = f.WaitForShootToBeReconciled(ctx, shoot) + if err != nil { + return err + } + + log.Info("Shoot was successfully updated") + return nil +} + +// HibernateShoot hibernates the test shoot +func (f *GardenerFramework) HibernateShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + // return if the shoot is already hibernated + if shoot.Spec.Hibernation != nil && shoot.Spec.Hibernation.Enabled != nil && *shoot.Spec.Hibernation.Enabled { + return nil + } + + err := retry.UntilTimeout(ctx, 20*time.Second, 5*time.Minute, func(ctx context.Context) (done bool, err error) { + patch := client.MergeFrom(shoot.DeepCopy()) + setHibernation(shoot, true) + if err := f.GardenClient.Client().Patch(ctx, shoot, patch); err != nil { + return retry.MinorError(err) + } + return retry.Ok() + }) + if err != nil { + return err + } + + if err := f.WaitForShootToBeReconciled(ctx, shoot); err != nil { + return err + } + + if !v1beta1helper.IsWorkerless(shoot) { + // Verify no running pods after hibernation + if err := f.VerifyNoRunningPods(ctx, shoot); err != nil { + return fmt.Errorf("failed to verify no running pods after hibernation: %v", err) + } + } + + log.Info("Shoot was hibernated successfully") + return nil +} + +// WakeUpShoot wakes up the test shoot from hibernation +func (f *GardenerFramework) WakeUpShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + // return if the shoot is already running + if shoot.Spec.Hibernation == nil || shoot.Spec.Hibernation.Enabled == nil || !*shoot.Spec.Hibernation.Enabled { + return nil + } + + err := retry.UntilTimeout(ctx, 20*time.Second, 5*time.Minute, func(ctx context.Context) (done bool, err error) { + patch := client.MergeFrom(shoot.DeepCopy()) + setHibernation(shoot, false) + if err := f.GardenClient.Client().Patch(ctx, shoot, patch); err != nil { + return retry.MinorError(err) + } + return retry.Ok() + }) + if err != nil { + return err + } + + if err := f.WaitForShootToBeReconciled(ctx, shoot); err != nil { + return err + } + + log.Info("Shoot was woken up successfully") + 
return nil +} + +// ScheduleShoot set the Spec.Cloud.Seed of a shoot to the specified seed. +// This is the request the Gardener Scheduler executes after a scheduling decision. +func (f *GardenerFramework) ScheduleShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot, seed *gardencorev1beta1.Seed) error { + shoot.Spec.SeedName = &seed.Name + return f.GardenClient.Client().Update(ctx, shoot) +} + +// WaitForShootToBeCreated waits for the shoot to be created +func (f *GardenerFramework) WaitForShootToBeCreated(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + return retry.UntilTimeout(ctx, 30*time.Second, 60*time.Minute, func(ctx context.Context) (done bool, err error) { + err = f.GardenClient.Client().Get(ctx, client.ObjectKey{Namespace: shoot.Namespace, Name: shoot.Name}, shoot) + if err != nil { + log.Error(err, "Error while waiting for shoot to be created") + return retry.MinorError(err) + } + completed, msg := ShootReconciliationSuccessful(shoot) + if completed { + return retry.Ok() + } + log.Info("Shoot not yet created", "reason", msg) + if shoot.Status.LastOperation != nil { + log.Info("Last Operation", "lastOperation", shoot.Status.LastOperation) + } + return retry.MinorError(fmt.Errorf("shoot %q was not successfully reconciled", shoot.Name)) + }) +} + +// WaitForShootToBeDeleted waits for the shoot to be deleted +func (f *GardenerFramework) WaitForShootToBeDeleted(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + return retry.UntilTimeout(ctx, 30*time.Second, 60*time.Minute, func(ctx context.Context) (done bool, err error) { + err = f.GardenClient.Client().Get(ctx, client.ObjectKey{Namespace: shoot.Namespace, Name: shoot.Name}, shoot) + if err != nil { + if apierrors.IsNotFound(err) { + return retry.Ok() + } + log.Error(err, "Error while waiting for shoot to be deleted") + return retry.MinorError(err) + } + log.Info("Shoot is not yet deleted") + if shoot.Status.LastOperation != nil { + log.Info("Last Operation", "lastOperation", shoot.Status.LastOperation) + } + return retry.MinorError(fmt.Errorf("shoot %q still exists", shoot.Name)) + }) +} + +// WaitForShootToBeReconciled waits for the shoot to be successfully reconciled +func (f *GardenerFramework) WaitForShootToBeReconciled(ctx context.Context, shoot *gardencorev1beta1.Shoot) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(shoot)) + + return retry.UntilTimeout(ctx, 30*time.Second, 60*time.Minute, func(ctx context.Context) (done bool, err error) { + err = f.GardenClient.Client().Get(ctx, client.ObjectKey{Namespace: shoot.Namespace, Name: shoot.Name}, shoot) + if err != nil { + log.Error(err, "Error while waiting for shoot to be reconciled") + return retry.MinorError(err) + } + completed, msg := ShootReconciliationSuccessful(shoot) + if completed { + return retry.Ok() + } + log.Info("Shoot is not yet reconciled", "reason", msg) + if shoot.Status.LastOperation != nil { + log.Info("Last Operation", "lastOperation", shoot.Status.LastOperation) + } + return retry.MinorError(fmt.Errorf("shoot %q was not successfully reconciled", shoot.Name)) + }) +} + +// AnnotateShoot adds shoot annotation(s) +func (f *GardenerFramework) AnnotateShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot, annotations map[string]string) error { + patch := client.MergeFrom(shoot.DeepCopy()) + + for annotationKey, annotationValue := range annotations { + 
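// metav1.SetMetaDataAnnotation initializes the shoot's annotations map if it is nil before setting the key. +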
metav1.SetMetaDataAnnotation(&shoot.ObjectMeta, annotationKey, annotationValue)
+	}
+
+	return f.GardenClient.Client().Patch(ctx, shoot, patch)
+}
+
+// RemoveShootAnnotation removes the annotation with the given key from a shoot object
+func (f *GardenerFramework) RemoveShootAnnotation(ctx context.Context, shoot *gardencorev1beta1.Shoot, annotationKey string) error {
+	if len(shoot.Annotations) == 0 {
+		return nil
+	}
+	if _, ok := shoot.Annotations[annotationKey]; !ok {
+		return nil
+	}
+
+	patch := client.MergeFrom(shoot.DeepCopy())
+	delete(shoot.Annotations, annotationKey)
+
+	return f.GardenClient.Client().Patch(ctx, shoot, patch)
+}
+
+// MigrateShoot changes the spec.seedName of a shoot and waits for it to be migrated
+func (f *GardenerFramework) MigrateShoot(ctx context.Context, shoot *gardencorev1beta1.Shoot, seed *gardencorev1beta1.Seed, prerequisites func(shoot *gardencorev1beta1.Shoot) error) error {
+	if prerequisites != nil {
+		if err := f.UpdateShoot(ctx, shoot, func(shoot *gardencorev1beta1.Shoot) error {
+			return prerequisites(shoot)
+		}); err != nil {
+			return err
+		}
+	}
+
+	if err := f.GetShoot(ctx, shoot); err != nil {
+		return err
+	}
+
+	if _, _, err := f.GetSeed(ctx, seed.Name); err != nil {
+		return err
+	}
+
+	shoot.Spec.SeedName = &seed.Name
+	if err := f.GardenClient.Client().SubResource("binding").Update(ctx, shoot); err != nil {
+		return fmt.Errorf("failed updating binding for shoot %q: %w", client.ObjectKeyFromObject(shoot), err)
+	}
+
+	return f.WaitForShootToBeCreated(ctx, shoot)
+}
+
+// GetCloudProfile returns the CloudProfile with the given name from the garden cluster
+func (f *GardenerFramework) GetCloudProfile(ctx context.Context, name string) (*gardencorev1beta1.CloudProfile, error) {
+	cloudProfile := &gardencorev1beta1.CloudProfile{}
+	if err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Name: name}, cloudProfile); err != nil {
+		return nil, fmt.Errorf("could not get CloudProfile '%s' in Garden cluster: %w", name, err)
+	}
+	return cloudProfile, nil
+}
+
+// DumpState collects all necessary logs and state of the cluster if the test failed
+// TODO: dump extension controller namespaces
+// TODO: dump logs of gardener extension controllers and other system components
+func (f *GardenerFramework) DumpState(ctx context.Context) {
+	if f.DisableStateDump {
+		return
+	}
+	if f.GardenClient == nil {
+		return
+	}
+
+	if err := f.dumpSeeds(ctx); err != nil {
+		f.Logger.Error(err, "Unable to dump seed status")
+	}
+
+	// dump events if the project namespace is set
+	if f.ProjectNamespace != "" {
+		if err := f.dumpEventsInNamespace(ctx, f.Logger, f.GardenClient, f.ProjectNamespace); err != nil {
+			f.Logger.Error(err, "Unable to dump gardener events from project namespace", "namespace", f.ProjectNamespace)
+		}
+	}
+}
+
+// dumpSeeds prints information about all seeds
+func (f *GardenerFramework) dumpSeeds(ctx context.Context) error {
+	f.Logger.Info("Dumping core.gardener.cloud/v1beta1.Seed resources")
+
+	seeds := &gardencorev1beta1.SeedList{}
+	if err := f.GardenClient.Client().List(ctx, seeds); err != nil {
+		return err
+	}
+
+	for _, seed := range seeds.Items {
+		f.dumpSeed(&seed)
+	}
+	return nil
+}
+
+// dumpSeed prints information about a seed
+func (f *GardenerFramework) dumpSeed(seed *gardencorev1beta1.Seed) {
+	log := f.Logger.WithValues("seedName", seed.Name)
+
+	if err := health.CheckSeed(seed, seed.Status.Gardener); err != nil {
+		log.Info("Found unhealthy Seed", "reason", err.Error(), "conditions", seed.Status.Conditions)
+	} else {
+		log.Info("Found healthy Seed")
+	}
+}
+
+func setHibernation(shoot *gardencorev1beta1.Shoot, hibernated bool) {
+	if shoot.Spec.Hibernation != nil {
+		// keep any other hibernation settings if the section already exists
+		shoot.Spec.Hibernation.Enabled = &hibernated
+		return
+	}
+	shoot.Spec.Hibernation = &gardencorev1beta1.Hibernation{
+		Enabled: &hibernated,
+	}
+}
+
+// VerifyNoRunningPods verifies that no pods exist in the shoot's control plane namespace on the seed.
+// If any pods are found, it returns an error with their names. Otherwise, it returns nil.
+func (f *GardenerFramework) VerifyNoRunningPods(ctx context.Context, shoot *gardencorev1beta1.Shoot) error {
+	_, seedClient, err := f.GetSeed(ctx, *shoot.Spec.SeedName)
+	if err != nil {
+		return err
+	}
+
+	shootSeedNamespace := shoot.Status.TechnicalID
+	podList := &metav1.PartialObjectMetadataList{}
+	podList.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PodList"))
+	if err := seedClient.Client().List(ctx, podList, client.InNamespace(shootSeedNamespace)); err != nil {
+		return err
+	}
+
+	if len(podList.Items) > 0 {
+		runningPodNames := []string{}
+		for _, pod := range podList.Items {
+			runningPodNames = append(runningPodNames, pod.Name)
+		}
+		return fmt.Errorf("found pods in namespace %s: %v", shootSeedNamespace, runningPodNames)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/gardener/gardener/test/framework/gardenerframework.go b/vendor/github.com/gardener/gardener/test/framework/gardenerframework.go
new file mode 100644
index 00000000..eca2376e
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/test/framework/gardenerframework.go
@@ -0,0 +1,165 @@
+// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package framework
+
+import (
+	"context"
+	"flag"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+	"github.com/gardener/gardener/pkg/client/kubernetes"
+)
+
+var gardenerCfg *GardenerConfig
+
+// GardenerConfig is the configuration for a gardener framework
+type GardenerConfig struct {
+	CommonConfig       *CommonConfig
+	GardenerKubeconfig string
+	ProjectNamespace   string
+	ExistingShootName  string
+	SkipAccessingShoot bool
+}
+
+// GardenerFramework is the gardener test framework that includes functions for working with a gardener instance
+type GardenerFramework struct {
+	*CommonFramework
+	TestDescription
+	GardenClient kubernetes.Interface
+
+	ProjectNamespace string
+	Config           *GardenerConfig
+}
+
+// NewGardenerFramework creates a new gardener test framework.
+// All required flags are parsed in the suite's BeforeEach.
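+//
+// A minimal suite wiring might look like this (sketch; the shoot object and
+// timeouts are illustrative):
+//
+//	var f = framework.NewGardenerFramework(framework.RegisterGardenerFrameworkFlags())
+//
+//	var _ = ginkgo.Describe("Shoot lifecycle", func() {
+//		framework.CIt("should create and delete a shoot", func(ctx context.Context) {
+//			framework.ExpectNoError(f.CreateShoot(ctx, shoot))
+//			framework.ExpectNoError(f.DeleteShootAndWaitForDeletion(ctx, shoot))
+//		}, 60*time.Minute)
+//	})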
+func NewGardenerFramework(cfg *GardenerConfig) *GardenerFramework { + f := newGardenerFrameworkFromConfig(cfg) + ginkgo.BeforeEach(f.CommonFramework.BeforeEach) + ginkgo.BeforeEach(f.BeforeEach) + CAfterEach(func(ctx context.Context) { + if !ginkgo.CurrentSpecReport().Failed() { + return + } + f.DumpState(ctx) + }, 10*time.Minute) + return f +} + +// newGardenerFrameworkFromConfig creates a new gardener test framework without registering ginkgo specific functions +func newGardenerFrameworkFromConfig(cfg *GardenerConfig) *GardenerFramework { + var commonConfig *CommonConfig + if cfg != nil { + commonConfig = cfg.CommonConfig + } + f := &GardenerFramework{ + CommonFramework: newCommonFrameworkFromConfig(commonConfig), + TestDescription: NewTestDescription("GARDENER"), + Config: cfg, + } + return f +} + +// BeforeEach should be called in ginkgo's BeforeEach. +// It sets up the gardener framework. +func (f *GardenerFramework) BeforeEach() { + f.Config = mergeGardenerConfig(f.Config, gardenerCfg) + validateGardenerConfig(f.Config) + gardenClient, err := kubernetes.NewClientFromFile("", f.Config.GardenerKubeconfig, + kubernetes.WithClientOptions(client.Options{Scheme: kubernetes.GardenScheme}), + kubernetes.WithAllowedUserFields([]string{kubernetes.AuthTokenFile}), + kubernetes.WithDisabledCachedClient(), + ) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + f.GardenClient = gardenClient + + f.ProjectNamespace = f.Config.ProjectNamespace +} + +func validateGardenerConfig(cfg *GardenerConfig) { + if cfg == nil { + ginkgo.Fail("no gardener framework configuration provided") + return // make linters happy + } + if !StringSet(cfg.GardenerKubeconfig) { + ginkgo.Fail("you need to specify the correct path for the kubeconfig") + } + + if !FileExists(cfg.GardenerKubeconfig) { + ginkgo.Fail("kubeconfig path does not exist") + } +} + +func mergeGardenerConfig(base, overwrite *GardenerConfig) *GardenerConfig { + if base == nil { + return overwrite + } + if overwrite == nil { + return base + } + + if overwrite.CommonConfig != nil { + base.CommonConfig = overwrite.CommonConfig + } + if StringSet(overwrite.ProjectNamespace) { + base.ProjectNamespace = overwrite.ProjectNamespace + } + if StringSet(overwrite.GardenerKubeconfig) { + base.GardenerKubeconfig = overwrite.GardenerKubeconfig + } + if overwrite.SkipAccessingShoot { + base.SkipAccessingShoot = overwrite.SkipAccessingShoot + } + if overwrite.ExistingShootName != "" { + base.ExistingShootName = overwrite.ExistingShootName + } + + return base +} + +// RegisterGardenerFrameworkFlags adds all flags that are needed to configure a gardener framework to the provided flagset. 
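+//
+// For example (paths and namespace are illustrative), an e2e run is typically
+// started with:
+//
+//	go test ./test/e2e/... -- -kubecfg=$HOME/.kube/garden-kubeconfig.yaml -project-namespace=garden-local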
+func RegisterGardenerFrameworkFlags() *GardenerConfig { + _ = RegisterCommonFrameworkFlags() + + newCfg := &GardenerConfig{} + + flag.StringVar(&newCfg.ExistingShootName, "existing-shoot-name", "", "Name of an existing shoot to use instead of creating a new one.") + flag.StringVar(&newCfg.GardenerKubeconfig, "kubecfg", "", "the path to the kubeconfig of the garden cluster that will be used for integration tests") + flag.StringVar(&newCfg.ProjectNamespace, "project-namespace", "", "specify the gardener project namespace to run tests") + flag.BoolVar(&newCfg.SkipAccessingShoot, "skip-accessing-shoot", false, "if set to true then the test does not try to access the shoot via its kubeconfig") + + gardenerCfg = newCfg + return gardenerCfg +} + +// NewShootFramework creates a new shoot framework with the current gardener framework +// and a shoot +func (f *GardenerFramework) NewShootFramework(ctx context.Context, shoot *gardencorev1beta1.Shoot) (*ShootFramework, error) { + shootFramework := &ShootFramework{ + GardenerFramework: f, + Config: &ShootConfig{ + GardenerConfig: f.Config, + }, + } + if err := shootFramework.AddShoot(ctx, shoot.GetName(), shoot.GetNamespace()); err != nil { + return nil, err + } + return shootFramework, nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/gingko_utils.go b/vendor/github.com/gardener/gardener/test/framework/gingko_utils.go new file mode 100644 index 00000000..3ab2e123 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/gingko_utils.go @@ -0,0 +1,66 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package framework
+
+import (
+	"context"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+)
+
+// CIt contextifies Ginkgo's It
+func CIt(text string, body func(context.Context), timeout time.Duration) {
+	ginkgo.It(text, contextify(body, timeout), timeout.Seconds())
+}
+
+// FCIt contextifies Ginkgo's FIt
+func FCIt(text string, body func(context.Context), timeout time.Duration) {
+	ginkgo.FIt(text, contextify(body, timeout), timeout.Seconds())
+}
+
+// CAfterSuite contextifies Ginkgo's AfterSuite
+func CAfterSuite(body func(context.Context), timeout time.Duration) {
+	ginkgo.AfterSuite(contextify(body, timeout))
+}
+
+// CAfterEach contextifies Ginkgo's AfterEach
+func CAfterEach(body func(context.Context), timeout time.Duration) {
+	ginkgo.AfterEach(contextify(body, timeout), timeout.Seconds())
+}
+
+// CBeforeSuite contextifies Ginkgo's BeforeSuite
+func CBeforeSuite(body func(context.Context), timeout time.Duration) {
+	ginkgo.BeforeSuite(contextify(body, timeout))
+}
+
+// CBeforeEach contextifies Ginkgo's BeforeEach
+func CBeforeEach(body func(ctx context.Context), timeout time.Duration) {
+	ginkgo.BeforeEach(contextify(body, timeout), timeout.Seconds())
+}
+
+// CJustBeforeEach contextifies Ginkgo's JustBeforeEach
+func CJustBeforeEach(body func(ctx context.Context), timeout time.Duration) {
+	ginkgo.JustBeforeEach(contextify(body, timeout), timeout.Seconds())
+}
+
+func contextify(body func(context.Context), timeout time.Duration) func() {
+	return func() {
+		ctx, cancel := context.WithTimeout(context.Background(), timeout)
+		defer cancel()
+
+		body(ctx)
+	}
+}
diff --git a/vendor/github.com/gardener/gardener/test/framework/gomega_utils.go b/vendor/github.com/gardener/gardener/test/framework/gomega_utils.go
new file mode 100644
index 00000000..dc93285c
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/test/framework/gomega_utils.go
@@ -0,0 +1,24 @@
+// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package framework
+
+import (
+	"github.com/onsi/gomega"
+)
+
+// ExpectNoError asserts that no error occurred
+func ExpectNoError(actual interface{}, extra ...interface{}) {
+	gomega.ExpectWithOffset(1, actual, extra...).ToNot(gomega.HaveOccurred())
+}
diff --git a/vendor/github.com/gardener/gardener/test/framework/helm.go b/vendor/github.com/gardener/gardener/test/framework/helm.go
new file mode 100644
index 00000000..52352b5a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/test/framework/helm.go
@@ -0,0 +1,247 @@
+// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package framework
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/mholt/archiver"
+	"github.com/onsi/ginkgo/v2"
+	"k8s.io/helm/pkg/downloader"
+	"k8s.io/helm/pkg/getter"
+	"k8s.io/helm/pkg/helm/environment"
+	"k8s.io/helm/pkg/helm/helmpath"
+	"k8s.io/helm/pkg/repo"
+
+	"github.com/gardener/gardener/pkg/client/kubernetes"
+)
+
+const (
+	stableRepository = "stable"
+)
+
+// RenderAndDeployChart downloads a helm chart from the helm stable repo URL available in resources/repositories and deploys it on the test cluster
+func (f *CommonFramework) RenderAndDeployChart(ctx context.Context, k8sClient kubernetes.Interface, c Chart, values map[string]interface{}) error {
+	helmRepo := Helm(f.ResourcesDir)
+	err := EnsureRepositoryDirectories(helmRepo)
+	if err != nil {
+		return err
+	}
+
+	ginkgo.By("Download chart artifacts")
+	err = f.DownloadChartArtifacts(ctx, helmRepo, f.ChartDir, c.Name, c.Version)
+	if err != nil {
+		return fmt.Errorf("unable to download chart artifacts for chart %s with version %s: %w", c.Name, c.Version, err)
+	}
+
+	return f.DeployChart(ctx, k8sClient, c.Namespace, f.ChartDir, c.ReleaseName, values)
+}
+
+// DeployChart renders a previously downloaded chart and applies it on the test cluster
+func (f *CommonFramework) DeployChart(ctx context.Context, k8sClient kubernetes.Interface, namespace, chartRepoDestination, chartNameToDeploy string, values map[string]interface{}) error {
+	chartPathToRender := filepath.Join(chartRepoDestination, chartNameToDeploy)
+	return k8sClient.ChartApplier().Apply(ctx, chartPathToRender, namespace, chartNameToDeploy, kubernetes.Values(values), kubernetes.ForceNamespace)
+}
+
+// DownloadChartArtifacts downloads a helm chart from the helm stable repo URL available in resources/repositories
+func (f *CommonFramework) DownloadChartArtifacts(ctx context.Context, helm Helm, chartRepoDestination, chartNameToDownload, chartVersionToDownload string) error {
+	exists, err := Exists(chartRepoDestination)
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		if err := os.MkdirAll(chartRepoDestination, 0755); err != nil {
+			return err
+		}
+	}
+
+	rf, err := repo.LoadRepositoriesFile(helm.RepositoryFile())
+	if err != nil {
+		return err
+	}
+
+	if len(rf.Repositories) == 0 {
+		return ErrNoRepositoriesFound
+	}
+
+	stableRepo := rf.Repositories[0]
+	var chartPath string
+
+	chartDownloaded, err := Exists(filepath.Join(chartRepoDestination, strings.Split(chartNameToDownload, "/")[1]))
+	if err != nil {
+		return err
+	}
+
+	if !chartDownloaded {
+		chartPath, err = downloadChart(ctx, chartNameToDownload, chartVersionToDownload, chartRepoDestination, stableRepo.URL, HelmAccess{
+			HelmPath: helm,
+		})
+		if err != nil {
+			return err
+		}
+		f.Logger.Info("Chart downloaded", "chartPath", chartPath)
+	}
+	return nil
+}
+
+// Chart represents an external helm chart with a specific version and namespace
+type Chart struct {
+	Name        string
+	ReleaseName string
+	Namespace   string
+	Version     string
+}
+
+// Helm is the home for the HELM repo
+type Helm string
+
+// Path returns Helm path with elements appended.
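+// For example (illustrative), Helm("/tmp/helm-home").Path("repository", "cache")
+// returns "/tmp/helm-home/repository/cache".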
+func (h Helm) Path(elem ...string) string {
+	p := []string{h.String()}
+	p = append(p, elem...)
+	return filepath.Join(p...)
+}
+
+// String returns the Helm home directory as a plain string.
+func (h Helm) String() string {
+	return string(h)
+}
+
+// Repository returns the path to the local repository.
+func (h Helm) Repository() string {
+	return h.Path("repository")
+}
+
+// RepositoryFile returns the path to the repositories.yaml file.
+func (h Helm) RepositoryFile() string {
+	return h.Path("repository", "repositories.yaml")
+}
+
+// CacheIndex returns the path to an index for the given named repository.
+func (h Helm) CacheIndex(name string) string {
+	target := fmt.Sprintf("%s-index.yaml", name)
+	return h.Path("repository", "cache", target)
+}
+
+// HelmAccess is a struct that holds the helm home
+type HelmAccess struct {
+	HelmPath Helm
+}
+
+// EnsureRepositoryDirectories creates the repository directory which holds the repositories.yaml config file
+func EnsureRepositoryDirectories(helm Helm) error {
+	configDirectories := []string{
+		helm.String(),
+		helm.Repository(),
+	}
+	for _, p := range configDirectories {
+		fi, err := os.Stat(p)
+		if err != nil {
+			if os.IsNotExist(err) {
+				if err := os.MkdirAll(p, os.ModePerm); err != nil {
+					return fmt.Errorf("unable to create %s: %w", p, err)
+				}
+				continue
+			}
+			return err
+		}
+		if !fi.IsDir() {
+			return fmt.Errorf("%s must be a directory", p)
+		}
+	}
+	return nil
+}
+
+// downloadChart downloads the chart with the given name and version from the stable repository URL into downloadDestination and unarchives it there
+func downloadChart(ctx context.Context, name, version, downloadDestination, stableRepoURL string, helmSettings HelmAccess) (string, error) {
+	providers := getter.All(environment.EnvSettings{})
+	dl := downloader.ChartDownloader{
+		Getters:  providers,
+		HelmHome: helmpath.Home(helmSettings.HelmPath),
+		Out:      os.Stdout,
+	}
+
+	err := ensureCacheIndex(ctx, helmSettings, stableRepoURL, providers)
+	if err != nil {
+		return "", err
+	}
+
+	// Download the chart
+	filename, _, err := dl.DownloadTo(name, version, downloadDestination)
+	if err != nil {
+		return "", err
+	}
+
+	lname, err := filepath.Abs(filename)
+	if err != nil {
+		return "", err
+	}
+
+	err = archiver.Unarchive(lname, downloadDestination)
+	if err != nil {
+		return "", err
+	}
+
+	err = os.Remove(lname)
+	if err != nil {
+		return "", err
+	}
+	// NB: the archive was removed above; callers use the returned path only for logging.
+	return lname, nil
+}
+
+func ensureCacheIndex(ctx context.Context, helmSettings HelmAccess, stableRepoURL string, providers getter.Providers) error {
+	// This will download the cache index file only if it does not exist
+	stableRepoCacheIndexPath := helmSettings.HelmPath.CacheIndex(stableRepository)
+	if _, err := os.Stat(stableRepoCacheIndexPath); err != nil {
+		if os.IsNotExist(err) {
+			directory := filepath.Dir(stableRepoCacheIndexPath)
+			err := os.MkdirAll(directory, os.ModePerm)
+			if err != nil {
+				return err
+			}
+			_, err = downloadCacheIndex(ctx, stableRepoCacheIndexPath, stableRepoURL, providers)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// downloadCacheIndex downloads the cache index for the repository
+func downloadCacheIndex(ctx context.Context, cacheFile, stableRepositoryURL string, providers getter.Providers) (*repo.Entry, error) {
+	c := repo.Entry{
+		Name:  stableRepository,
+		URL:   stableRepositoryURL,
+		Cache: cacheFile,
+	}
+
+	r, err := repo.NewChartRepository(&c, providers)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := r.DownloadIndexFile(""); err != nil {
+		return nil, fmt.Errorf("looks like %q is not a valid chart repository or cannot be
reached: %s", stableRepositoryURL, err.Error()) + } + return &c, nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/http_utils.go b/vendor/github.com/gardener/gardener/test/framework/http_utils.go new file mode 100644 index 00000000..10cc8b12 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/http_utils.go @@ -0,0 +1,84 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "time" +) + +// HTTPGet performs an HTTP GET request with context +func HTTPGet(ctx context.Context, url string) (*http.Response, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + } + httpClient := http.Client{ + Transport: transport, + } + httpRequest, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + httpRequest = httpRequest.WithContext(ctx) + return httpClient.Do(httpRequest) +} + +// TestHTTPEndpointWithBasicAuth validates that a http endpoint can be accessed using basic authentication +func TestHTTPEndpointWithBasicAuth(ctx context.Context, url, userName, password string) error { + return testHTTPEndpointWith(ctx, url, func(r *http.Request) { + r.SetBasicAuth(userName, password) + }) +} + +// TestHTTPEndpointWithToken validates that a http endpoint can be accessed using a bearer token +func TestHTTPEndpointWithToken(ctx context.Context, url, token string) error { + return testHTTPEndpointWith(ctx, url, func(r *http.Request) { + bearerToken := fmt.Sprintf("Bearer %s", token) + r.Header.Set("Authorization", bearerToken) + }) +} + +func testHTTPEndpointWith(ctx context.Context, url string, mutator func(*http.Request)) error { + transport := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyFromEnvironment, + } + + httpClient := http.Client{ + Transport: transport, + Timeout: 5 * time.Second, + } + + httpRequest, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return err + } + + mutator(httpRequest) + httpRequest = httpRequest.WithContext(ctx) + + r, err := httpClient.Do(httpRequest) + if err != nil { + return err + } + if r.StatusCode != http.StatusOK { + return fmt.Errorf("http request should return %d but returned %d instead", http.StatusOK, r.StatusCode) + } + return nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/k8s_utils.go b/vendor/github.com/gardener/gardener/test/framework/k8s_utils.go new file mode 100644 index 00000000..8929f0ae --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/k8s_utils.go @@ -0,0 +1,591 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package framework
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"time"
+
+	"github.com/go-logr/logr"
+	appsv1 "k8s.io/api/apps/v1"
+	authenticationv1 "k8s.io/api/authentication/v1"
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+	v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
+	"github.com/gardener/gardener/pkg/client/kubernetes"
+	"github.com/gardener/gardener/pkg/utils"
+	kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes"
+	"github.com/gardener/gardener/pkg/utils/kubernetes/health"
+	"github.com/gardener/gardener/pkg/utils/retry"
+	"github.com/gardener/gardener/test/utils/access"
+)
+
+// WaitUntilDaemonSetIsRunning waits until the daemon set with the given name is running
+func (f *CommonFramework) WaitUntilDaemonSetIsRunning(ctx context.Context, k8sClient client.Client, name, namespace string) error {
+	return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) {
+		daemonSet := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
+		log := f.Logger.WithValues("daemonSet", client.ObjectKeyFromObject(daemonSet))
+
+		if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(daemonSet), daemonSet); err != nil {
+			return retry.MinorError(err)
+		}
+
+		if err := health.CheckDaemonSet(daemonSet); err != nil {
+			log.Info("Waiting for DaemonSet to be ready")
+			return retry.MinorError(fmt.Errorf("daemon set %q is not healthy: %v", name, err))
+		}
+
+		log.Info("DaemonSet is ready now")
+		return retry.Ok()
+	})
+}
+
+// WaitUntilStatefulSetIsRunning waits until the stateful set with the given name is running
+func (f *CommonFramework) WaitUntilStatefulSetIsRunning(ctx context.Context, name, namespace string, c kubernetes.Interface) error {
+	return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) {
+		statefulSet := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
+		log := f.Logger.WithValues("statefulSet", client.ObjectKeyFromObject(statefulSet))
+
+		if err := c.Client().Get(ctx, client.ObjectKeyFromObject(statefulSet), statefulSet); err != nil {
+			return retry.MinorError(err)
+		}
+
+		if err := health.CheckStatefulSet(statefulSet); err != nil {
+			log.Info("Waiting for StatefulSet to be ready")
+			return retry.MinorError(fmt.Errorf("stateful set %q is not healthy: %v", name, err))
+		}
+
+		log.Info("StatefulSet is ready now")
+		return retry.Ok()
+	})
+}
+
+// WaitUntilIngressIsReady waits until the given ingress is ready
+func (f *CommonFramework) WaitUntilIngressIsReady(ctx context.Context, name string, namespace string, k8sClient kubernetes.Interface) error {
+	return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) {
+		ingress := &networkingv1.Ingress{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
+		log := f.Logger.WithValues("ingress", client.ObjectKeyFromObject(ingress))
+
+		if err := k8sClient.Client().Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, ingress); err != nil {
+			if apierrors.IsNotFound(err) {
+				log.Info("Waiting for ingress to be ready")
+				return retry.MinorError(fmt.Errorf("ingress %q in namespace %q does not exist", name, namespace))
+			}
+			return retry.SevereError(err)
+		}
+
+		if len(ingress.Status.LoadBalancer.Ingress) > 0 {
+			log.Info("Ingress is ready now")
+			return retry.Ok()
+		}
+
+		log.Info("Waiting for Ingress to be ready")
+		return retry.MinorError(fmt.Errorf("ingress %q in namespace %q is not healthy", name, namespace))
+	})
+}
+
+// WaitUntilDeploymentIsReady waits until the given deployment is ready
+func (f *CommonFramework) WaitUntilDeploymentIsReady(ctx context.Context, name string, namespace string, k8sClient kubernetes.Interface) error {
+	return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) {
+		deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
+		log := f.Logger.WithValues("deployment", client.ObjectKeyFromObject(deployment))
+
+		if err := k8sClient.Client().Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, deployment); err != nil {
+			if apierrors.IsNotFound(err) {
+				log.Info("Waiting for Deployment to be ready")
+				return retry.MinorError(fmt.Errorf("deployment %q in namespace %q does not exist", name, namespace))
+			}
+			return retry.SevereError(err)
+		}
+
+		if err := health.CheckDeployment(deployment); err != nil {
+			log.Info("Waiting for Deployment to be ready")
+			return retry.MinorError(fmt.Errorf("deployment %q in namespace %q is not healthy", name, namespace))
+		}
+
+		log.Info("Deployment is ready now")
+		return retry.Ok()
+	})
+}
+
+// WaitUntilDeploymentsWithLabelsIsReady waits until all deployments with the given labels are ready
+func (f *CommonFramework) WaitUntilDeploymentsWithLabelsIsReady(ctx context.Context, deploymentLabels labels.Selector, namespace string, k8sClient kubernetes.Interface) error {
+	log := f.Logger.WithValues("labelSelector", client.MatchingLabelsSelector{Selector: deploymentLabels}.String())
+
+	return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) {
+		deployments := &appsv1.DeploymentList{}
+		if err := k8sClient.Client().List(ctx, deployments, client.MatchingLabelsSelector{Selector: deploymentLabels}, client.InNamespace(namespace)); err != nil {
+			if apierrors.IsNotFound(err) {
+				log.Info("Waiting for deployments to be ready")
+				return retry.MinorError(fmt.Errorf("no deployments with labels '%s' exist", deploymentLabels.String()))
+			}
+			return retry.SevereError(err)
+		}
+
+		for _, deployment := range deployments.Items {
+			err = health.CheckDeployment(&deployment)
+			if err != nil {
+				log.Info("Waiting for deployments to be ready")
+				return retry.MinorError(fmt.Errorf("deployment %q is not healthy: %v", deployment.Name, err))
+			}
+		}
+
+		log.Info("Deployments are ready now")
+		return retry.Ok()
+	})
+}
+
+// WaitUntilNamespaceIsDeleted waits until a namespace has been deleted
+func (f *CommonFramework) WaitUntilNamespaceIsDeleted(ctx context.Context, k8sClient kubernetes.Interface, ns string) error {
+	return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (bool, error) {
+		if err := k8sClient.Client().Get(ctx, client.ObjectKey{Name: ns}, &corev1.Namespace{}); err != nil {
+			if apierrors.IsNotFound(err) {
+				return retry.Ok()
+			}
+			return retry.MinorError(err)
+		}
+		return retry.MinorError(fmt.Errorf("namespace %q is not deleted yet", ns))
+	})
+}
+
+// WaitForNNodesToBeHealthy waits for exactly n nodes to be healthy within a given timeout
+func WaitForNNodesToBeHealthy(ctx context.Context, k8sClient kubernetes.Interface, n int, timeout time.Duration) error {
+	return WaitForNNodesToBeHealthyInWorkerPool(ctx, k8sClient, n, nil, timeout)
+}
+
+// WaitForNNodesToBeHealthyInWorkerPool waits for exactly n nodes in a given worker pool to be healthy within a given timeout
+func WaitForNNodesToBeHealthyInWorkerPool(ctx context.Context, k8sClient kubernetes.Interface, n int, workerGroup *string, timeout time.Duration) error {
+	return retry.UntilTimeout(ctx, defaultPollInterval, timeout, func(ctx context.Context) (done bool, err error) {
+		nodeList, err := GetAllNodesInWorkerPool(ctx, k8sClient, workerGroup)
+		if err != nil {
+			return retry.SevereError(err)
+		}
+
+		nodeCount := len(nodeList.Items)
+		if nodeCount != n {
+			return retry.MinorError(fmt.Errorf("waiting for %d nodes to be ready: only %d nodes registered in the cluster", n, nodeCount))
+		}
+
+		for _, node := range nodeList.Items {
+			if err := health.CheckNode(&node); err != nil {
+				return retry.MinorError(fmt.Errorf("waiting for %d nodes to be ready: node %q is not healthy: %v", n, node.Name, err))
+			}
+		}
+
+		return retry.Ok()
+	})
+}
+
+// GetAllNodes fetches all nodes
+func GetAllNodes(ctx context.Context, c kubernetes.Interface) (*corev1.NodeList, error) {
+	return GetAllNodesInWorkerPool(ctx, c, nil)
+}
+
+// GetAllNodesInWorkerPool fetches all nodes of a specific worker group
+func GetAllNodesInWorkerPool(ctx context.Context, c kubernetes.Interface, workerGroup *string) (*corev1.NodeList, error) {
+	nodeList := &corev1.NodeList{}
+
+	selectorOption := &client.MatchingLabelsSelector{}
+	if workerGroup != nil && len(*workerGroup) > 0 {
+		selectorOption.Selector = labels.SelectorFromSet(labels.Set{"worker.gardener.cloud/pool": *workerGroup})
+	}
+
+	err := c.Client().List(ctx, nodeList, selectorOption)
+	return nodeList, err
+}
+
+// GetPodsByLabels fetches all pods with the desired set of labels
+func GetPodsByLabels(ctx context.Context, labelsSelector labels.Selector, c kubernetes.Interface, namespace string) (*corev1.PodList, error) {
+	podList := &corev1.PodList{}
+	err := c.Client().List(ctx, podList,
+		client.InNamespace(namespace),
+		client.MatchingLabelsSelector{Selector: labelsSelector})
+	if err != nil {
+		return nil, err
+	}
+	return podList, nil
+}
+
+// GetFirstRunningPodWithLabels fetches the first ready pod with the desired set of labels
+func GetFirstRunningPodWithLabels(ctx context.Context, labelsMap labels.Selector, namespace string, client kubernetes.Interface) (*corev1.Pod, error) {
+	var (
+		podList *corev1.PodList
+		err     error
+	)
+	podList, err = GetPodsByLabels(ctx, labelsMap, client, namespace)
+	if err != nil {
+		return nil, err
+	}
+	if len(podList.Items) == 0 {
+		return nil, ErrNoRunningPodsFound
+	}
+
+	for _, pod := range podList.Items {
+		if health.IsPodReady(&pod) {
+			return &pod, nil
+		}
+	}
+
+	return nil, ErrNoRunningPodsFound
+}
+
+// PodExecByLabel executes a command inside the first ready pod that matches the given label selector
+func PodExecByLabel(ctx context.Context, podLabels labels.Selector, podContainer, command,
namespace string, client kubernetes.Interface) (io.Reader, error) { + pod, err := GetFirstRunningPodWithLabels(ctx, podLabels, namespace, client) + if err != nil { + return nil, err + } + + return NewPodExecutor(client).Execute(ctx, pod.Namespace, pod.Name, podContainer, command) +} + +// DeleteAndWaitForResource deletes a kubernetes resource and waits for its deletion +func DeleteAndWaitForResource(ctx context.Context, k8sClient kubernetes.Interface, resource client.Object, timeout time.Duration) error { + if err := kubernetesutils.DeleteObject(ctx, k8sClient.Client(), resource); err != nil { + return err + } + return retry.UntilTimeout(ctx, 5*time.Second, timeout, func(ctx context.Context) (done bool, err error) { + newResource := resource.DeepCopyObject().(client.Object) + if err := k8sClient.Client().Get(ctx, client.ObjectKeyFromObject(resource), newResource); err != nil { + if apierrors.IsNotFound(err) { + return retry.Ok() + } + return retry.MinorError(err) + } + return retry.MinorError(errors.New("Object still exists")) + }) +} + +// ScaleDeployment scales a deployment and waits until it is scaled +func ScaleDeployment(ctx context.Context, client client.Client, desiredReplicas *int32, name, namespace string) (*int32, error) { + if desiredReplicas == nil { + return nil, nil + } + + replicas, err := GetDeploymentReplicas(ctx, client, namespace, name) + if apierrors.IsNotFound(err) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to retrieve the replica count of deployment %q: '%w'", name, err) + } + if replicas == nil || *replicas == *desiredReplicas { + return replicas, nil + } + + // scale the deployment + if err := kubernetes.ScaleDeployment(ctx, client, kubernetesutils.Key(namespace, name), *desiredReplicas); err != nil { + return nil, fmt.Errorf("failed to scale the replica count of deployment %q: '%w'", name, err) + } + + // wait until scaled + if err := WaitUntilDeploymentScaled(ctx, client, namespace, name, *desiredReplicas); err != nil { + return nil, fmt.Errorf("failed to wait until deployment %q is scaled: '%w'", name, err) + } + return replicas, nil +} + +// WaitUntilDeploymentScaled waits until the deployment has the desired replica count in the status +func WaitUntilDeploymentScaled(ctx context.Context, client client.Client, namespace, name string, desiredReplicas int32) error { + return retry.Until(ctx, 5*time.Second, func(ctx context.Context) (done bool, err error) { + deployment := &appsv1.Deployment{} + if err := client.Get(ctx, kubernetesutils.Key(namespace, name), deployment); err != nil { + return retry.SevereError(err) + } + if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != desiredReplicas { + return retry.SevereError(fmt.Errorf("waiting for deployment scale failed. spec.replicas does not match the desired replicas")) + } + + if deployment.Status.Replicas == desiredReplicas && deployment.Status.AvailableReplicas == desiredReplicas { + return retry.Ok() + } + + return retry.MinorError(fmt.Errorf("deployment currently has '%d' replicas. 
Desired: %d", deployment.Status.AvailableReplicas, desiredReplicas)) + }) +} + +// GetDeploymentReplicas gets the spec.Replicas count from a deployment +func GetDeploymentReplicas(ctx context.Context, client client.Client, namespace, name string) (*int32, error) { + deployment := &appsv1.Deployment{} + if err := client.Get(ctx, kubernetesutils.Key(namespace, name), deployment); err != nil { + return nil, err + } + replicas := deployment.Spec.Replicas + return replicas, nil +} + +// ShootReconciliationSuccessful checks if a shoot is successfully reconciled. In case it is not, it also returns a descriptive message stating the reason. +func ShootReconciliationSuccessful(shoot *gardencorev1beta1.Shoot) (bool, string) { + if shoot.Generation != shoot.Status.ObservedGeneration { + return false, "shoot generation did not equal observed generation" + } + if len(shoot.Status.Conditions) == 0 && shoot.Status.LastOperation == nil { + return false, "no conditions and last operation present yet" + } + + shootConditions := sets.New( + gardencorev1beta1.ShootAPIServerAvailable, + gardencorev1beta1.ShootControlPlaneHealthy, + gardencorev1beta1.ShootObservabilityComponentsHealthy, + gardencorev1beta1.ShootSystemComponentsHealthy, + ) + + if !v1beta1helper.IsWorkerless(shoot) { + shootConditions.Insert( + gardencorev1beta1.ShootEveryNodeReady, + ) + } + + for _, condition := range shoot.Status.Conditions { + if condition.Status != gardencorev1beta1.ConditionTrue { + // Only return false if the status of a shoot condition is not True during hibernation. If the shoot also acts as a seed and + // the `gardenlet` that operates the seed has already been shut down as part of the hibernation, the seed conditions will never + // be updated to True if they were previously not True. + hibernation := shoot.Spec.Hibernation + if !shootConditions.Has(condition.Type) && hibernation != nil && pointer.BoolDeref(hibernation.Enabled, false) { + continue + } + return false, fmt.Sprintf("condition type %s is not true yet, had message %s with reason %s", condition.Type, condition.Message, condition.Reason) + } + } + + if shoot.Status.LastOperation != nil { + if shoot.Status.LastOperation.Type == gardencorev1beta1.LastOperationTypeCreate || + shoot.Status.LastOperation.Type == gardencorev1beta1.LastOperationTypeReconcile || + shoot.Status.LastOperation.Type == gardencorev1beta1.LastOperationTypeRestore { + if shoot.Status.LastOperation.State != gardencorev1beta1.LastOperationStateSucceeded { + return false, "last operation type was create, reconcile or restore but state was not succeeded" + } + } else if shoot.Status.LastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate { + return false, "last operation type was migrate, the migration process is not finished yet" + } + } + + return true, "" +} + +// DownloadKubeconfig retrieves the static token kubeconfig for the given shoot and writes the kubeconfig to the +// given download path. +func DownloadKubeconfig(ctx context.Context, client kubernetes.Interface, namespace, name, downloadPath string) error { + kubeconfig, err := GetObjectFromSecret(ctx, client, namespace, name, KubeconfigSecretKeyName) + if err != nil { + return err + } + err = os.WriteFile(downloadPath, []byte(kubeconfig), 0755) + if err != nil { + return err + } + + return nil +} + +// DownloadAdminKubeconfigForShoot requests an admin kubeconfig for the given shoot and writes the kubeconfig to the +// given download path. The kubeconfig expires in 6 hours. 
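+// The kubeconfig is obtained through the shoots/adminkubeconfig subresource
+// (see the vendored test/utils/access package); the expiration is passed in seconds.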
+func DownloadAdminKubeconfigForShoot(ctx context.Context, client kubernetes.Interface, shoot *gardencorev1beta1.Shoot, downloadPath string) error { + const expirationSeconds = 6 * 3600 // 6h + kubeconfig, err := access.RequestAdminKubeconfigForShoot(ctx, client, shoot, pointer.Int64(expirationSeconds)) + if err != nil { + return err + } + + err = os.WriteFile(downloadPath, kubeconfig, 0755) + if err != nil { + return err + } + + return nil +} + +// PatchSecret patches the Secret. +func PatchSecret(ctx context.Context, c client.Client, secret *corev1.Secret) error { + existingSecret := &corev1.Secret{} + if err := c.Get(ctx, client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name}, existingSecret); err != nil { + return err + } + patch := client.MergeFrom(existingSecret.DeepCopy()) + + existingSecret.Data = secret.Data + return c.Patch(ctx, existingSecret, patch) +} + +// GetObjectFromSecret returns object from secret +func GetObjectFromSecret(ctx context.Context, k8sClient kubernetes.Interface, namespace, secretName, objectKey string) (string, error) { + secret := &corev1.Secret{} + err := k8sClient.Client().Get(ctx, client.ObjectKey{Namespace: namespace, Name: secretName}, secret) + if err != nil { + return "", err + } + + if _, ok := secret.Data[objectKey]; ok { + return string(secret.Data[objectKey]), nil + } + return "", fmt.Errorf("secret %s/%s did not contain object key %q", namespace, secretName, objectKey) +} + +// CreateTokenForServiceAccount requests a service account token. +func CreateTokenForServiceAccount(ctx context.Context, k8sClient kubernetes.Interface, serviceAccount *corev1.ServiceAccount, expirationSeconds *int64) (string, error) { + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + ExpirationSeconds: expirationSeconds, + }, + } + + if err := k8sClient.Client().SubResource("token").Create(ctx, serviceAccount, tokenRequest); err != nil { + return "", err + } + + return tokenRequest.Status.Token, nil +} + +// NewClientFromServiceAccount returns a kubernetes client for a service account. 
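+// A typical flow (sketch; the ServiceAccount must already exist or be created
+// first) is:
+//
+//	sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "e2e-test", Namespace: "default"}}
+//	_ = k8sClient.Client().Create(ctx, sa)
+//	saClient, err := NewClientFromServiceAccount(ctx, k8sClient, sa)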
+func NewClientFromServiceAccount(ctx context.Context, k8sClient kubernetes.Interface, serviceAccount *corev1.ServiceAccount) (kubernetes.Interface, error) { + token, err := CreateTokenForServiceAccount(ctx, k8sClient, serviceAccount, pointer.Int64(3600)) + if err != nil { + return nil, err + } + + restConfig := &rest.Config{ + Host: k8sClient.RESTConfig().Host, + TLSClientConfig: rest.TLSClientConfig{ + Insecure: false, + CAData: k8sClient.RESTConfig().CAData, + }, + BearerToken: token, + } + + return kubernetes.NewWithConfig( + kubernetes.WithRESTConfig(restConfig), + kubernetes.WithClientOptions(client.Options{Scheme: kubernetes.GardenScheme}), + kubernetes.WithDisabledCachedClient(), + ) +} + +// WaitUntilPodIsRunning waits until the pod with the given name is running +func WaitUntilPodIsRunning(ctx context.Context, log logr.Logger, name, namespace string, c kubernetes.Interface) error { + return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) { + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}} + podLog := log.WithValues("pod", client.ObjectKeyFromObject(pod)) + + if err := c.Client().Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, pod); err != nil { + return retry.SevereError(err) + } + + if !health.IsPodReady(pod) { + podLog.Info("Waiting for Pod to be ready") + // err is always nil at this point, so it must not be included in the returned error message. + return retry.MinorError(fmt.Errorf(`pod "%s/%s" is not ready yet`, namespace, name)) + } + + podLog.Info("Pod is ready now") + return retry.Ok() + }) +} + +// WaitUntilPodIsRunningWithLabels waits until a pod with the given labels is running +func (f *CommonFramework) WaitUntilPodIsRunningWithLabels(ctx context.Context, labels labels.Selector, podNamespace string, c kubernetes.Interface) error { + return retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (done bool, err error) { + pod, err := GetFirstRunningPodWithLabels(ctx, labels, podNamespace, c) + if err != nil { + return retry.SevereError(err) + } + + log := f.Logger.WithValues("pod", client.ObjectKeyFromObject(pod)) + + if !health.IsPodReady(pod) { + log.Info("Waiting for Pod to be ready") + return retry.MinorError(fmt.Errorf(`pod "%s/%s" is not ready yet`, pod.GetNamespace(), pod.GetName())) + } + + log.Info("Pod is ready now") + return retry.Ok() + }) +} + +// DeployRootPod deploys a pod with root permissions for testing purposes.
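+// +// A hedged sketch of typical usage (the namespace and node name are example assumptions, not framework defaults): +// +//	nodeName := "worker-node-1" +//	pod, err := DeployRootPod(ctx, seedClient.Client(), "kube-system", &nodeName) +//	if err != nil { +//		return err +//	} +//	// The returned pod mounts the node's root filesystem at /hostroot.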
+func DeployRootPod(ctx context.Context, c client.Client, namespace string, nodename *string) (*corev1.Pod, error) { + podPriority := int32(0) + allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyz" + id, err := utils.GenerateRandomStringFromCharset(3, allowedCharacters) + if err != nil { + return nil, err + } + + rootPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rootpod-%s", id), + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "root-container", + Image: "eu.gcr.io/gardener-project/3rd/busybox:1.29.3", + Command: []string{ + "sleep", + "10000000", + }, + Resources: corev1.ResourceRequirements{}, + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: &corev1.SecurityContext{ + Privileged: pointer.Bool(true), + }, + Stdin: true, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "root-volume", + MountPath: "/hostroot", + }, + }, + }, + }, + HostNetwork: true, + HostPID: true, + Priority: &podPriority, + Volumes: []corev1.Volume{ + { + Name: "root-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/", + }, + }, + }, + }, + }, + } + + if nodename != nil { + rootPod.Spec.NodeName = *nodename + } + + if err := c.Create(ctx, &rootPod); err != nil { + return nil, err + } + return &rootPod, nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/pod_executor.go b/vendor/github.com/gardener/gardener/test/framework/pod_executor.go new file mode 100644 index 00000000..cd45776e --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/pod_executor.go @@ -0,0 +1,78 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "strings" + + "k8s.io/client-go/tools/remotecommand" + + "github.com/gardener/gardener/pkg/client/kubernetes" +) + +// NewPodExecutor returns a podExecutor +func NewPodExecutor(client kubernetes.Interface) PodExecutor { + return &podExecutor{ + client: client, + } +} + +// PodExecutor is the pod executor interface +type PodExecutor interface { + Execute(ctx context.Context, namespace, name, containerName, command string) (io.Reader, error) +} + +type podExecutor struct { + client kubernetes.Interface +} + +// Execute executes a command on a pod +func (p *podExecutor) Execute(ctx context.Context, namespace, name, containerName, command string) (io.Reader, error) { + var stdout, stderr bytes.Buffer + request := p.client.Kubernetes().CoreV1().RESTClient(). + Post(). + Resource("pods"). + Name(name). + Namespace(namespace). + SubResource("exec"). + Param("container", containerName). + Param("command", "/bin/sh"). + Param("stdin", "true"). 
+ Param("stdout", "true"). + Param("stderr", "true"). + Param("tty", "false") + + executor, err := remotecommand.NewSPDYExecutor(p.client.RESTConfig(), http.MethodPost, request.URL()) + if err != nil { + return nil, fmt.Errorf("failed to initialized the command exector: %v", err) + } + + err = executor.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdin: strings.NewReader(command), + Stdout: &stdout, + Stderr: &stderr, + Tty: false, + }) + if err != nil { + return &stderr, err + } + + return &stdout, nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/rootpod_executor.go b/vendor/github.com/gardener/gardener/test/framework/rootpod_executor.go new file mode 100644 index 00000000..9128c3f5 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/rootpod_executor.go @@ -0,0 +1,125 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gardener/gardener/pkg/client/kubernetes" +) + +// RootPodExecutor enables the execution of command on the operating system of a node. +// The executor deploys a pod with root privileged on a specified node. +// This pod is then used to execute commands on the host operating system. +type RootPodExecutor interface { + Execute(ctx context.Context, command string) ([]byte, error) + Clean(ctx context.Context) error +} + +// rootPodExecutor is the RootPodExecutor implementation +type rootPodExecutor struct { + log logr.Logger + client kubernetes.Interface + executor PodExecutor + + nodeName *string + namespace string + + Pod *corev1.Pod +} + +// NewRootPodExecutor creates a new root pod executor to run commands on a node. 
+func NewRootPodExecutor(log logr.Logger, c kubernetes.Interface, nodeName *string, namespace string) RootPodExecutor { + executor := NewPodExecutor(c) + return &rootPodExecutor{ + log: log, + client: c, + executor: executor, + nodeName: nodeName, + namespace: namespace, + } +} + +// Clean deletes the deployed pod +func (e *rootPodExecutor) Clean(ctx context.Context) error { + if e.Pod == nil { + return nil + } + + return DeleteAndWaitForResource(ctx, e.client, e.Pod, 2*time.Minute) +} + +// Execute executes a command on the node the root pod is running on +func (e *rootPodExecutor) Execute(ctx context.Context, command string) ([]byte, error) { + isRunning, err := e.checkPodRunning(ctx) + if err != nil { + return nil, err + } + if !isRunning { + if err := e.deploy(ctx); err != nil { + return nil, err + } + } + + command = fmt.Sprintf("chroot /hostroot %s", command) + reader, err := e.executor.Execute(ctx, e.Pod.Namespace, e.Pod.Name, e.Pod.Spec.Containers[0].Name, command) + if err != nil { + return nil, err + } + response, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + return response, nil +} + +// deploy deploys a root pod on the specified node and waits until it is running +func (e *rootPodExecutor) deploy(ctx context.Context) error { + rootPod, err := DeployRootPod(ctx, e.client.Client(), e.namespace, e.nodeName) + if err != nil { + return err + } + if err := WaitUntilPodIsRunning(ctx, e.log, rootPod.Name, rootPod.Namespace, e.client); err != nil { + return err + } + + e.Pod = rootPod + return nil +} + +// checkPodRunning checks if the root pod is still running. +func (e *rootPodExecutor) checkPodRunning(ctx context.Context) (bool, error) { + if e.Pod == nil { + return false, nil + } + + pod := e.Pod.DeepCopy() + if err := e.client.Client().Get(ctx, client.ObjectKeyFromObject(e.Pod), pod); err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + + return true, nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/shoot_utils.go b/vendor/github.com/gardener/gardener/test/framework/shoot_utils.go new file mode 100644 index 00000000..72c83e7f --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/shoot_utils.go @@ -0,0 +1,393 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package framework + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/utils" + "github.com/gardener/gardener/pkg/utils/retry" + versionutils "github.com/gardener/gardener/pkg/utils/version" +) + +// ShootSeedNamespace gets the shoot namespace in the seed +func (f *ShootFramework) ShootSeedNamespace() string { + return ComputeTechnicalID(f.Project.Name, f.Shoot) +} + +// ShootKubeconfigSecretName gets the name of the secret with the kubeconfig of the shoot +func (f *ShootFramework) ShootKubeconfigSecretName() string { + return fmt.Sprintf("%s.kubeconfig", f.Shoot.GetName()) +} + +// GetValiLogs gets logs from the last 1 hour for the given key and value from the vali instance in the given valiNamespace +func (f *ShootFramework) GetValiLogs(ctx context.Context, valiLabels map[string]string, tenant, valiNamespace, key, value string, client kubernetes.Interface) (*SearchResponse, error) { + valiLabelsSelector := labels.SelectorFromSet(labels.Set(valiLabels)) + + if tenant == "" { + tenant = "fake" + } + + query := fmt.Sprintf("query=count_over_time({%s=~\"%s\"}[1h])", key, value) + + command := fmt.Sprintf("wget 'http://localhost:%d/vali/api/v1/query' -O- '--header=X-Scope-OrgID: %s' --post-data='%s'", valiPort, tenant, query) + + var reader io.Reader + err := retry.Until(ctx, defaultPollInterval, func(ctx context.Context) (bool, error) { + var err error + reader, err = PodExecByLabel(ctx, valiLabelsSelector, valiLogging, command, valiNamespace, client) + + if err != nil { + f.Logger.Error(err, "Error exec'ing into pod") + return retry.MinorError(err) + } + return retry.Ok() + }) + if err != nil { + return nil, err + } + + search := &SearchResponse{} + + if err = json.NewDecoder(reader).Decode(search); err != nil { + return nil, err + } + + return search, nil +} + +// DumpState dumps the state of a shoot +// The state includes all k8s components running in the shoot itself as well as the controlplane +func (f *ShootFramework) DumpState(ctx context.Context) { + if f.DisableStateDump { + return + } + + if f.Shoot != nil { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(f.Shoot)) + if err := PrettyPrintObject(f.Shoot); err != nil { + f.Logger.Error(err, "Cannot decode shoot") + } + + isRunning, err := f.IsAPIServerRunning(ctx) + if f.ShootClient != nil && isRunning && err == nil { + if err := f.DumpDefaultResourcesInAllNamespaces(ctx, f.ShootClient); err != nil { + f.Logger.Error(err, "Unable to dump resources from all namespaces in shoot") + } + if err := f.dumpNodes(ctx, log, f.ShootClient); err != nil { + f.Logger.Error(err, "Unable to dump information of nodes from shoot") + } + } else { + errMsg := "" + if err != nil { + errMsg = ": " + err.Error() + } + f.Logger.Error(err, "Unable to dump resources from shoot because API server is currently not running", "reason", errMsg) + } + } + + // dump controlplane in the shoot namespace + if f.Seed != nil && f.SeedClient != nil { + if err := f.dumpControlplaneInSeed(ctx, f.Seed, f.ShootSeedNamespace()); err != nil { + f.Logger.Error(err, "Unable to dump controlplane in seed",
"namespace", f.ShootSeedNamespace()) + } + } + + if f.Shoot != nil { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(f.Shoot)) + + project, err := f.GetShootProject(ctx, f.Shoot.GetNamespace()) + if err != nil { + log.Error(err, "Unable to get project namespace of shoot") + return + } + + // dump seed status if seed is available + if f.Shoot.Spec.SeedName != nil { + seed := &gardencorev1beta1.Seed{} + if err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Name: *f.Shoot.Spec.SeedName}, seed); err != nil { + log.Error(err, "Unable to get seed", "seedName", *f.Shoot.Spec.SeedName) + return + } + f.dumpSeed(seed) + } + + err = f.dumpEventsInNamespace(ctx, log, f.GardenClient, *project.Spec.Namespace, func(event corev1.Event) bool { + return event.InvolvedObject.Name == f.Shoot.Name + }) + if err != nil { + log.Error(err, "Unable to dump Events from project namespace in gardener", "namespace", *project.Spec.Namespace) + } + } +} + +// CreateShootTestArtifacts creates a shoot object from the given path and sets common attributes (test-individual settings like workers have to be handled by each test). +func CreateShootTestArtifacts(cfg *ShootCreationConfig, projectNamespace string, clearDNS bool, clearExtensions bool) (string, *gardencorev1beta1.Shoot, error) { + shoot := &gardencorev1beta1.Shoot{} + if cfg.shootYamlPath != "" { + if err := ReadObject(cfg.shootYamlPath, shoot); err != nil { + return "", nil, err + } + } + + if err := setShootMetadata(shoot, cfg, projectNamespace); err != nil { + return "", nil, err + } + + setShootGeneralSettings(shoot, cfg, clearExtensions) + + setShootNetworkingSettings(shoot, cfg, clearDNS) + + setShootTolerations(shoot) + + setShootControlPlaneHighAvailability(shoot, cfg) + + return shoot.Name, shoot, nil +} + +func parseAnnotationCfg(cfg string) (map[string]string, error) { + if !StringSet(cfg) { + return nil, nil + } + result := make(map[string]string) + annotations := strings.Split(cfg, ",") + for _, annotation := range annotations { + annotation = strings.TrimSpace(annotation) + if !StringSet(annotation) { + continue + } + keyValue := strings.Split(annotation, "=") + if len(keyValue) != 2 { + return nil, fmt.Errorf("annotation %s could not be parsed into key and value", annotation) + } + result[keyValue[0]] = keyValue[1] + } + + return result, nil +} + +// setShootMetadata sets the Shoot's metadata from the given config and project namespace +func setShootMetadata(shoot *gardencorev1beta1.Shoot, cfg *ShootCreationConfig, projectNamespace string) error { + if StringSet(cfg.testShootName) { + shoot.Name = cfg.testShootName + } else { + integrationTestName, err := generateRandomShootName(cfg.testShootPrefix, 8) + if err != nil { + return err + } + shoot.Name = integrationTestName + } + + if StringSet(projectNamespace) { + shoot.Namespace = projectNamespace + } + + if err := setConfiguredShootAnnotations(shoot, cfg); err != nil { + return err + } + + metav1.SetMetaDataAnnotation(&shoot.ObjectMeta, v1beta1constants.AnnotationShootIgnoreAlerts, "true") + + return nil +} + +// setConfiguredShootAnnotations sets annotations from the given config on the given shoot +func setConfiguredShootAnnotations(shoot *gardencorev1beta1.Shoot, cfg *ShootCreationConfig) error { + annotations, err := parseAnnotationCfg(cfg.shootAnnotations) + if err != nil { + return err + } + for k, v := range annotations { + metav1.SetMetaDataAnnotation(&shoot.ObjectMeta, k, v) + } + return nil +} + +// setShootGeneralSettings sets the Shoot's general settings from the 
given config +func setShootGeneralSettings(shoot *gardencorev1beta1.Shoot, cfg *ShootCreationConfig, clearExtensions bool) { + if StringSet(cfg.shootRegion) { + shoot.Spec.Region = cfg.shootRegion + } + + if StringSet(cfg.cloudProfile) { + shoot.Spec.CloudProfileName = cfg.cloudProfile + } + + if StringSet(cfg.secretBinding) { + shoot.Spec.SecretBindingName = pointer.String(cfg.secretBinding) + } + + if StringSet(cfg.shootProviderType) { + shoot.Spec.Provider.Type = cfg.shootProviderType + } + + if StringSet(cfg.shootK8sVersion) { + shoot.Spec.Kubernetes.Version = cfg.shootK8sVersion + } + + if StringSet(cfg.seedName) { + shoot.Spec.SeedName = &cfg.seedName + } + + if cfg.startHibernated { + if shoot.Spec.Hibernation == nil { + shoot.Spec.Hibernation = &gardencorev1beta1.Hibernation{} + } + shoot.Spec.Hibernation.Enabled = &cfg.startHibernated + } + + // Errors are ignored here because we cannot do anything meaningful with them - variables will default to `false`. + k8sLess125, _ := versionutils.CheckVersionMeetsConstraint(shoot.Spec.Kubernetes.Version, "< 1.25") + // This field should not be set for kubernetes version >= 1.25 + if k8sLess125 { + // allow privileged containers defaults to true + if cfg.allowPrivilegedContainers != nil { + shoot.Spec.Kubernetes.AllowPrivilegedContainers = cfg.allowPrivilegedContainers + } + } + + if clearExtensions { + shoot.Spec.Extensions = nil + } +} + +// setShootNetworkingSettings sets the Shoot's networking settings from the given config +func setShootNetworkingSettings(shoot *gardencorev1beta1.Shoot, cfg *ShootCreationConfig, clearDNS bool) { + if StringSet(cfg.externalDomain) { + shoot.Spec.DNS = &gardencorev1beta1.DNS{Domain: &cfg.externalDomain} + clearDNS = false + } + + if StringSet(cfg.networkingType) { + shoot.Spec.Networking.Type = pointer.String(cfg.networkingType) + } + + if StringSet(cfg.networkingPods) { + shoot.Spec.Networking.Pods = &cfg.networkingPods + } + + if StringSet(cfg.networkingServices) { + shoot.Spec.Networking.Services = &cfg.networkingServices + } + + if StringSet(cfg.networkingNodes) { + shoot.Spec.Networking.Nodes = &cfg.networkingNodes + } + + if clearDNS { + shoot.Spec.DNS = &gardencorev1beta1.DNS{} + } +} + +// setShootTolerations sets the Shoot's tolerations +func setShootTolerations(shoot *gardencorev1beta1.Shoot) { + shoot.Spec.Tolerations = []gardencorev1beta1.Toleration{ + { + Key: SeedTaintTestRun, + Value: pointer.String(GetTestRunID()), + }, + } +} + +// SetProviderConfigsFromFilepath parses the infrastructure, controlPlane and networking provider-configs and sets them on the shoot +func SetProviderConfigsFromFilepath(shoot *gardencorev1beta1.Shoot, infrastructureConfigPath, controlPlaneConfigPath, networkingConfigPath string) error { + // clear provider configs first + shoot.Spec.Provider.InfrastructureConfig = nil + shoot.Spec.Provider.ControlPlaneConfig = nil + shoot.Spec.Networking.ProviderConfig = nil + + if StringSet(infrastructureConfigPath) { + infrastructureProviderConfig, err := ParseFileAsProviderConfig(infrastructureConfigPath) + if err != nil { + return err + } + shoot.Spec.Provider.InfrastructureConfig = infrastructureProviderConfig + } + + if StringSet(controlPlaneConfigPath) { + controlPlaneProviderConfig, err := ParseFileAsProviderConfig(controlPlaneConfigPath) + if err != nil { + return err + } + shoot.Spec.Provider.ControlPlaneConfig = controlPlaneProviderConfig + } + + if StringSet(networkingConfigPath) { + networkingProviderConfig, err :=
ParseFileAsProviderConfig(networkingConfigPath) + if err != nil { + return err + } + shoot.Spec.Networking.ProviderConfig = networkingProviderConfig + } + + return nil +} + +func generateRandomShootName(prefix string, length int) (string, error) { + randomString, err := utils.GenerateRandomString(length) + if err != nil { + return "", err + } + + if len(prefix) > 0 { + return prefix + strings.ToLower(randomString), nil + } + + return IntegrationTestPrefix + strings.ToLower(randomString), nil +} + +// PrettyPrintObject prints an object as pretty-printed YAML to stdout +func PrettyPrintObject(obj runtime.Object) error { + d, err := yaml.Marshal(obj) + if err != nil { + return err + } + fmt.Print(string(d)) + return nil +} + +func setShootControlPlaneHighAvailability(shoot *gardencorev1beta1.Shoot, cfg *ShootCreationConfig) { + if StringSet(cfg.controlPlaneFailureTolerance) { + if shoot.Spec.ControlPlane == nil { + shoot.Spec.ControlPlane = &gardencorev1beta1.ControlPlane{ + HighAvailability: &gardencorev1beta1.HighAvailability{ + FailureTolerance: gardencorev1beta1.FailureTolerance{}, + }, + } + } + + if shoot.Spec.ControlPlane.HighAvailability == nil { + shoot.Spec.ControlPlane.HighAvailability = &gardencorev1beta1.HighAvailability{ + FailureTolerance: gardencorev1beta1.FailureTolerance{}, + } + } + shoot.Spec.ControlPlane.HighAvailability.FailureTolerance.Type = gardencorev1beta1.FailureToleranceType(cfg.controlPlaneFailureTolerance) + } +} diff --git a/vendor/github.com/gardener/gardener/test/framework/shootcreationframework.go b/vendor/github.com/gardener/gardener/test/framework/shootcreationframework.go new file mode 100644 index 00000000..4cfade02 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/shootcreationframework.go @@ -0,0 +1,510 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package framework + +import ( + "context" + "flag" + "fmt" + "path/filepath" + "strconv" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" +) + +var shootCreationCfg *ShootCreationConfig + +// ShootCreationConfig is the configuration for a shoot creation framework +type ShootCreationConfig struct { + GardenerConfig *GardenerConfig + + shootKubeconfigPath string + seedKubeconfigPath string + testShootName string + testShootPrefix string + shootMachineImageName string + shootMachineType string + shootMachineImageVersion string + cloudProfile string + seedName string + shootRegion string + secretBinding string + shootProviderType string + shootK8sVersion string + externalDomain string + workerZone string + networkingType string + networkingPods string + networkingServices string + networkingNodes string + startHibernatedFlag string + startHibernated bool + allowPrivilegedContainersFlag string + allowPrivilegedContainers *bool + infrastructureProviderConfig string + controlPlaneProviderConfig string + networkingProviderConfig string + workersConfig string + shootYamlPath string + shootAnnotations string + controlPlaneFailureTolerance string +} + +// ShootCreationFramework represents the shoot test framework that includes +// test functions that can be executed on a specific shoot +type ShootCreationFramework struct { + *GardenerFramework + TestDescription + Config *ShootCreationConfig + + Shoot *gardencorev1beta1.Shoot + + // ShootFramework is initialized once the shoot has been created successfully + ShootFramework *ShootFramework +} + +// NewShootCreationFramework creates a new simple Shoot creation framework +func NewShootCreationFramework(cfg *ShootCreationConfig) *ShootCreationFramework { + var gardenerConfig *GardenerConfig + if cfg != nil { + gardenerConfig = cfg.GardenerConfig + } + + f := &ShootCreationFramework{ + GardenerFramework: NewGardenerFramework(gardenerConfig), + TestDescription: NewTestDescription("SHOOTCREATION"), + Config: cfg, + } + + ginkgo.BeforeEach(func() { + f.GardenerFramework.BeforeEach() + f.BeforeEach() + }) + CAfterEach(f.AfterEach, 10*time.Minute) + return f +} + +// BeforeEach should be called in ginkgo's BeforeEach. +// It sets up the shoot creation framework. +func (f *ShootCreationFramework) BeforeEach() { + if f.Shoot == nil { + f.Config = mergeShootCreationConfig(f.Config, shootCreationCfg) + validateShootCreationConfig(f.Config) + } +} + +// AfterEach should be called in ginkgo's AfterEach.
+// Cleans up resources and dumps the shoot state if the test failed +func (f *ShootCreationFramework) AfterEach(ctx context.Context) { + if ginkgo.CurrentSpecReport().Failed() { + f.DumpState(ctx) + } +} + +func validateShootCreationConfig(cfg *ShootCreationConfig) { + if cfg == nil { + ginkgo.Fail("no shoot creation framework configuration provided") + } + + if StringSet(cfg.shootAnnotations) { + _, err := parseAnnotationCfg(cfg.shootAnnotations) + if err != nil { + ginkgo.Fail(fmt.Sprintf("annotations could not be parsed: %+v", err)) + } + } + + if !StringSet(cfg.shootProviderType) { + ginkgo.Fail("you need to specify the provider type of the shoot") + } + + if StringSet(cfg.shootMachineImageName) && !StringSet(cfg.shootMachineImageVersion) { + ginkgo.Fail("shootMachineImageVersion has to be defined if shootMachineImageName is set") + } + + if StringSet(cfg.shootMachineImageVersion) && !StringSet(cfg.shootMachineImageName) { + ginkgo.Fail("shootMachineImageName has to be defined if shootMachineImageVersion is set") + } + + if StringSet(cfg.startHibernatedFlag) { + parsedBool, err := strconv.ParseBool(cfg.startHibernatedFlag) + if err != nil { + ginkgo.Fail("startHibernated is not a boolean value") + } + cfg.startHibernated = parsedBool + } + + if StringSet(cfg.allowPrivilegedContainersFlag) { + parsedBool, err := strconv.ParseBool(cfg.allowPrivilegedContainersFlag) + if err != nil { + ginkgo.Fail("allowPrivilegedContainers is not a boolean value") + } + cfg.allowPrivilegedContainers = &parsedBool + } + + if StringSet(cfg.infrastructureProviderConfig) { + if !FileExists(cfg.infrastructureProviderConfig) { + ginkgo.Fail(fmt.Sprintf("you need to specify the filepath to the infrastructureProviderConfig for the provider '%s'", cfg.shootProviderType)) + } + } + + if StringSet(cfg.controlPlaneProviderConfig) { + if !FileExists(cfg.controlPlaneProviderConfig) { + ginkgo.Fail(fmt.Sprintf("path to the controlPlaneProviderConfig of the Shoot is invalid: %s", cfg.controlPlaneProviderConfig)) + } + } + + if StringSet(cfg.networkingProviderConfig) { + if !FileExists(cfg.networkingProviderConfig) { + ginkgo.Fail(fmt.Sprintf("path to the networkingProviderConfig of the Shoot is invalid: %s", cfg.networkingProviderConfig)) + } + } + + if StringSet(cfg.workersConfig) { + if !FileExists(cfg.workersConfig) { + ginkgo.Fail(fmt.Sprintf("path to the worker config of the Shoot is invalid: %s", cfg.workersConfig)) + } + } +} + +func mergeShootCreationConfig(base, overwrite *ShootCreationConfig) *ShootCreationConfig { + if base == nil { + return overwrite + } + if overwrite == nil { + return base + } + + if overwrite.GardenerConfig != nil { + base.GardenerConfig = mergeGardenerConfig(base.GardenerConfig, overwrite.GardenerConfig) + } + + if StringSet(overwrite.shootKubeconfigPath) { + base.shootKubeconfigPath = overwrite.shootKubeconfigPath + } + + if StringSet(overwrite.seedKubeconfigPath) { + base.seedKubeconfigPath = overwrite.seedKubeconfigPath + } + + if StringSet(overwrite.testShootName) { + base.testShootName = overwrite.testShootName + } + + if StringSet(overwrite.testShootPrefix) { + base.testShootPrefix = overwrite.testShootPrefix + } + + if StringSet(overwrite.shootAnnotations) { + base.shootAnnotations = overwrite.shootAnnotations + } + + if StringSet(overwrite.shootMachineImageName) { + base.shootMachineImageName = overwrite.shootMachineImageName + } + + if StringSet(overwrite.shootMachineType) { + base.shootMachineType = overwrite.shootMachineType + } + + if
StringSet(overwrite.shootMachineImageVersion) { + base.shootMachineImageVersion = overwrite.shootMachineImageVersion + } + + if StringSet(overwrite.cloudProfile) { + base.cloudProfile = overwrite.cloudProfile + } + + if StringSet(overwrite.seedName) { + base.seedName = overwrite.seedName + } + + if StringSet(overwrite.shootRegion) { + base.shootRegion = overwrite.shootRegion + } + + if StringSet(overwrite.secretBinding) { + base.secretBinding = overwrite.secretBinding + } + + if StringSet(overwrite.shootProviderType) { + base.shootProviderType = overwrite.shootProviderType + } + + if StringSet(overwrite.shootK8sVersion) { + base.shootK8sVersion = overwrite.shootK8sVersion + } + + if StringSet(overwrite.externalDomain) { + base.externalDomain = overwrite.externalDomain + } + + if StringSet(overwrite.workerZone) { + base.workerZone = overwrite.workerZone + } + + if StringSet(overwrite.networkingType) { + base.networkingType = overwrite.networkingType + } + + if StringSet(overwrite.networkingPods) { + base.networkingPods = overwrite.networkingPods + } + + if StringSet(overwrite.networkingServices) { + base.networkingServices = overwrite.networkingServices + } + + if StringSet(overwrite.networkingNodes) { + base.networkingNodes = overwrite.networkingNodes + } + + if StringSet(overwrite.startHibernatedFlag) { + base.startHibernatedFlag = overwrite.startHibernatedFlag + } + + if overwrite.startHibernated { + base.startHibernated = overwrite.startHibernated + } + + if StringSet(overwrite.allowPrivilegedContainersFlag) { + base.allowPrivilegedContainersFlag = overwrite.allowPrivilegedContainersFlag + } + + if overwrite.allowPrivilegedContainers != nil { + base.allowPrivilegedContainers = overwrite.allowPrivilegedContainers + } + + if StringSet(overwrite.infrastructureProviderConfig) { + base.infrastructureProviderConfig = overwrite.infrastructureProviderConfig + } + + if StringSet(overwrite.controlPlaneProviderConfig) { + base.controlPlaneProviderConfig = overwrite.controlPlaneProviderConfig + } + + if StringSet(overwrite.networkingProviderConfig) { + base.networkingProviderConfig = overwrite.networkingProviderConfig + } + + if StringSet(overwrite.workersConfig) { + base.workersConfig = overwrite.workersConfig + } + + if StringSet(overwrite.controlPlaneFailureTolerance) { + base.controlPlaneFailureTolerance = overwrite.controlPlaneFailureTolerance + } + + if StringSet(overwrite.shootYamlPath) { + base.shootYamlPath = overwrite.shootYamlPath + } + + return base +} + +// RegisterShootCreationFrameworkFlags adds all flags that are needed to configure a shoot creation framework to the provided flagset. +func RegisterShootCreationFrameworkFlags() *ShootCreationConfig { + _ = RegisterGardenerFrameworkFlags() + + newCfg := &ShootCreationConfig{} + + flag.StringVar(&newCfg.shootKubeconfigPath, "shoot-kubecfg-path", "", "the path to where the Kubeconfig of the Shoot cluster will be downloaded to. The kubeconfig expires in 6 hours.") + flag.StringVar(&newCfg.seedKubeconfigPath, "seed-kubecfg-path", "", "the path to where the Kubeconfig of the Seed cluster will be downloaded to.") + flag.StringVar(&newCfg.testShootName, "shoot-name", "", "unique name to use for test shoots. Used by test-machinery.") + flag.StringVar(&newCfg.testShootPrefix, "prefix", "", "prefix for generated shoot name. Usually used locally to auto generate a unique name.") + flag.StringVar(&newCfg.shootAnnotations, "annotations", "", "annotations to be added to the test shoot. 
Expected format is key1=val1,key2=val2 (similar to kubectl --selector).") + flag.StringVar(&newCfg.shootMachineImageName, "machine-image-name", "", "the Machine Image Name of the test shoot. Defaults to the first machine image in the CloudProfile.") + flag.StringVar(&newCfg.shootMachineType, "machine-type", "", "the Machine type of the first worker of the test shoot. Needs to match the machine types for that Provider available in the CloudProfile.") + flag.StringVar(&newCfg.shootMachineImageVersion, "machine-image-version", "", "the Machine Image version of the first worker of the test shoot. Needs to be set when the MachineImageName is set.") + flag.StringVar(&newCfg.cloudProfile, "cloud-profile", "", "cloudProfile to use for the shoot.") + flag.StringVar(&newCfg.seedName, "seed", "", "Name of the seed to use for the shoot.") + flag.StringVar(&newCfg.shootRegion, "region", "", "region to use for the shoot. Must be compatible with the infrastructureProvider.Zone.") + flag.StringVar(&newCfg.secretBinding, "secret-binding", "", "the secretBinding for the provider account of the shoot.") + flag.StringVar(&newCfg.shootProviderType, "provider-type", "", "the type of the cloud provider where the shoot is deployed to. e.g. gcp, aws, azure, alicloud.") + flag.StringVar(&newCfg.shootK8sVersion, "k8s-version", "", "kubernetes version to use for the shoot.") + flag.StringVar(&newCfg.externalDomain, "external-domain", "", "external domain to use for the shoot. If not set, will use the default domain.") + flag.StringVar(&newCfg.workerZone, "worker-zone", "", "zone to use for every worker of the shoot.") + flag.StringVar(&newCfg.networkingType, "networking-type", "calico", "the spec.networking.type to use for this shoot. Optional. Defaults to calico.") + flag.StringVar(&newCfg.networkingPods, "networking-pods", "", "the spec.networking.pods to use for this shoot. Optional.") + flag.StringVar(&newCfg.networkingServices, "networking-services", "", "the spec.networking.services to use for this shoot. Optional.") + flag.StringVar(&newCfg.networkingNodes, "networking-nodes", "", "the spec.networking.nodes to use for this shoot. Optional.") + flag.StringVar(&newCfg.startHibernatedFlag, "start-hibernated", "", "the spec.hibernation.enabled to use for this shoot. Optional.") + flag.StringVar(&newCfg.allowPrivilegedContainersFlag, "allow-privileged-containers", "", "the spec.kubernetes.allowPrivilegedContainers to use for this shoot. Optional, defaults to true.") + flag.StringVar(&newCfg.controlPlaneFailureTolerance, "control-plane-failure-tolerance", "", "the .spec.controlPlane.HighAvailability.FailureTolerance.FailureToleranceType to use for this shoot.
Optional, defaults to no failure tolerance") + + if newCfg.networkingType == "" { + newCfg.networkingType = "calico" + } + + newCfg.startHibernated = false + + // ProviderConfigs flags + flag.StringVar(&newCfg.infrastructureProviderConfig, "infrastructure-provider-config-filepath", "", "filepath to the provider specific infrastructure config.") + flag.StringVar(&newCfg.controlPlaneProviderConfig, "controlplane-provider-config-filepath", "", "filepath to the control plane config.") + flag.StringVar(&newCfg.networkingProviderConfig, "networking-provider-config-filepath", "", "filepath to the network provider config.") + flag.StringVar(&newCfg.workersConfig, "workers-config-filepath", "", "filepath to the worker config.") + + // other + flag.StringVar(&newCfg.shootYamlPath, "shoot-template-path", "default-shoot.yaml", "Specify the path to the shoot template that should be used to create the shoot") + + shootCreationCfg = newCfg + return shootCreationCfg +} + +// CreateShootAndWaitForCreation creates a shoot using this framework's configuration and waits for successful creation. +func (f *ShootCreationFramework) CreateShootAndWaitForCreation(ctx context.Context, initializeShootWithFlags bool) error { + if initializeShootWithFlags { + if err := f.InitializeShootWithFlags(ctx); err != nil { + return err + } + } else { + if f.Shoot.Namespace == "" { + f.Shoot.Namespace = f.ProjectNamespace + } + } + + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(f.Shoot)) + + if f.GardenerFramework.Config.ExistingShootName != "" { + shootKey := client.ObjectKey{Namespace: f.GardenerFramework.ProjectNamespace, Name: f.GardenerFramework.Config.ExistingShootName} + if err := f.GardenClient.Client().Get(ctx, shootKey, f.Shoot); err != nil { + return fmt.Errorf("failed to get existing shoot %q: %w", shootKey, err) + } + + shootHealthy, msg := ShootReconciliationSuccessful(f.Shoot) + if !shootHealthy { + return fmt.Errorf("cannot use existing shoot %q for test because it is unhealthy: %s", shootKey, msg) + } + + f.Logger.Info("Using existing shoot for test", "shoot", shootKey) + if err := PrettyPrintObject(f.Shoot); err != nil { + return err + } + } else { + log.Info("Creating shoot") + if err := PrettyPrintObject(f.Shoot); err != nil { + return err + } + + if err := f.GardenerFramework.CreateShoot(ctx, f.Shoot); err != nil { + log.Error(err, "Failed creating shoot") + + dumpCtx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + if shootFramework, err := f.NewShootFramework(dumpCtx, f.Shoot); err != nil { + log.Error(err, "Failed dumping shoot state") + } else { + shootFramework.DumpState(dumpCtx) + } + return err + } + + log.Info("Successfully created shoot") + } + + shootFramework, err := f.NewShootFramework(ctx, f.Shoot) + if err != nil { + return err + } + f.ShootFramework = shootFramework + f.Shoot = shootFramework.Shoot + + if f.Config.shootKubeconfigPath == "" { + f.Logger.Info("Shoot kubeconfig path is not specified, skipping downloading the admin kubeconfig for the Shoot") + } else { + if err := DownloadAdminKubeconfigForShoot(ctx, shootFramework.GardenClient, shootFramework.Shoot, f.Config.shootKubeconfigPath); err != nil { + return fmt.Errorf("failed downloading shoot kubeconfig: %w", err) + } + } + + if f.Config.seedKubeconfigPath == "" { + f.Logger.Info("Seed kubeconfig path is not specified, skipping downloading the static token kubeconfig for the Seed") + } else if seedSecretRef := shootFramework.Seed.Spec.SecretRef; seedSecretRef == nil { + 
f.Logger.Info("Seed does not have secretRef set, skipping constructing seed client") + } else { + if err := DownloadKubeconfig(ctx, shootFramework.GardenClient, shootFramework.Seed.Spec.SecretRef.Namespace, shootFramework.Seed.Spec.SecretRef.Name, f.Config.seedKubeconfigPath); err != nil { + return fmt.Errorf("failed downloading seed kubeconfig: %w", err) + } + } + + log.Info("Finished creating shoot") + return nil +} + +// Verify asserts that the shoot creation was successful. +func (f *ShootCreationFramework) Verify() { + var ( + expectedTechnicalID = fmt.Sprintf("shoot--%s--%s", f.ShootFramework.Project.Name, f.Shoot.Name) + expectedClusterIdentityPrefix = fmt.Sprintf("%s-%s", f.Shoot.Status.TechnicalID, f.Shoot.Status.UID) + ) + + // Shoot with failure tolerance 'zone' should only be scheduled on seed with at least 3 zones. + if v1beta1helper.IsMultiZonalShootControlPlane(f.Shoot) { + gomega.Expect(len(f.ShootFramework.Seed.Spec.Provider.Zones)).Should(gomega.BeNumerically(">=", 3)) + } + + gomega.Expect(f.Shoot.Status.Gardener.ID).NotTo(gomega.BeEmpty()) + gomega.Expect(f.Shoot.Status.Gardener.Name).NotTo(gomega.BeEmpty()) + gomega.Expect(f.Shoot.Status.Gardener.Version).NotTo(gomega.BeEmpty()) + gomega.Expect(f.Shoot.Status.LastErrors).To(gomega.BeEmpty()) + gomega.Expect(f.Shoot.Status.SeedName).NotTo(gomega.BeNil()) + gomega.Expect(*f.Shoot.Status.SeedName).NotTo(gomega.BeEmpty()) + gomega.Expect(f.Shoot.Status.TechnicalID).To(gomega.Equal(expectedTechnicalID)) + gomega.Expect(f.Shoot.Status.UID).NotTo(gomega.BeEmpty()) + gomega.Expect(f.Shoot.Status.ClusterIdentity).NotTo(gomega.BeNil()) + gomega.Expect(*f.Shoot.Status.ClusterIdentity).To(gomega.HavePrefix(expectedClusterIdentityPrefix)) +} + +// InitializeShootWithFlags initializes a shoot to be created by this framework. +func (f *ShootCreationFramework) InitializeShootWithFlags(ctx context.Context) error { + // if running in test machinery, test will be executed from root of the project + if !FileExists(fmt.Sprintf(".%s", f.Config.shootYamlPath)) { + path := f.Config.shootYamlPath + if !filepath.IsAbs(f.Config.shootYamlPath) { + // locally, we need find the example shoot + path = filepath.Join(f.TemplatesDir, f.Config.shootYamlPath) + } + f.Config.shootYamlPath = path + if !FileExists(f.Config.shootYamlPath) { + return fmt.Errorf("shoot template should exist") + } + } + + // parse shoot yaml into shoot object and generate random test names for shoots + _, shootObject, err := CreateShootTestArtifacts(f.Config, f.ProjectNamespace, true, true) + if err != nil { + return err + } + f.Shoot = shootObject + + // set ProviderConfigs + err = SetProviderConfigsFromFilepath(shootObject, f.Config.infrastructureProviderConfig, f.Config.controlPlaneProviderConfig, f.Config.networkingProviderConfig) + if err != nil { + return err + } + + // set worker settings + cloudProfile, err := f.GetCloudProfile(ctx, shootObject.Spec.CloudProfileName) + if err != nil { + return err + } + + return setShootWorkerSettings(shootObject, f.Config, cloudProfile) +} diff --git a/vendor/github.com/gardener/gardener/test/framework/shootframework.go b/vendor/github.com/gardener/gardener/test/framework/shootframework.go new file mode 100644 index 00000000..e360f62b --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/shootframework.go @@ -0,0 +1,325 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/onsi/ginkgo/v2" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" + "github.com/gardener/gardener/pkg/client/kubernetes" + kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes" + "github.com/gardener/gardener/pkg/utils/retry" + "github.com/gardener/gardener/test/utils/access" +) + +var shootCfg *ShootConfig + +// ShootConfig is the configuration for a shoot framework +type ShootConfig struct { + GardenerConfig *GardenerConfig + ShootName string + Fenced bool + SeedScheme *runtime.Scheme + + CreateTestNamespace bool + DisableTestNamespaceCleanup bool + SkipSeedInitialization bool +} + +// ShootFramework represents the shoot test framework that includes +// test functions that can be executed on a specific shoot +type ShootFramework struct { + *GardenerFramework + TestDescription + Config *ShootConfig + + SeedClient kubernetes.Interface + ShootClient kubernetes.Interface + + Seed *gardencorev1beta1.Seed + CloudProfile *gardencorev1beta1.CloudProfile + Shoot *gardencorev1beta1.Shoot + Project *gardencorev1beta1.Project + + Namespace string +} + +// NewShootFramework creates a new simple Shoot framework +func NewShootFramework(cfg *ShootConfig) *ShootFramework { + f := &ShootFramework{ + GardenerFramework: newGardenerFrameworkFromConfig(nil), + TestDescription: NewTestDescription("SHOOT"), + Config: cfg, + } + + CBeforeEach(func(ctx context.Context) { + f.CommonFramework.BeforeEach() + f.GardenerFramework.BeforeEach() + f.BeforeEach(ctx) + }, 8*time.Minute) + CAfterEach(f.AfterEach, 10*time.Minute) + return f +} + +// BeforeEach should be called in ginkgo's BeforeEach. +// It sets up the shoot framework. +func (f *ShootFramework) BeforeEach(ctx context.Context) { + f.Config = mergeShootConfig(f.Config, shootCfg) + validateShootConfig(f.Config) + err := f.AddShoot(ctx, f.Config.ShootName, f.ProjectNamespace) + ExpectNoError(err) + + if f.Config.CreateTestNamespace { + _, err := f.CreateNewNamespace(ctx) + ExpectNoError(err) + } +} + +// AfterEach should be called in ginkgo's AfterEach. 
+// Cleans up resources and dumps the shoot state if the test failed +func (f *ShootFramework) AfterEach(ctx context.Context) { + if ginkgo.CurrentSpecReport().Failed() { + f.DumpState(ctx) + } + if !f.Config.DisableTestNamespaceCleanup && f.Namespace != "" { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: f.Namespace}, + } + f.Namespace = "" + err := f.ShootClient.Client().Delete(ctx, ns) + if err != nil { + if !apierrors.IsNotFound(err) { + ExpectNoError(err) + } + } + err = f.WaitUntilNamespaceIsDeleted(ctx, f.ShootClient, ns.Name) + if err != nil { + timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + + err2 := f.dumpNamespaceResource(timeoutCtx, f.Logger, f.ShootClient, ns.Name) + ExpectNoError(err2) + err2 = f.DumpDefaultResourcesInNamespace(timeoutCtx, f.ShootClient, ns.Name) + ExpectNoError(err2) + } + ExpectNoError(err) + ginkgo.By(fmt.Sprintf("deleted test namespace %s", ns.Name)) + } +} + +// CreateNewNamespace creates a new namespace with a generated name prefixed with "gardener-e2e-". +// The created namespace is automatically cleaned up when the test is finished. +func (f *ShootFramework) CreateNewNamespace(ctx context.Context) (string, error) { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "gardener-e2e-", + }, + } + if err := f.ShootClient.Client().Create(ctx, ns); err != nil { + return "", err + } + + f.Namespace = ns.GetName() + return ns.GetName(), nil +} + +// AddShoot sets the shoot and its seed for the GardenerOperation. +func (f *ShootFramework) AddShoot(ctx context.Context, shootName, shootNamespace string) error { + if f.GardenClient == nil { + return errors.New("no gardener client is defined") + } + + var ( + shoot = &gardencorev1beta1.Shoot{} + err error + ) + + if err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Namespace: shootNamespace, Name: shootName}, shoot); err != nil { + return fmt.Errorf("could not get shoot: %w", err) + } + + f.CloudProfile, err = f.GardenerFramework.GetCloudProfile(ctx, shoot.Spec.CloudProfileName) + if err != nil { + return fmt.Errorf("unable to get cloudprofile %s: %w", shoot.Spec.CloudProfileName, err) + } + + f.Project, err = f.GetShootProject(ctx, shootNamespace) + if err != nil { + return err + } + + // seed could be temporarily offline so no specified seed is a valid state + if shoot.Spec.SeedName != nil && !f.Config.SkipSeedInitialization { + f.Seed, f.SeedClient, err = f.GetSeed(ctx, *shoot.Spec.SeedName) + if err != nil { + return err + } + } + + f.Shoot = shoot + + if f.Shoot.Spec.Hibernation != nil && f.Shoot.Spec.Hibernation.Enabled != nil && *f.Shoot.Spec.Hibernation.Enabled { + return nil + } + + if !f.GardenerFramework.Config.SkipAccessingShoot { + var shootClient kubernetes.Interface + if err := retry.UntilTimeout(ctx, k8sClientInitPollInterval, k8sClientInitTimeout, func(ctx context.Context) (bool, error) { + shootClient, err = access.CreateShootClientFromAdminKubeconfig(ctx, f.GardenClient, f.Shoot) + if err != nil { + return retry.MinorError(fmt.Errorf("could not construct Shoot client: %w", err)) + } + return retry.Ok() + }); err != nil { + return err + } + + f.ShootClient = shootClient + } + + return nil +} + +func validateShootConfig(cfg *ShootConfig) { + if cfg == nil { + ginkgo.Fail("no shoot framework configuration provided") + } + if !StringSet(cfg.ShootName) { + ginkgo.Fail("You should specify a shootName to test against") + } +} + +func mergeShootConfig(base, overwrite *ShootConfig) *ShootConfig { + if base == nil { + 
return overwrite + } + if overwrite == nil { + return base + } + + if overwrite.GardenerConfig != nil { + base.GardenerConfig = overwrite.GardenerConfig + } + if StringSet(overwrite.ShootName) { + base.ShootName = overwrite.ShootName + } + if overwrite.CreateTestNamespace { + base.CreateTestNamespace = overwrite.CreateTestNamespace + } + if overwrite.DisableTestNamespaceCleanup { + base.DisableTestNamespaceCleanup = overwrite.DisableTestNamespaceCleanup + } + + return base +} + +// RegisterShootFrameworkFlags adds all flags that are needed to configure a shoot framework to the provided flagset. +func RegisterShootFrameworkFlags() *ShootConfig { + _ = RegisterGardenerFrameworkFlags() + + newCfg := &ShootConfig{} + + flag.StringVar(&newCfg.ShootName, "shoot-name", "", "name of the shoot") + flag.BoolVar(&newCfg.Fenced, "fenced", false, + "indicates if the shoot is running in a fenced environment which means that the shoot can only be reached from the gardenlet") + + shootCfg = newCfg + return shootCfg +} + +// HibernateShoot hibernates the shoot of the framework +func (f *ShootFramework) HibernateShoot(ctx context.Context) error { + return f.GardenerFramework.HibernateShoot(ctx, f.Shoot) +} + +// WakeUpShoot wakes up the hibernated shoot of the framework +func (f *ShootFramework) WakeUpShoot(ctx context.Context) error { + return f.GardenerFramework.WakeUpShoot(ctx, f.Shoot) +} + +// UpdateShoot updates a shoot from a shoot object and waits for its reconciliation +func (f *ShootFramework) UpdateShoot(ctx context.Context, update func(shoot *gardencorev1beta1.Shoot) error) error { + return f.GardenerFramework.UpdateShoot(ctx, f.Shoot, update) +} + +// GetCloudProfile returns the cloudprofile of the shoot +func (f *ShootFramework) GetCloudProfile(ctx context.Context) (*gardencorev1beta1.CloudProfile, error) { + cloudProfile := &gardencorev1beta1.CloudProfile{} + if err := f.GardenClient.Client().Get(ctx, client.ObjectKey{Name: f.Shoot.Spec.CloudProfileName}, cloudProfile); err != nil { + return nil, fmt.Errorf("could not get Shoot's CloudProfile in Garden cluster: %w", err) + } + return cloudProfile, nil +} + +// WaitForShootCondition waits for the shoot to contain the specified condition +func (f *ShootFramework) WaitForShootCondition(ctx context.Context, interval, timeout time.Duration, conditionType gardencorev1beta1.ConditionType, conditionStatus gardencorev1beta1.ConditionStatus) error { + log := f.Logger.WithValues("shoot", client.ObjectKeyFromObject(f.Shoot)) + + return retry.UntilTimeout(ctx, interval, timeout, func(ctx context.Context) (done bool, err error) { + shoot := &gardencorev1beta1.Shoot{} + err = f.GardenClient.Client().Get(ctx, client.ObjectKey{Namespace: f.Shoot.Namespace, Name: f.Shoot.Name}, shoot) + if err != nil { + log.Error(err, "Error while waiting for shoot to have expected condition") + return retry.MinorError(err) + } + + cond := helper.GetCondition(shoot.Status.Conditions, conditionType) + if cond != nil && cond.Status == conditionStatus { + return retry.Ok() + } + + log = log.WithValues("expectedConditionType", conditionType, "expectedConditionStatus", conditionStatus) + + if cond == nil { + log.Info("Waiting for shoot to have expected condition status, currently the condition is not present") + return retry.MinorError(fmt.Errorf("shoot %q does not yet have expected condition status", shoot.Name)) + } + + log.Info("Waiting for shoot to have expected condition status", "currentConditionStatus", cond.Status) + return retry.MinorError(fmt.Errorf("shoot %q does not
yet have expected condition", shoot.Name)) + }) +} + +// IsAPIServerRunning checks if the Shoot's API server deployment is present, not yet deleted, and has at least one +// available replica. +func (f *ShootFramework) IsAPIServerRunning(ctx context.Context) (bool, error) { + deployment := &appsv1.Deployment{} + if err := f.SeedClient.Client().Get(ctx, kubernetesutils.Key(f.ShootSeedNamespace(), v1beta1constants.DeploymentNameKubeAPIServer), deployment); err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + + if deployment.GetDeletionTimestamp() != nil { + return false, nil + } + + return deployment.Status.AvailableReplicas > 0, nil +} diff --git a/vendor/github.com/gardener/gardener/test/framework/shootmigrationtest.go b/vendor/github.com/gardener/gardener/test/framework/shootmigrationtest.go new file mode 100644 index 00000000..0e55e911 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/shootmigrationtest.go @@ -0,0 +1,491 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + resourcesv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/utils" + secretsmanager "github.com/gardener/gardener/pkg/utils/secrets/manager" + "github.com/gardener/gardener/test/utils/access" +) + +// ShootMigrationTest represents a shoot migration test. +// It can be used to test the migration of shoots between various seeds.
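+// +// A hedged usage sketch (the GardenerFramework instance and the config values are assumptions of the example): +// +//	cfg := &ShootMigrationConfig{ +//		ShootName:      "my-shoot", +//		ShootNamespace: "garden-local", +//		TargetSeedName: "seed-2", +//	} +//	t, err := NewShootMigrationTest(ctx, gardenerFramework, cfg) +//	if err != nil { +//		return err +//	} +//	err = t.MigrateShoot(ctx)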
+type ShootMigrationTest struct { + GardenerFramework *GardenerFramework + Config *ShootMigrationConfig + TargetSeedClient kubernetes.Interface + SourceSeedClient kubernetes.Interface + ShootClient kubernetes.Interface + TargetSeed *gardencorev1beta1.Seed + SourceSeed *gardencorev1beta1.Seed + ComparisonElementsBeforeMigration ShootComparisonElements + ComparisonElementsAfterMigration ShootComparisonElements + Shoot gardencorev1beta1.Shoot + SeedShootNamespace string + MigrationTime metav1.Time +} + +// ShootMigrationConfig is the configuration for a shoot migration test that will be filled with user provided data +type ShootMigrationConfig struct { + TargetSeedName string + SourceSeedName string + ShootName string + ShootNamespace string + AddTestRunTaint string + SkipNodeCheck bool + SkipMachinesCheck bool + SkipShootClientCreation bool + SkipProtectedToleration bool +} + +// ShootComparisonElements contains details about Machines and Nodes that will be compared during the tests +type ShootComparisonElements struct { + MachineNames []string + MachineNodes []string + NodeNames []string + SecretsMap map[string]corev1.Secret +} + +// NewShootMigrationTest creates a new simple shoot migration test +func NewShootMigrationTest(ctx context.Context, f *GardenerFramework, cfg *ShootMigrationConfig) (*ShootMigrationTest, error) { + t := &ShootMigrationTest{ + GardenerFramework: f, + Config: cfg, + } + return t, t.initializeShootMigrationTest(ctx) +} + +func (t *ShootMigrationTest) initializeShootMigrationTest(ctx context.Context) error { + if err := t.initShootAndClient(ctx); err != nil { + return err + } + t.SeedShootNamespace = ComputeTechnicalID(t.GardenerFramework.ProjectNamespace, &t.Shoot) + + if err := t.initSeedsAndClients(ctx); err != nil { + return err + } + + return t.populateBeforeMigrationComparisonElements(ctx) +} + +func (t *ShootMigrationTest) initShootAndClient(ctx context.Context) (err error) { + shoot := &gardencorev1beta1.Shoot{ObjectMeta: metav1.ObjectMeta{Name: t.Config.ShootName, Namespace: t.Config.ShootNamespace}} + if err = t.GardenerFramework.GetShoot(ctx, shoot); err != nil { + return err + } + + if !shoot.Status.IsHibernated && !t.Config.SkipShootClientCreation { + t.ShootClient, err = access.CreateShootClientFromAdminKubeconfig(ctx, t.GardenerFramework.GardenClient, shoot) + if err != nil { + return err + } + } + t.Shoot = *shoot + return +} + +func (t *ShootMigrationTest) initSeedsAndClients(ctx context.Context) error { + t.Config.SourceSeedName = *t.Shoot.Spec.SeedName + seed, seedClient, err := t.GardenerFramework.GetSeed(ctx, t.Config.TargetSeedName) + if err != nil { + return err + } + t.TargetSeedClient = seedClient + t.TargetSeed = seed + + seed, seedClient, err = t.GardenerFramework.GetSeed(ctx, t.Config.SourceSeedName) + if err != nil { + return err + } + t.SourceSeedClient = seedClient + t.SourceSeed = seed + return nil +} + +// MigrateShoot triggers shoot migration by changing the value of "shoot.Spec.SeedName" to the value of "ShootMigrationConfig.TargetSeedName" +func (t *ShootMigrationTest) MigrateShoot(ctx context.Context) error { + // Dump gardener state if delete shoot is in exit handler + if os.Getenv("TM_PHASE") == "Exit" { + if shootFramework, err := t.GardenerFramework.NewShootFramework(ctx, &t.Shoot); err == nil { + shootFramework.DumpState(ctx) + } else { + t.GardenerFramework.DumpState(ctx) + } + } + + t.MigrationTime = metav1.Now() + return t.GardenerFramework.MigrateShoot(ctx, &t.Shoot, t.TargetSeed, func(shoot *gardencorev1beta1.Shoot) error 
{
+		if !t.Config.SkipProtectedToleration {
+			shoot.Spec.Tolerations = appendToleration(shoot.Spec.Tolerations, gardencorev1beta1.SeedTaintProtected, nil)
+		}
+		if applyTestRunTaint, err := strconv.ParseBool(t.Config.AddTestRunTaint); applyTestRunTaint && err == nil {
+			shoot.Spec.Tolerations = appendToleration(shoot.Spec.Tolerations, SeedTaintTestRun, pointer.String(GetTestRunID()))
+		}
+		return nil
+	})
+}
+
+func appendToleration(tolerations []gardencorev1beta1.Toleration, key string, value *string) []gardencorev1beta1.Toleration {
+	toleration := gardencorev1beta1.Toleration{
+		Key:   key,
+		Value: value,
+	}
+	if tolerations == nil {
+		tolerations = make([]gardencorev1beta1.Toleration, 0)
+	} else {
+		for i, t := range tolerations {
+			if t.Key == key {
+				// Update via the index; assigning to the loop variable would only mutate a copy.
+				tolerations[i].Value = value
+				return tolerations
+			}
+		}
+	}
+	return append(tolerations, toleration)
+}
+
+// VerifyMigration checks that the shoot components are migrated properly
+func (t ShootMigrationTest) VerifyMigration(ctx context.Context) error {
+	if err := t.populateAfterMigrationComparisonElements(ctx); err != nil {
+		return err
+	}
+
+	ginkgo.By("Compare all Machines, Nodes and persisted Secrets after the migration")
+	if err := t.compareElementsAfterMigration(); err != nil {
+		return err
+	}
+
+	ginkgo.By("Check for orphaned resources")
+	return t.checkForOrphanedNonNamespacedResources(ctx)
+}
+
+// GetNodeNames uses the shootClient to fetch all Node names from the Shoot
+func (t *ShootMigrationTest) GetNodeNames(ctx context.Context, shootClient kubernetes.Interface) (nodeNames []string, err error) {
+	if t.Shoot.Status.IsHibernated {
+		return make([]string, 0), nil // Initialize to an empty slice in order to pass the 0-element DeepEqual check
+	}
+
+	nodeList := corev1.NodeList{}
+	t.GardenerFramework.Logger.Info("Listing nodes")
+	if err := shootClient.Client().List(ctx, &nodeList); err != nil {
+		return nil, err
+	}
+
+	nodeNames = make([]string, len(nodeList.Items))
+	for i, node := range nodeList.Items {
+		t.GardenerFramework.Logger.Info("Found node", "index", i, "nodeName", node.Name)
+		nodeNames[i] = node.Name
+	}
+	sort.Strings(nodeNames)
+	return
+}
+
+// GetMachineDetails uses the seedClient to fetch all Machine names and the names of their corresponding Nodes
+func (t *ShootMigrationTest) GetMachineDetails(ctx context.Context, seedClient kubernetes.Interface) (machineNames, machineNodes []string, err error) {
+	log := t.GardenerFramework.Logger.WithValues("namespace", t.SeedShootNamespace)
+
+	machineList := unstructured.UnstructuredList{}
+	machineList.SetAPIVersion("machine.sapcloud.io/v1alpha1")
+	machineList.SetKind("Machine")
+
+	log.Info("Listing machines")
+	if err := seedClient.Client().List(ctx, &machineList, client.InNamespace(t.SeedShootNamespace)); err != nil {
+		return nil, nil, err
+	}
+
+	log.Info("Found machines", "count", len(machineList.Items))
+
+	machineNames = make([]string, len(machineList.Items))
+	machineNodes = make([]string, len(machineList.Items))
+	for i, machine := range machineList.Items {
+		log.Info("Found machine", "index", i, "machineName", machine.GetName(), "nodeName", machine.GetLabels()["node"])
+		machineNames[i] = machine.GetName()
+		machineNodes[i] = machine.GetLabels()["node"]
+	}
+	sort.Strings(machineNames)
+	sort.Strings(machineNodes)
+	return
+}
+
+// GetPersistedSecrets uses the seedClient to fetch the data of all Secrets that have the `persist` label key set to true
+// from the Shoot's control plane namespace
+func (t *ShootMigrationTest) GetPersistedSecrets(ctx context.Context,
seedClient kubernetes.Interface) (map[string]corev1.Secret, error) { + secretList := &corev1.SecretList{} + if err := seedClient.Client().List( + ctx, + secretList, + client.InNamespace(t.SeedShootNamespace), + client.MatchingLabels(map[string]string{secretsmanager.LabelKeyPersist: secretsmanager.LabelValueTrue}), + ); err != nil { + return nil, err + } + + secretsMap := make(map[string]corev1.Secret, len(secretList.Items)) + for _, secret := range secretList.Items { + secretsMap[secret.Name] = secret + } + + return secretsMap, nil +} + +// PopulateBeforeMigrationComparisonElements fills the ShootMigrationTest.ComparisonElementsBeforeMigration with the necessary Machine details and Node names +func (t *ShootMigrationTest) populateBeforeMigrationComparisonElements(ctx context.Context) (err error) { + if !t.Config.SkipMachinesCheck { + t.ComparisonElementsBeforeMigration.MachineNames, t.ComparisonElementsBeforeMigration.MachineNodes, err = t.GetMachineDetails(ctx, t.SourceSeedClient) + if err != nil { + return + } + } + if !t.Config.SkipNodeCheck { + t.ComparisonElementsBeforeMigration.NodeNames, err = t.GetNodeNames(ctx, t.ShootClient) + if err != nil { + return + } + } + t.ComparisonElementsBeforeMigration.SecretsMap, err = t.GetPersistedSecrets(ctx, t.SourceSeedClient) + return +} + +// PopulateAfterMigrationComparisonElements fills the ShootMigrationTest.ComparisonElementsAfterMigration with the necessary Machine details and Node names +func (t *ShootMigrationTest) populateAfterMigrationComparisonElements(ctx context.Context) (err error) { + if !t.Config.SkipMachinesCheck { + t.ComparisonElementsAfterMigration.MachineNames, t.ComparisonElementsAfterMigration.MachineNodes, err = t.GetMachineDetails(ctx, t.TargetSeedClient) + if err != nil { + return + } + } + if !t.Config.SkipNodeCheck { + t.ComparisonElementsAfterMigration.NodeNames, err = t.GetNodeNames(ctx, t.ShootClient) + if err != nil { + return + } + } + t.ComparisonElementsAfterMigration.SecretsMap, err = t.GetPersistedSecrets(ctx, t.TargetSeedClient) + return +} + +// CompareElementsAfterMigration compares the Machine details, Node names and Pod statuses before and after migration and returns error if there are differences. 
+func (t *ShootMigrationTest) compareElementsAfterMigration() error {
+	if !t.Config.SkipMachinesCheck {
+		if !reflect.DeepEqual(t.ComparisonElementsBeforeMigration.MachineNames, t.ComparisonElementsAfterMigration.MachineNames) {
+			return fmt.Errorf("initial Machines %s do not match after-migrate Machines %s", t.ComparisonElementsBeforeMigration.MachineNames, t.ComparisonElementsAfterMigration.MachineNames)
+		}
+		if !reflect.DeepEqual(t.ComparisonElementsBeforeMigration.MachineNodes, t.ComparisonElementsAfterMigration.MachineNodes) {
+			return fmt.Errorf("initial Machine Nodes (label) %s do not match after-migrate Machine Nodes (label) %s", t.ComparisonElementsBeforeMigration.MachineNodes, t.ComparisonElementsAfterMigration.MachineNodes)
+		}
+	}
+	if !t.Config.SkipNodeCheck {
+		if !reflect.DeepEqual(t.ComparisonElementsBeforeMigration.NodeNames, t.ComparisonElementsAfterMigration.NodeNames) {
+			return fmt.Errorf("initial Nodes %s do not match after-migrate Nodes %s", t.ComparisonElementsBeforeMigration.NodeNames, t.ComparisonElementsAfterMigration.NodeNames)
+		}
+		if !reflect.DeepEqual(t.ComparisonElementsAfterMigration.MachineNodes, t.ComparisonElementsAfterMigration.NodeNames) {
+			return fmt.Errorf("machine Nodes (label) %s do not match after-migrate Nodes %s", t.ComparisonElementsAfterMigration.MachineNodes, t.ComparisonElementsAfterMigration.NodeNames)
+		}
+	}
+
+	var errorMsg string
+	for name, secret := range t.ComparisonElementsBeforeMigration.SecretsMap {
+		if !reflect.DeepEqual(secret.Data, t.ComparisonElementsAfterMigration.SecretsMap[name].Data) {
+			errorMsg += fmt.Sprintf("Secret %s/%s did not have its data persisted.\n", secret.Namespace, secret.Name)
+		}
+		if !reflect.DeepEqual(secret.Labels, t.ComparisonElementsAfterMigration.SecretsMap[name].Labels) {
+			errorMsg += fmt.Sprintf("Secret %s/%s did not have its labels persisted: labels before migration: %v, labels after migration: %v\n",
+				secret.Namespace,
+				secret.Name,
+				secret.Labels,
+				t.ComparisonElementsAfterMigration.SecretsMap[name].Labels,
+			)
+		}
+	}
+	if len(errorMsg) > 0 {
+		return fmt.Errorf("control plane secrets did not have their data or labels persisted during control plane migration:\n %s", errorMsg)
+	}
+
+	return nil
+}
+
+// CheckObjectsTimestamp checks the timestamp of all objects that the resource-manager creates in the Shoot cluster.
+// The timestamp should not be after ShootMigrationTest.MigrationTime.
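+//
+// A minimal usage sketch (both exclude lists are hypothetical examples):
+//
+//	// Skip the listed ManagedResources entirely and tolerate resources whose
+//	// last 9 characters are a generated name suffix.
+//	err := t.CheckObjectsTimestamp(ctx, []string{"shoot-core-system"}, []string{"kube-proxy"})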
+func (t *ShootMigrationTest) CheckObjectsTimestamp(ctx context.Context, mrExcludeList, resourcesWithGeneratedName []string) error { + mrList := &resourcesv1alpha1.ManagedResourceList{} + if err := t.TargetSeedClient.Client().List( + ctx, + mrList, + client.InNamespace(t.SeedShootNamespace), + ); err != nil { + return err + } + + for _, mr := range mrList.Items { + if mr.Spec.Class == nil || *mr.Spec.Class != "seed" { + if !utils.ValueExists(mr.GetName(), mrExcludeList) { + log := t.GardenerFramework.Logger.WithValues("managedResource", client.ObjectKeyFromObject(&mr)) + log.Info("Found ManagedResource") + + for _, r := range mr.Status.Resources { + if len(r.Name) > 9 && utils.ValueExists(r.Name[:len(r.Name)-9], resourcesWithGeneratedName) { + continue + } + + obj := &unstructured.Unstructured{} + obj.SetAPIVersion(r.APIVersion) + obj.SetKind(r.Kind) + + if err := t.ShootClient.Client().Get(ctx, client.ObjectKey{Namespace: r.Namespace, Name: r.Name}, obj); err != nil { + return err + } + + // Ignore immutable objects because if their data changes, they will be recreated + if isImmutable, ok := obj.Object["immutable"]; ok && isImmutable == true { + continue + } + + creationTimestamp := obj.GetCreationTimestamp() + objectLog := log.WithValues("objectKind", obj.GetKind(), "objectNamespace", obj.GetNamespace(), "objectName", obj.GetName(), "creationTimestamp", creationTimestamp) + + objectLog.Info("Found object") + if t.MigrationTime.Before(&creationTimestamp) { + objectLog.Info("Object is created after shoot migration", "migrationTime", t.MigrationTime) + return fmt.Errorf("object: %s %s/%s Created At: %s is created after the Shoot migration %s", obj.GetKind(), obj.GetNamespace(), obj.GetName(), creationTimestamp, t.MigrationTime) + } + } + } + } + } + return nil +} + +// CheckForOrphanedNonNamespacedResources checks if there are orphaned resources left on the target seed after the shoot migration. 
+// The function checks for Cluster, BackupEntry, ClusterRoleBinding, ClusterRole and PersistentVolume resources
+func (t *ShootMigrationTest) checkForOrphanedNonNamespacedResources(ctx context.Context) error {
+	seedClientScheme := t.SourceSeedClient.Client().Scheme()
+
+	if err := extensionsv1alpha1.AddToScheme(seedClientScheme); err != nil {
+		return err
+	}
+
+	leakedObjects := []string{}
+
+	for _, obj := range []client.ObjectList{
+		&extensionsv1alpha1.ClusterList{},
+		&v1alpha1.BackupEntryList{},
+		&rbacv1.ClusterRoleBindingList{},
+		&rbacv1.ClusterRoleList{},
+	} {
+		if err := t.SourceSeedClient.Client().List(ctx, obj, client.InNamespace(corev1.NamespaceAll)); err != nil {
+			return err
+		}
+
+		if err := meta.EachListItem(obj, func(object runtime.Object) error {
+			if strings.Contains(object.(client.Object).GetName(), t.SeedShootNamespace) {
+				leakedObjects = append(leakedObjects, fmt.Sprintf("%T %s", object, object.(client.Object).GetName()))
+			}
+			return nil
+		}); err != nil {
+			return err
+		}
+	}
+
+	pvList := &corev1.PersistentVolumeList{}
+	if err := t.SourceSeedClient.Client().List(ctx, pvList, client.InNamespace(corev1.NamespaceAll)); err != nil {
+		return err
+	}
+	if err := meta.EachListItem(pvList, func(obj runtime.Object) error {
+		pv := obj.(*corev1.PersistentVolume)
+		if strings.Contains(pv.Spec.ClaimRef.Namespace, t.SeedShootNamespace) {
+			leakedObjects = append(leakedObjects, fmt.Sprintf("PersistentVolume/%s", pv.GetName()))
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	if len(leakedObjects) > 0 {
+		return fmt.Errorf("the following object(s) still exist in the source seed: %v", leakedObjects)
+	}
+	return nil
+}
+
+// CreateSecretAndServiceAccount creates a test secret and a test service account
+func (t ShootMigrationTest) CreateSecretAndServiceAccount(ctx context.Context) error {
+	testSecret, testServiceAccount := constructTestSecretAndServiceAccount()
+	if err := t.ShootClient.Client().Create(ctx, testSecret); err != nil {
+		return err
+	}
+
+	return t.ShootClient.Client().Create(ctx, testServiceAccount)
+}
+
+// CheckSecretAndServiceAccount checks that the test secret and service account exist in the shoot.
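+// Together with CreateSecretAndServiceAccount and CleanUpSecretAndServiceAccount it can be used to
+// verify that data in the shoot survives the migration, roughly in this order (a sketch):
+//
+//	_ = t.CreateSecretAndServiceAccount(ctx) // before the migration
+//	_ = t.CheckSecretAndServiceAccount(ctx)  // after the migration
+//	_ = t.CleanUpSecretAndServiceAccount(ctx)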
+func (t ShootMigrationTest) CheckSecretAndServiceAccount(ctx context.Context) error { + testSecret, testServiceAccount := constructTestSecretAndServiceAccount() + if err := t.ShootClient.Client().Get(ctx, client.ObjectKeyFromObject(testSecret), testSecret); err != nil { + return err + } + + return t.ShootClient.Client().Get(ctx, client.ObjectKeyFromObject(testServiceAccount), testServiceAccount) +} + +// CleanUpSecretAndServiceAccount cleans up the test secret and service account +func (t ShootMigrationTest) CleanUpSecretAndServiceAccount(ctx context.Context) error { + testSecret, testServiceAccount := constructTestSecretAndServiceAccount() + if err := t.ShootClient.Client().Delete(ctx, testSecret); err != nil { + return err + } + + return t.ShootClient.Client().Delete(ctx, testServiceAccount) +} + +func constructTestSecretAndServiceAccount() (*corev1.Secret, *corev1.ServiceAccount) { + const ( + secretName = "test-shoot-migration-secret" + secretNamespace = metav1.NamespaceDefault + serviceAccountName = "test-service-account" + serviceAccountNamespace = metav1.NamespaceDefault + ) + testSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNamespace, + }, + } + testServiceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + Namespace: serviceAccountNamespace, + }} + return testSecret, testServiceAccount +} diff --git a/vendor/github.com/gardener/gardener/test/framework/template.go b/vendor/github.com/gardener/gardener/test/framework/template.go new file mode 100644 index 00000000..1750e80e --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/template.go @@ -0,0 +1,53 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "bytes" + "context" + "fmt" + "html/template" + "os" + "path/filepath" + + "github.com/Masterminds/sprig" + + "github.com/gardener/gardener/pkg/client/kubernetes" +) + +// RenderAndDeployTemplate renders a template from the resource template directory and deploys it to the cluster +func (f *CommonFramework) RenderAndDeployTemplate(ctx context.Context, k8sClient kubernetes.Interface, templateName string, values interface{}) error { + templateFilepath := filepath.Join(f.TemplatesDir, templateName) + if _, err := os.Stat(templateFilepath); err != nil { + return fmt.Errorf("could not find template in %q", templateFilepath) + } + + tpl, err := template. + New(templateName). + Funcs(sprig.HtmlFuncMap()). 
+ ParseFiles(templateFilepath) + if err != nil { + return fmt.Errorf("unable to parse template in %s: %w", templateFilepath, err) + } + + var writer bytes.Buffer + err = tpl.Execute(&writer, values) + if err != nil { + return err + } + + manifestReader := kubernetes.NewManifestReader(writer.Bytes()) + return k8sClient.Applier().ApplyManifest(ctx, manifestReader, kubernetes.DefaultMergeFuncs) +} diff --git a/vendor/github.com/gardener/gardener/test/framework/test_description.go b/vendor/github.com/gardener/gardener/test/framework/test_description.go new file mode 100644 index 00000000..98f0c004 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/test_description.go @@ -0,0 +1,120 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo/v2" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TestDescription labels tests according to the provided labels in the expected order. +type TestDescription struct { + labels sets.Set[string] +} + +// NewTestDescription creates a new test description +func NewTestDescription(baseLabel string) TestDescription { + return TestDescription{ + labels: sets.New(baseLabel), + } +} + +// Beta labels a test as beta test +func (t TestDescription) Beta() TestDescription { + return t.newLabel("BETA") +} + +// Default labels a test as default test +func (t TestDescription) Default() TestDescription { + return t.newLabel("DEFAULT") +} + +// Release labels a test as release relevant test +func (t TestDescription) Release() TestDescription { + return t.newLabel("RELEASE") +} + +// Serial labels a test to be run as serial step +func (t TestDescription) Serial() TestDescription { + return t.newLabel("SERIAL") +} + +// Disruptive labels a test as disruptive. +// This kind of test should run with care. 
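+//
+// Labels are chained and rendered in sorted order into the spec text, e.g. (a usage sketch):
+//
+//	td := NewTestDescription("SHOOT")
+//	td.Beta().Disruptive().CIt("should survive a migration", func(ctx context.Context) {
+//		// the spec text becomes "[BETA] [DISRUPTIVE] [SHOOT] should survive a migration"
+//	}, 10*time.Minute)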
+func (t TestDescription) Disruptive() TestDescription { + return t.newLabel("DISRUPTIVE") +} + +func (t TestDescription) newLabel(label string) TestDescription { + labels := t.labels.Union(nil) + labels.Insert(label) + return TestDescription{ + labels: labels, + } +} + +// It defines a ginkgo It block and enhances the test description with the provided labels +func (t TestDescription) It(text string, body func(), opts ...TestOption) { + testOptions := &TestOptions{} + testOptions.ApplyOptions(opts) + + testOptions.Complete(func() { + ginkgo.It(fmt.Sprintf("%s %s", t.String(), text), body) + }) +} + +// FIt defines a ginkgo FIt block and enhances the test description with the provided labels +func (t TestDescription) FIt(text string, body func(), opts ...TestOption) { + testOptions := &TestOptions{} + testOptions.ApplyOptions(opts) + + testOptions.Complete(func() { + ginkgo.FIt(fmt.Sprintf("%s %s", t.String(), text), body) + }) +} + +// CIt defines a contextified ginkgo It block and enhances the test description with the provided labels +func (t TestDescription) CIt(text string, body func(context.Context), timeout time.Duration, opts ...TestOption) { + testOptions := &TestOptions{} + testOptions.ApplyOptions(opts) + + testOptions.Complete(func() { + CIt(fmt.Sprintf("%s %s", t.String(), text), body, timeout) + }) +} + +// FCIt defines a contextified ginkgo FIt block and enhances the test description with the provided labels +func (t TestDescription) FCIt(text string, body func(context.Context), timeout time.Duration, opts ...TestOption) { + testOptions := &TestOptions{} + testOptions.ApplyOptions(opts) + + testOptions.Complete(func() { + FCIt(fmt.Sprintf("%s %s", t.String(), text), body, timeout) + }) +} + +// String returns the test description labels +func (t TestDescription) String() string { + labelsList := sets.List(t.labels) + testText := fmt.Sprintf("[%s]", labelsList[0]) + for i := 1; i < len(labelsList); i++ { + testText = fmt.Sprintf("%s [%s]", testText, labelsList[i]) + } + return testText +} diff --git a/vendor/github.com/gardener/gardener/test/framework/test_options.go b/vendor/github.com/gardener/gardener/test/framework/test_options.go new file mode 100644 index 00000000..8274b94d --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/test_options.go @@ -0,0 +1,125 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "context" + "time" + + "github.com/onsi/ginkgo/v2" +) + +// TestOptions contains options to add additional functionality +// or cleanup handlers to a testcase. +type TestOptions struct { + // afterTests holds a list of all registered AfterTest functions + // that are executed when the test has finished. 
+ AfterTests afterTests + + // CAfterTests holds a list of all registered contextified AfterTest functions + // that are executed when the test has finished. + CAfterTests []cAfterTestOption +} + +// ApplyOptions applies the given test options on these options. +func (o *TestOptions) ApplyOptions(opts []TestOption) *TestOptions { + for _, opt := range opts { + opt.ApplyToTestOptions(o) + } + return o +} + +// Complete registers all test options that are configured. +// it should be a function that configures a ginkgo test case +// This should get called when all options are applied. +func (o *TestOptions) Complete(it func()) { + + if len(o.AfterTests) == 0 && len(o.CAfterTests) == 0 { + it() + return + } + + // Create a new context so that the afterTests function only runs once after this one testcase. + // Otherwise the after tests would run after every test case in the outer context. + ginkgo.Context("", func() { + it() + + for _, aftertest := range o.AfterTests { + // register afterTest to global aftersuite in case the test interrupts + var h CleanupActionHandle + ginkgo.BeforeEach(func() { + h = AddCleanupAction(aftertest) + }) + ginkgo.AfterEach(func() { + RemoveCleanupAction(h) + aftertest() + }) + } + + for _, caftertest := range o.CAfterTests { + // register afterTest to global aftersuite in case the test interrupts + var h CleanupActionHandle + ginkgo.BeforeEach(func() { + h = AddCleanupAction(func() { + contextify(caftertest.Body, caftertest.Timeout)() + }) + }) + CAfterEach(func(ctx context.Context) { + RemoveCleanupAction(h) + caftertest.Body(ctx) + }, caftertest.Timeout) + } + }) +} + +// cAfterTestOption contains options for contextified after test function. +type cAfterTestOption struct { + Body func(ctx context.Context) + Timeout time.Duration +} + +// ApplyToTestOptions adds contextified after test functions to test options +func (at *cAfterTestOption) ApplyToTestOptions(opts *TestOptions) { + opts.CAfterTests = append(opts.CAfterTests, *at) +} + +// TestOption is some configuration that modifies options for testcase. +type TestOption interface { + // ApplyToTestOptions applies this configuration to the given test options. + ApplyToTestOptions(*TestOptions) +} + +// afterTests are functions that should run when a test has finished +type afterTests []func() + +// ApplyToTestOptions adds after test functions to test options +func (at afterTests) ApplyToTestOptions(opts *TestOptions) { + opts.AfterTests = append(opts.AfterTests, at...) +} + +// WithAfterTests adds functions to the current test that are called +// when the test has finished +func WithAfterTests(funcs ...func()) TestOption { + return afterTests(funcs) +} + +// WithCAfterTest adds contextified functions to the current test that are called +// when the test has finished +func WithCAfterTest(body func(ctx context.Context), timeout time.Duration) TestOption { + return &cAfterTestOption{ + Body: body, + Timeout: timeout, + } +} diff --git a/vendor/github.com/gardener/gardener/test/framework/utils.go b/vendor/github.com/gardener/gardener/test/framework/utils.go new file mode 100644 index 00000000..e20b7fce --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/utils.go @@ -0,0 +1,209 @@ +// Copyright 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "errors" + "fmt" + "os" + "reflect" + "regexp" + + "github.com/hashicorp/go-multierror" + "github.com/onsi/ginkgo/v2" + apimachineryRuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "sigs.k8s.io/yaml" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/client/kubernetes" +) + +// Must errors with `GinkgoT().Fatal` if the error is non-nil. +func Must(err error) { + if err != nil { + ginkgo.GinkgoT().Fatal(err) + } +} + +func checkPtr(v reflect.Value) error { + if v.Type().Kind() != reflect.Ptr { + return fmt.Errorf("value has to be a pointer-type but got %T", v.Interface()) + } + return nil +} + +func checkAssignable(src, dst reflect.Value) error { + if !src.Type().AssignableTo(dst.Type().Elem()) { + return fmt.Errorf("src of type %T cannot be assigned to dst of type %T", src.Interface(), dst.Interface()) + } + return nil +} + +func dereference(v interface{}) interface{} { + dstValue := reflect.ValueOf(v) + Must(checkPtr(dstValue)) + + return dstValue.Elem().Interface() +} + +// RevertableSet sets the element of dst to src and returns a function that can revert back to the original values. +func RevertableSet(dst, src interface{}) (revert func()) { + tmp := dereference(dst) + Set(dst, src) + return func() { Set(dst, tmp) } +} + +// Set sets the pointer dst to the value of src. +// +// dst has to be a pointer, src has to be assignable to the element type of dst. +func Set(dst, src interface{}) { + dstValue := reflect.ValueOf(dst) + Must(checkPtr(dstValue)) + + srcValue := reflect.ValueOf(src) + Must(checkAssignable(srcValue, dstValue)) + + dstValue.Elem().Set(srcValue) +} + +// ComputeTechnicalID computes the technical ID of a shoot +func ComputeTechnicalID(projectName string, shoot *gardencorev1beta1.Shoot) string { + // Use the stored technical ID in the Shoot's status field if it's there. + // For backwards compatibility we keep the pattern as it was before we had to change it + // (double hyphens). + if len(shoot.Status.TechnicalID) > 0 { + return shoot.Status.TechnicalID + } + + // New clusters shall be created with the new technical id (double hyphens). + return fmt.Sprintf("shoot--%s--%s", projectName, shoot.Name) +} + +// Exists checks if a path exists +func Exists(path string) (bool, error) { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// StringSet checks if a string is set +func StringSet(s string) bool { + return len(s) != 0 +} + +// FileExists Checks if a file path exists and fail otherwise +func FileExists(kc string) bool { + ok, err := Exists(kc) + if err != nil { + ginkgo.Fail(err.Error()) + } + return ok +} + +// ReadObject loads the contents of file and decodes it as an object. 
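+//
+// For example (the file name is hypothetical):
+//
+//	shoot := &gardencorev1beta1.Shoot{}
+//	err := ReadObject("shoot.yaml", shoot)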
+func ReadObject(file string, into apimachineryRuntime.Object) error { + data, err := os.ReadFile(file) + if err != nil { + return err + } + + _, _, err = serializer.NewCodecFactory(kubernetes.GardenScheme).UniversalDeserializer().Decode(data, nil, into) + return err +} + +// ParseFileAsProviderConfig parses a file as a ProviderConfig +func ParseFileAsProviderConfig(filepath string) (*apimachineryRuntime.RawExtension, error) { + data, err := os.ReadFile(filepath) + if err != nil { + return nil, err + } + + // apiServer needs JSON for the Raw data + jsonData, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, fmt.Errorf("unable to decode ProviderConfig: %v", err) + } + return &apimachineryRuntime.RawExtension{Raw: jsonData}, nil +} + +// ParseFileAsWorkers parses a file as a Worker configuration +func ParseFileAsWorkers(filepath string) ([]gardencorev1beta1.Worker, error) { + data, err := os.ReadFile(filepath) + if err != nil { + return nil, err + } + + workers := []gardencorev1beta1.Worker{} + if err := yaml.Unmarshal(data, &workers); err != nil { + return nil, fmt.Errorf("unable to decode workers: %v", err) + } + return workers, nil +} + +// GetTestRunID returns the current testmachinery testrun ID. +func GetTestRunID() string { + return os.Getenv(TestMachineryTestRunIDEnvVarName) +} + +// TextValidation is a map of regular expression to description +// that is used to validate texts based on allowed or denied regexps. +type TextValidation map[string]string + +// ValidateAsAllowlist validates that all allowed regular expressions +// are in the given text. +func (v *TextValidation) ValidateAsAllowlist(text []byte) error { + return v.validate(text, func(matches [][]byte) error { + if len(matches) == 0 { + return errors.New("allowed RegExp not found") + } + return nil + }) +} + +// ValidateAsDenylist validates that no denied regular expressions +// are in the given text. +func (v *TextValidation) ValidateAsDenylist(text []byte) error { + return v.validate(text, func(matches [][]byte) error { + if len(matches) != 0 { + return errors.New("denied RegExp found") + } + return nil + }) +} + +// validate compiles all given regular expressions strings and finds all matches in the given text. +func (v *TextValidation) validate(text []byte, validationFunc func([][]byte) error) error { + var allErrs error + + for reString, description := range *v { + re, err := regexp.Compile(reString) + if err != nil { + allErrs = multierror.Append(allErrs, err) + continue + } + + matches := re.FindAll(text, -1) + if err := validationFunc(matches); err != nil { + allErrs = multierror.Append(allErrs, fmt.Errorf("RegExp %s validation failed: %s: %w", reString, description, err)) + } + } + + return allErrs +} diff --git a/vendor/github.com/gardener/gardener/test/framework/worker_utils.go b/vendor/github.com/gardener/gardener/test/framework/worker_utils.go new file mode 100644 index 00000000..39ada09e --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/framework/worker_utils.go @@ -0,0 +1,169 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package framework + +import ( + "fmt" + "strings" + + "k8s.io/utils/strings/slices" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" + "github.com/gardener/gardener/pkg/utils" +) + +// setShootWorkerSettings sets the Shoot's worker settings from the given config +func setShootWorkerSettings(shoot *gardencorev1beta1.Shoot, cfg *ShootCreationConfig, cloudProfile *gardencorev1beta1.CloudProfile) error { + if StringSet(cfg.workersConfig) { + workers, err := ParseFileAsWorkers(cfg.workersConfig) + if err != nil { + return err + } + shoot.Spec.Provider.Workers = workers + } else { + if err := SetupShootWorker(shoot, cloudProfile, cfg.workerZone); err != nil { + return err + } + } + + if StringSet(cfg.shootMachineType) { + for i := range shoot.Spec.Provider.Workers { + shoot.Spec.Provider.Workers[i].Machine.Type = cfg.shootMachineType + } + } + + if StringSet(cfg.shootMachineImageName) { + for i := range shoot.Spec.Provider.Workers { + shoot.Spec.Provider.Workers[i].Machine.Image.Name = cfg.shootMachineImageName + } + } + + if StringSet(cfg.shootMachineImageVersion) { + for i := range shoot.Spec.Provider.Workers { + shoot.Spec.Provider.Workers[i].Machine.Image.Version = &cfg.shootMachineImageVersion + } + } + + return nil +} + +// SetupShootWorker prepares the Shoot with one worker with provider specific volume. Clears the currently configured workers. +func SetupShootWorker(shoot *gardencorev1beta1.Shoot, cloudProfile *gardencorev1beta1.CloudProfile, workerZone string) error { + if len(cloudProfile.Spec.MachineImages) < 1 { + return fmt.Errorf("at least one different machine image has to be defined in the CloudProfile") + } + + // clear current workers + shoot.Spec.Provider.Workers = []gardencorev1beta1.Worker{} + + return AddWorker(shoot, cloudProfile, workerZone) +} + +// AddWorker adds a valid default worker to the shoot for the given machineImage and CloudProfile. 
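+// It picks the first amd64 machine type and the first machine image with amd64 support from the
+// CloudProfile, e.g. (a sketch; the zone is a hypothetical value):
+//
+//	if err := AddWorker(shoot, cloudProfile, "europe-west1-b"); err != nil {
+//		return err
+//	}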
+func AddWorker(shoot *gardencorev1beta1.Shoot, cloudProfile *gardencorev1beta1.CloudProfile, workerZone string) error { + if len(cloudProfile.Spec.MachineTypes) == 0 { + return fmt.Errorf("no MachineTypes configured in the Cloudprofile '%s'", cloudProfile.Name) + } + + machineType := cloudProfile.Spec.MachineTypes[0] + + //select first machine type of CPU architecture amd64 + for _, machine := range cloudProfile.Spec.MachineTypes { + if *machine.Architecture == v1beta1constants.ArchitectureAMD64 { + machineType = machine + break + } + } + + if *machineType.Architecture != v1beta1constants.ArchitectureAMD64 { + return fmt.Errorf("no MachineTypes of architecture amd64 configured in the Cloudprofile '%s'", cloudProfile.Name) + } + + machineImage := firstMachineImageWithAMDSupport(cloudProfile.Spec.MachineImages) + + if machineImage == nil { + return fmt.Errorf("no MachineImage that supports architecture amd64 configured in the Cloudprofile '%s'", cloudProfile.Name) + } + + qualifyingVersionFound, shootMachineImage, err := helper.GetLatestQualifyingShootMachineImage(*machineImage) + if err != nil { + return err + } + + if !qualifyingVersionFound { + return fmt.Errorf("could not add worker. No latest qualifying Shoot machine image could be determined for machine image %q. Make sure the machine image in the CloudProfile has at least one version that is not expired and not in preview", machineImage.Name) + } + + workerName, err := generateRandomWorkerName(fmt.Sprintf("%s-", shootMachineImage.Name)) + if err != nil { + return err + } + + shoot.Spec.Provider.Workers = append(shoot.Spec.Provider.Workers, gardencorev1beta1.Worker{ + Name: workerName, + Maximum: 2, + Minimum: 2, + Machine: gardencorev1beta1.Machine{ + Type: machineType.Name, + Image: shootMachineImage, + }, + }) + + if machineType.Storage == nil && len(cloudProfile.Spec.VolumeTypes) > 0 { + shoot.Spec.Provider.Workers[0].Volume = &gardencorev1beta1.Volume{ + Type: &cloudProfile.Spec.VolumeTypes[0].Name, + VolumeSize: "35Gi", + } + } + + if StringSet(workerZone) { + // using one zone as default + shoot.Spec.Provider.Workers[0].Zones = []string{workerZone} + } + + return nil +} + +func generateRandomWorkerName(prefix string) (string, error) { + var length int + remainingCharacters := 15 - len(prefix) + if remainingCharacters > 0 { + length = remainingCharacters + } else { + prefix = WorkerNamePrefix + length = 15 - len(WorkerNamePrefix) + } + + randomString, err := utils.GenerateRandomString(length) + if err != nil { + return "", err + } + + return prefix + strings.ToLower(randomString), nil +} + +func firstMachineImageWithAMDSupport(machineImageFromCloudProfile []gardencorev1beta1.MachineImage) *gardencorev1beta1.MachineImage { + for _, machineImage := range machineImageFromCloudProfile { + for _, version := range machineImage.Versions { + if slices.Contains(version.Architectures, v1beta1constants.ArchitectureAMD64) { + return &machineImage + } + } + } + + return nil +} diff --git a/vendor/github.com/gardener/gardener/test/utils/access/adminkubeconfig.go b/vendor/github.com/gardener/gardener/test/utils/access/adminkubeconfig.go new file mode 100644 index 00000000..d40d2499 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/utils/access/adminkubeconfig.go @@ -0,0 +1,54 @@ +// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package access + +import ( + "context" + + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + authenticationv1alpha1 "github.com/gardener/gardener/pkg/apis/authentication/v1alpha1" + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/client/kubernetes" +) + +// CreateShootClientFromAdminKubeconfig requests an admin kubeconfig and creates a shoot client. +func CreateShootClientFromAdminKubeconfig(ctx context.Context, gardenClient kubernetes.Interface, shoot *gardencorev1beta1.Shoot) (kubernetes.Interface, error) { + kubeconfig, err := RequestAdminKubeconfigForShoot(ctx, gardenClient, shoot, pointer.Int64(7200)) + if err != nil { + return nil, err + } + + return kubernetes.NewClientFromBytes( + kubeconfig, + kubernetes.WithClientOptions(client.Options{Scheme: kubernetes.ShootScheme}), + kubernetes.WithDisabledCachedClient(), + ) +} + +// RequestAdminKubeconfigForShoot requests an admin kubeconfig for the given shoot. +func RequestAdminKubeconfigForShoot(ctx context.Context, gardenClient kubernetes.Interface, shoot *gardencorev1beta1.Shoot, expirationSeconds *int64) ([]byte, error) { + adminKubeconfigRequest := &authenticationv1alpha1.AdminKubeconfigRequest{ + Spec: authenticationv1alpha1.AdminKubeconfigRequestSpec{ + ExpirationSeconds: expirationSeconds, + }, + } + if err := gardenClient.Client().SubResource("adminkubeconfig").Create(ctx, shoot, adminKubeconfigRequest); err != nil { + return nil, err + } + + return adminKubeconfigRequest.Status.Kubeconfig, nil +} diff --git a/vendor/github.com/gardener/gardener/test/utils/access/csr.go b/vendor/github.com/gardener/gardener/test/utils/access/csr.go new file mode 100644 index 00000000..b588c410 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/utils/access/csr.go @@ -0,0 +1,147 @@ +// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package access + +import ( + "context" + "crypto/rand" + "crypto/x509/pkix" + "time" + + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/cert" + csrutil "k8s.io/client-go/util/certificate/csr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/controllerutils" + "github.com/gardener/gardener/pkg/utils" + "github.com/gardener/gardener/pkg/utils/flow" + secretsutils "github.com/gardener/gardener/pkg/utils/secrets" +) + +// labelsE2ETestCSRAccess is the set of labels added to all CSRs and ClusterRoleBindings for easy cleanup. +var labelsE2ETestCSRAccess = map[string]string{"e2e-test": "csr-access"} + +// CreateTargetClientFromCSR creates and approves a CSR in the shoot and creates a new target client from it. +// You should call CleanupObjectsFromCSRAccess to clean up the objects created by this function. +func CreateTargetClientFromCSR(ctx context.Context, targetClient kubernetes.Interface, commonName string) (kubernetes.Interface, error) { + // use fake key to avoid building complex retry/update logic + privateKey, err := secretsutils.FakeGenerateKey(rand.Reader, 4096) + if err != nil { + return nil, err + } + + csrData, err := cert.MakeCSR(privateKey, &pkix.Name{CommonName: commonName}, nil, nil) + if err != nil { + return nil, err + } + + reqName, reqUID, err := csrutil.RequestCertificate( + targetClient.Kubernetes(), + csrData, + commonName, + certificatesv1.KubeAPIServerClientSignerName, + pointer.Duration(3600*time.Second), + []certificatesv1.KeyUsage{ + certificatesv1.UsageDigitalSignature, + certificatesv1.UsageKeyEncipherment, + certificatesv1.UsageClientAuth, + }, + privateKey, + ) + if err != nil { + return nil, err + } + + csr := &certificatesv1.CertificateSigningRequest{} + if err = targetClient.Client().Get(ctx, client.ObjectKey{Name: reqName}, csr); err != nil { + return nil, err + } + + patch := client.MergeFrom(csr.DeepCopy()) + csr.Labels = utils.MergeStringMaps(csr.Labels, labelsE2ETestCSRAccess) + if err = targetClient.Client().Patch(ctx, csr, patch); err != nil { + return nil, err + } + + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: commonName}} + if _, err = controllerutils.GetAndCreateOrMergePatch(ctx, targetClient.Client(), clusterRoleBinding, func() error { + clusterRoleBinding.Labels = utils.MergeStringMaps(clusterRoleBinding.Labels, labelsE2ETestCSRAccess) + clusterRoleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + } + clusterRoleBinding.Subjects = []rbacv1.Subject{{ + Kind: rbacv1.UserKind, + Name: commonName, + }} + return nil + }); err != nil { + return nil, err + } + + hasApprovedCondition := false + for _, condition := range csr.Status.Conditions { + if condition.Type == certificatesv1.CertificateApproved { + hasApprovedCondition = true + break + } + } + + if !hasApprovedCondition { + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Reason: "AutoApproved", + Message: "Auto approving test CertificateSigningRequest", + Status: corev1.ConditionTrue, + }) + if err := targetClient.Client().SubResource("approval").Update(ctx, csr); err != nil { + return nil, err + } + } + + certData, err := 
csrutil.WaitForCertificate(ctx, targetClient.Kubernetes(), reqName, reqUID) + if err != nil { + return nil, err + } + + r := targetClient.RESTConfig() + restConfig := &rest.Config{ + Host: r.Host, + TLSClientConfig: rest.TLSClientConfig{ + CAData: r.CAData, + CertData: certData, + KeyData: utils.EncodePrivateKey(privateKey), + }, + } + + return kubernetes.NewWithConfig(kubernetes.WithRESTConfig(restConfig), kubernetes.WithDisabledCachedClient()) +} + +// CleanupObjectsFromCSRAccess cleans up all objects in the target created by all calls to CreateTargetClientFromCSR. +func CleanupObjectsFromCSRAccess(ctx context.Context, targetClient kubernetes.Interface) error { + return flow.Parallel(func(ctx context.Context) error { + return targetClient.Client().DeleteAllOf(ctx, &certificatesv1.CertificateSigningRequest{}, client.MatchingLabels(labelsE2ETestCSRAccess)) + }, func(ctx context.Context) error { + return targetClient.Client().DeleteAllOf(ctx, &rbacv1.ClusterRoleBinding{}, client.MatchingLabels(labelsE2ETestCSRAccess)) + })(ctx) +} diff --git a/vendor/github.com/gardener/gardener/test/utils/access/serviceaccount.go b/vendor/github.com/gardener/gardener/test/utils/access/serviceaccount.go new file mode 100644 index 00000000..9fab2e91 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/utils/access/serviceaccount.go @@ -0,0 +1,175 @@ +// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package access + +import ( + "context" + "fmt" + "time" + + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/controllerutils" + "github.com/gardener/gardener/pkg/utils" + "github.com/gardener/gardener/pkg/utils/flow" + "github.com/gardener/gardener/pkg/utils/retry" +) + +const namespaceE2ETestServiceAccountTokenAccess = metav1.NamespaceDefault + +// labelsE2ETestDynamicServiceAccountTokenAccess is the set of labels added to all ServiceAccounts and +// ClusterRoleBindings for easy cleanup. +var labelsE2ETestDynamicServiceAccountTokenAccess = map[string]string{"e2e-test": "serviceaccount-dynamic-access"} + +// CreateTargetClientFromDynamicServiceAccountToken creates a ServiceAccount, uses the kube-apiserver's TokenRequest API +// to request a token for it, and then creates a new target client from it. +// You should call CleanupObjectsFromDynamicServiceAccountTokenAccess to clean up the objects created by this function. 
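+//
+// A usage sketch (the ServiceAccount name is a hypothetical example):
+//
+//	saClient, err := CreateTargetClientFromDynamicServiceAccountToken(ctx, shootClient, "e2e-dynamic-sa")
+//	// use saClient, then:
+//	err = CleanupObjectsFromDynamicServiceAccountTokenAccess(ctx, shootClient)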
+func CreateTargetClientFromDynamicServiceAccountToken(ctx context.Context, targetClient kubernetes.Interface, name string) (kubernetes.Interface, error) { + return createTargetClientFromServiceAccount(ctx, targetClient, name, labelsE2ETestDynamicServiceAccountTokenAccess, func(serviceAccount *corev1.ServiceAccount) (string, error) { + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{v1beta1constants.GardenerAudience}, + ExpirationSeconds: pointer.Int64(3600), + }, + } + + if err := targetClient.Client().SubResource("token").Create(ctx, serviceAccount, tokenRequest); err != nil { + return "", err + } + + return tokenRequest.Status.Token, nil + }) +} + +// CleanupObjectsFromDynamicServiceAccountTokenAccess cleans up all objects in the target created by all calls to +// CreateTargetClientFromDynamicServiceAccountToken. +func CleanupObjectsFromDynamicServiceAccountTokenAccess(ctx context.Context, targetClient kubernetes.Interface) error { + return flow.Parallel(func(ctx context.Context) error { + return targetClient.Client().DeleteAllOf(ctx, &corev1.ServiceAccount{}, client.InNamespace(namespaceE2ETestServiceAccountTokenAccess), client.MatchingLabels(labelsE2ETestDynamicServiceAccountTokenAccess)) + }, func(ctx context.Context) error { + return targetClient.Client().DeleteAllOf(ctx, &rbacv1.ClusterRoleBinding{}, client.MatchingLabels(labelsE2ETestDynamicServiceAccountTokenAccess)) + })(ctx) +} + +// labelsE2ETestStaticServiceAccountToken is the set of labels added to all ServiceAccounts, Secrets, and +// ClusterRoleBindings for easy cleanup. +var labelsE2ETestStaticServiceAccountToken = map[string]string{"e2e-test": "serviceaccount-static-access"} + +// CreateShootClientFromStaticServiceAccountToken creates a ServiceAccount, a corresponding static token secret (issued +// by kube-controller-manager), and then creates a new shoot client from it. +// You should call CleanupObjectsFromStaticServiceAccountTokenAccess to clean up the objects created by this function. 
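+//
+// A usage sketch (the name is a hypothetical example):
+//
+//	c, err := CreateShootClientFromStaticServiceAccountToken(ctx, shootClient, "e2e-static-sa")
+//	// use c, then:
+//	err = CleanupObjectsFromStaticServiceAccountTokenAccess(ctx, shootClient)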
+func CreateShootClientFromStaticServiceAccountToken(ctx context.Context, shootClient kubernetes.Interface, name string) (kubernetes.Interface, error) {
+	return createTargetClientFromServiceAccount(ctx, shootClient, name, labelsE2ETestStaticServiceAccountToken, func(serviceAccount *corev1.ServiceAccount) (string, error) {
+		secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: serviceAccount.Namespace}}
+		if _, err := controllerutils.GetAndCreateOrMergePatch(ctx, shootClient.Client(), secret, func() error {
+			secret.Labels = utils.MergeStringMaps(secret.Labels, labelsE2ETestStaticServiceAccountToken)
+			secret.Annotations = utils.MergeStringMaps(secret.Annotations, map[string]string{corev1.ServiceAccountNameKey: serviceAccount.Name})
+			secret.Type = corev1.SecretTypeServiceAccountToken
+			return nil
+		}); err != nil {
+			return "", err
+		}
+
+		timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		defer cancel()
+
+		if err := retry.Until(timeoutCtx, 500*time.Millisecond, func(ctx context.Context) (bool, error) {
+			if err := shootClient.Client().Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
+				return retry.SevereError(err)
+			}
+
+			if len(secret.Data[corev1.ServiceAccountTokenKey]) == 0 {
+				return retry.MinorError(fmt.Errorf("token for secret %s not yet populated by kube-controller-manager", client.ObjectKeyFromObject(secret)))
+			}
+
+			return retry.Ok()
+		}); err != nil {
+			// Propagate the error instead of swallowing it so callers see the timeout.
+			return "", err
+		}
+
+		return string(secret.Data[corev1.ServiceAccountTokenKey]), nil
+	})
+}
+
+// CleanupObjectsFromStaticServiceAccountTokenAccess cleans up all objects in the shoot created by all calls to
+// CreateShootClientFromStaticServiceAccountToken.
+func CleanupObjectsFromStaticServiceAccountTokenAccess(ctx context.Context, shootClient kubernetes.Interface) error {
+	return flow.Parallel(func(ctx context.Context) error {
+		return shootClient.Client().DeleteAllOf(ctx, &corev1.ServiceAccount{}, client.InNamespace(namespaceE2ETestServiceAccountTokenAccess), client.MatchingLabels(labelsE2ETestStaticServiceAccountToken))
+	}, func(ctx context.Context) error {
+		return shootClient.Client().DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(namespaceE2ETestServiceAccountTokenAccess), client.MatchingLabels(labelsE2ETestStaticServiceAccountToken))
+	}, func(ctx context.Context) error {
+		return shootClient.Client().DeleteAllOf(ctx, &rbacv1.ClusterRoleBinding{}, client.MatchingLabels(labelsE2ETestStaticServiceAccountToken))
+	})(ctx)
+}
+
+func createTargetClientFromServiceAccount(
+	ctx context.Context,
+	targetClient kubernetes.Interface,
+	name string,
+	labels map[string]string,
+	getTokenForServiceAccount func(*corev1.ServiceAccount) (string, error),
+) (
+	kubernetes.Interface,
+	error,
+) {
+	serviceAccount := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespaceE2ETestServiceAccountTokenAccess}}
+	if _, err := controllerutils.GetAndCreateOrMergePatch(ctx, targetClient.Client(), serviceAccount, func() error {
+		serviceAccount.Labels = utils.MergeStringMaps(serviceAccount.Labels, labels)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	clusterRoleBinding := &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: name}}
+	if _, err := controllerutils.GetAndCreateOrMergePatch(ctx, targetClient.Client(), clusterRoleBinding, func() error {
+		clusterRoleBinding.Labels = utils.MergeStringMaps(clusterRoleBinding.Labels, labels)
+		clusterRoleBinding.RoleRef = rbacv1.RoleRef{
+			APIGroup: rbacv1.GroupName,
+			Kind:
"ClusterRole", + Name: "cluster-admin", + } + clusterRoleBinding.Subjects = []rbacv1.Subject{{ + Kind: rbacv1.ServiceAccountKind, + Name: serviceAccount.Name, + Namespace: serviceAccount.Namespace, + }} + return nil + }); err != nil { + return nil, err + } + + token, err := getTokenForServiceAccount(serviceAccount) + if err != nil { + return nil, err + } + + r := targetClient.RESTConfig() + restConfig := &rest.Config{ + Host: r.Host, + TLSClientConfig: rest.TLSClientConfig{CAData: r.CAData}, + BearerToken: token, + } + + return kubernetes.NewWithConfig(kubernetes.WithRESTConfig(restConfig), kubernetes.WithDisabledCachedClient()) +} diff --git a/vendor/github.com/gardener/gardener/test/utils/access/statictokenkubeconfig.go b/vendor/github.com/gardener/gardener/test/utils/access/statictokenkubeconfig.go new file mode 100644 index 00000000..5fffc883 --- /dev/null +++ b/vendor/github.com/gardener/gardener/test/utils/access/statictokenkubeconfig.go @@ -0,0 +1,30 @@ +// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package access + +import ( + "context" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/client/kubernetes" + gardenerutils "github.com/gardener/gardener/pkg/utils/gardener" +) + +// CreateShootClientFromStaticTokenKubeconfig retrieves the static token kubeconfig secret and creates a shoot client. +func CreateShootClientFromStaticTokenKubeconfig(ctx context.Context, gardenClient kubernetes.Interface, shoot *gardencorev1beta1.Shoot) (kubernetes.Interface, error) { + return kubernetes.NewClientFromSecret(ctx, gardenClient.Client(), shoot.Namespace, gardenerutils.ComputeShootProjectSecretName(shoot.Name, "kubeconfig"), + kubernetes.WithDisabledCachedClient(), + ) +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 00000000..042091d9 --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000..52ccb5a9 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,18 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. 
+ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Amazon.com, Inc +Damian Gryski +Eric Buth +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Klaus Post +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000..ea6524dd --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,41 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Alex Legg +Damian Gryski +Eric Buth +Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney +Kai Backman +Klaus Post +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000..23c6e26c --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
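+			// (Editorial note: chunkLen counts the 4-byte CRC plus the payload,
+			// so the payload length is chunkLen-checksumSize.)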
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000..e6179f65 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
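+	//
+	// (Editorial note: the low two bits of each tag byte select the element
+	// kind: 0b00 is a literal; 0b01, 0b10 and 0b11 are copies whose offsets
+	// take 1, 2 and 4 extra bytes respectively, with tagCopy1 also borrowing
+	// three bits from the tag byte.)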
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d += offset
+	//   offset += offset
+	//   // The two previous lines together means that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0.
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s new file mode 100644 index 00000000..7a3ead17 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -0,0 +1,494 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 + +loop: + // for s < len(src) + CMP R13, R6 + BEQ end + + // R4 = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R6, R6 + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R4 == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. 
It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // case x == 60: + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R6), R4 + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R6), R4 + B doLit + +tagLit62Plus: + CMPW $62, R4 + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R6), R4 + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. 
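+	//
+	// (Editorial note: tagCopy1 packs an 11-bit offset into three tag bits
+	// plus one trailing byte and encodes lengths 4..11; tagCopy2 and tagCopy4
+	// read 2- and 4-byte little-endian offsets after the tag byte.)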
+ +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R6), R5 + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R6), R5 + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R4 == length && R4 > 0 + // - R5 == offset + + // if offset <= 0 { etc } + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt + + // if d < offset { etc } + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt + + // if length > len(dst)-d { etc } + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMP $16, R4 + BGT slowForwardCopy + CMP $8, R5 + BLT slowForwardCopy + CMP $16, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. 
For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUB  $10, R14, R14
+	CMP  R14, R4
+	BGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d += offset
+	//   offset += offset
+	//   // The two previous lines together means that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMP  $8, R5
+	BGE  fixUpSlowForwardCopy
+	MOVD (R15), R3
+	MOVD R3, (R7)
+	SUB  R5, R4, R4
+	ADD  R5, R7, R7
+	ADD  R5, R5, R5
+	B    makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by R7 being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save R7 to R2 so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVD R7, R2
+	ADD  R4, R7, R7
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	MOVD $0, R1
+	CMP  R1, R4
+	BLE  loop
+	MOVD (R15), R3
+	MOVD R3, (R2)
+	ADD  $8, R15, R15
+	ADD  $8, R2, R2
+	SUB  $8, R4, R4
+	B    finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), R3
+	MOVB R3, (R7)
+	ADD  $1, R15, R15
+	ADD  $1, R7, R7
+	SUB  $1, R4, R4
+	CBNZ R4, verySlowForwardCopy
+	B    loop
+
+	// The code above handles copy tags.
+	// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMP  R10, R7
+	BNE  errCorrupt
+
+	// return 0
+	MOVD $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVD $1, R2
+	MOVD R2, ret+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go
new file mode 100644
index 00000000..7082b349
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_asm.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Snappy-Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm +// +build amd64 arm64 + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000..2f672be5 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!arm64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
+ // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000..7f236570 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. 
+// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. 
+ wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
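+// After Close, subsequent calls to Write, Flush or Close report errClosed,
+// unless an earlier write error is pending.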
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
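+	// (Editorial note: 0xfe == (64-1)<<2 | tagCopy2, i.e. a 3-byte copy
+	// element whose length field encodes 64; the 2-byte little-endian offset
+	// follows in the MOVW below.)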
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 00000000..f8d54adf --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. 
+ MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. +TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + +memclr: + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+	MOVD R8, 88(RSP)
+	CALL ·emitLiteral(SB)
+	MOVD 88(RSP), R8
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVD 56(RSP), R1
+	ADD R1, R8, R8
+
+encodeBlockEnd:
+	MOVD dst_base+0(FP), R3
+	SUB R3, R8, R8
+	MOVD R8, d+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go
new file mode 100644
index 00000000..107c1e71
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_asm.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+// +build amd64 arm64
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 00000000..296d7f0b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000..ece692ea --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. 
They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
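+	// (For reference, a sketch of that arithmetic, assuming MaxEncodedLen's
+	// formula of 32 + n + n/6: 32 + 65536 + 65536/6 = 76490.)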
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/mholt/archiver/.gitignore b/vendor/github.com/mholt/archiver/.gitignore new file mode 100644 index 00000000..ac8f8b25 --- /dev/null +++ b/vendor/github.com/mholt/archiver/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +_gitignore +builds/ +*.test +cmd/archiver/archiver diff --git a/vendor/github.com/mholt/archiver/.travis.yml b/vendor/github.com/mholt/archiver/.travis.yml new file mode 100644 index 00000000..7a8f349a --- /dev/null +++ b/vendor/github.com/mholt/archiver/.travis.yml @@ -0,0 +1,21 @@ +language: go + +go: + - 1.x + +env: + - CGO_ENABLED=0 + +install: + - go get -t ./... + - go get golang.org/x/lint/golint + - go get github.com/gordonklaus/ineffassign + +script: + - diff <(echo -n) <(gofmt -s -d .) + - ineffassign . + - go vet ./... + - go test ./... + +after_script: + - golint ./... diff --git a/vendor/github.com/mholt/archiver/LICENSE b/vendor/github.com/mholt/archiver/LICENSE new file mode 100644 index 00000000..315d04f2 --- /dev/null +++ b/vendor/github.com/mholt/archiver/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Matthew Holt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file
diff --git a/vendor/github.com/mholt/archiver/README.md b/vendor/github.com/mholt/archiver/README.md
new file mode 100644
index 00000000..85e6db99
--- /dev/null
+++ b/vendor/github.com/mholt/archiver/README.md
@@ -0,0 +1,255 @@
+archiver [![archiver GoDoc](https://img.shields.io/badge/reference-godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/mholt/archiver) [![Linux Build Status](https://img.shields.io/travis/mholt/archiver.svg?style=flat-square&label=linux+build)](https://travis-ci.org/mholt/archiver) [![Windows Build Status](https://img.shields.io/appveyor/ci/mholt/archiver.svg?style=flat-square&label=windows+build)](https://ci.appveyor.com/project/mholt/archiver)
+========
+
+Introducing **Archiver 3.1** - a cross-platform, multi-format archive utility and Go library. A powerful and flexible library meets an elegant CLI in this generic replacement for several platform-specific, format-specific archive utilities.
+
+## Features
+
+Package archiver makes it trivially easy to make and extract common archive formats such as zip and tarball (and its compressed variants). Simply name the input and output file(s). The `arc` command runs the same on all platforms and has no external dependencies (not even libc). It is powered by the Go standard library and several third-party, pure-Go libraries.
+
+Files are put into the root of the archive; directories are recursively added, preserving structure.
+
+- Make whole archives from a list of files
+- Open whole archives to a folder
+- Extract specific files/folders from archives
+- Stream files in and out of archives without needing actual files on disk
+- Traverse archive contents without loading them
+- Compress files
+- Decompress files
+- Streaming compression and decompression
+- Several archive and compression formats supported
+
+### Format-dependent features
+
+- Optionally create a top-level folder to avoid littering a directory or archive root with files
+- Toggle overwrite existing files
+- Adjust compression level
+- Zip: store (not compress) already-compressed files
+- Make all necessary directories
+- Open password-protected RAR archives
+- Optionally continue with other files after an error
+
+### Supported archive formats
+
+- .zip
+- .tar
+- .tar.gz or .tgz
+- .tar.bz2 or .tbz2
+- .tar.xz or .txz
+- .tar.lz4 or .tlz4
+- .tar.sz or .tsz
+- .rar (open only)
+
+### Supported compression formats
+
+- bzip2
+- gzip
+- lz4
+- snappy (sz)
+- xz
+
+
+## Install
+
+```bash
+go get -u github.com/mholt/archiver/cmd/arc
+```
+
+Or download binaries from the [releases](https://github.com/mholt/archiver/releases) page.
+
+
+## Command Use
+
+### Make new archive
+
+```bash
+# Syntax: arc archive [archive name] [input files...]
+
+$ arc archive test.tar.gz file1.txt images/file2.jpg folder/subfolder
+```
+
+(At least one input file is required.)
+
+### Extract entire archive
+
+```bash
+# Syntax: arc unarchive [archive name] [destination]
+
+$ arc unarchive test.tar.gz
+```
+
+(The destination path is optional; default is current directory. See the sketch below.)
+
+The archive name must end with a supported file extension—this is how it knows what kind of archive to make. Run `arc help` for more help.
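+
+For example, a quick sketch (the folder name `extracted/` here is hypothetical) of extracting into an explicit destination instead of the current directory:
+
+```bash
+$ arc unarchive test.tar.gz extracted/
+```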
+ +### List archive contents + +```bash +# Syntax: arc ls [archive name] + +$ arc ls caddy_dist.tar.gz +drwxr-xr-x matt staff 0 2018-09-19 15:47:18 -0600 MDT dist/ +-rw-r--r-- matt staff 6148 2017-08-07 18:34:22 -0600 MDT dist/.DS_Store +-rw-r--r-- matt staff 22481 2018-09-19 15:47:18 -0600 MDT dist/CHANGES.txt +-rw-r--r-- matt staff 17189 2018-09-19 15:47:18 -0600 MDT dist/EULA.txt +-rw-r--r-- matt staff 25261 2016-03-07 16:32:00 -0700 MST dist/LICENSES.txt +-rw-r--r-- matt staff 1017 2018-09-19 15:47:18 -0600 MDT dist/README.txt +-rw-r--r-- matt staff 288 2016-03-21 11:52:38 -0600 MDT dist/gitcookie.sh.enc +... +``` + +### Extract a specific file or folder from an archive + +```bash +# Syntax: arc extract [archive name] [path in archive] [destination on disk] + +$ arc extract test.tar.gz foo/hello.txt extracted/hello.txt +``` + +### Compress a single file + +```bash +# Syntax: arc compress [input file] [output file] + +$ arc compress test.txt compressed_test.txt.gz +$ arc compress test.txt gz +``` + +For convenience, the output file (second argument) may simply be a compression format (without leading dot), in which case the output filename will be the same as the input filename but with the format extension appended, and the input file will be deleted if successful. + +### Decompress a single file + +```bash +# Syntax: arc decompress [input file] [output file] + +$ arc decompress test.txt.gz original_test.txt +$ arc decompress test.txt.gz +``` + +For convenience, the output file (second argument) may be omitted. In that case, the output filename will have the same name as the input filename, but with the compression extension stripped from the end; and the input file will be deleted if successful. + +### Flags + +Flags are specified before the subcommand. Use `arc help` or `arc -h` to get usage help and a description of flags with their default values. + +## Library Use + +The archiver package allows you to easily create and open archives, walk their contents, extract specific files, compress and decompress files, and even stream archives in and out using pure io.Reader and io.Writer interfaces, without ever needing to touch the disk. + +```go +import "github.com/mholt/archiver" +``` + +[See the package's GoDoc](https://godoc.org/github.com/mholt/archiver) for full API documentation. + +For example, creating or unpacking an archive file: + +```go +err := archiver.Archive([]string{"testdata", "other/file.txt"}, "test.zip") +// ... +err = archiver.Unarchive("test.tar.gz", "test") +``` + +The archive format is determined by file extension. (There are [several functions in this package](https://godoc.org/github.com/mholt/archiver) which perform a task by inferring the format from file extension or file header, including `Archive()`, `Unarchive()`, `CompressFile()`, and `DecompressFile()`.) 
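+
+For instance, a minimal sketch of the same extension-based inference applied to single-file compression (the file names here are hypothetical):
+
+```go
+// Compression format inferred from the destination's ".gz" extension.
+err := archiver.CompressFile("notes.txt", "notes.txt.gz")
+if err != nil {
+	return err
+}
+
+// Decompression format inferred from the source's ".gz" extension.
+err = archiver.DecompressFile("notes.txt.gz", "notes.txt")
+```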
+
+To configure the archiver used to perform an operation, create an instance of the format's type:
+
+```go
+z := archiver.Zip{
+	CompressionLevel:       flate.DefaultCompression,
+	MkdirAll:               true,
+	SelectiveCompression:   true,
+	ContinueOnError:        false,
+	OverwriteExisting:      false,
+	ImplicitTopLevelFolder: false,
+}
+
+err := z.Archive([]string{"testdata", "other/file.txt"}, "/Users/matt/Desktop/test.zip")
+```
+
+Inspecting an archive:
+
+```go
+err = z.Walk("/Users/matt/Desktop/test.zip", func(f archiver.File) error {
+	zfh, ok := f.Header.(zip.FileHeader)
+	if ok {
+		fmt.Println("Filename:", zfh.Name)
+	}
+	return nil
+})
+```
+
+Streaming files into an archive that is being written to the HTTP response:
+
+```go
+err = z.Create(responseWriter)
+if err != nil {
+	return err
+}
+defer z.Close()
+
+for _, fname := range filenames {
+	info, err := os.Stat(fname)
+	if err != nil {
+		return err
+	}
+
+	// get file's name for the inside of the archive
+	internalName, err := archiver.NameInArchive(info, fname, fname)
+	if err != nil {
+		return err
+	}
+
+	// open the file
+	file, err := os.Open(fname)
+	if err != nil {
+		return err
+	}
+
+	// write it to the archive
+	err = z.Write(archiver.File{
+		FileInfo: archiver.FileInfo{
+			FileInfo:   info,
+			CustomName: internalName,
+		},
+		ReadCloser: file,
+	})
+	file.Close()
+	if err != nil {
+		return err
+	}
+}
+```
+
+The `archiver.File` type allows you to use actual files with archives, or to mimic files when you only have streams.
+
+There's a lot more that can be done, too. [See the GoDoc](https://godoc.org/github.com/mholt/archiver) for full API documentation.
+
+**Security note: This package does NOT attempt to mitigate zip-slip attacks.** It is [extremely difficult](https://github.com/rubyzip/rubyzip/pull/376) [to do properly](https://github.com/mholt/archiver/pull/65#issuecomment-395988244) and [seemingly impossible to mitigate effectively across platforms](https://github.com/golang/go/issues/20126). [Attempted fixes have broken processing of legitimate files in production](https://github.com/mholt/archiver/pull/70#issuecomment-423267320), rendering the program unusable. Our recommendation instead is to inspect the contents of an untrusted archive before extracting it (this package provides `Walkers`) and decide if you want to proceed with extraction.
+
+
+## Project Values
+
+This project has a few principle-based goals that guide its development:
+
+- **Do our thing really well.** Our thing is creating, opening, inspecting, compressing, and streaming archive files. It is not meant to be a replacement for specific archive format tools like tar, zip, etc. that have lots of features and customizability. (Some customizability is OK, but not to the extent that it becomes overly complicated or error-prone.)
+
+- **Have good tests.** Changes should be covered by tests.
+
+- **Limit dependencies.** Keep the package lightweight.
+
+- **Pure Go.** This means no cgo or other external/system dependencies. This package should be able to stand on its own and cross-compile easily to any platform -- and that includes its library dependencies.
+
+- **Idiomatic Go.** Keep interfaces small, variable names semantic, vet shows no errors, the linter is generally quiet, etc.
+
+- **Be elegant.** This package should be elegant to use and its code should be elegant when reading and testing. If it doesn't feel good, fix it up.
+
+- **Well-documented.** Use comments prudently; explain why non-obvious code is necessary (and use tests to enforce it). 
Keep the docs updated, and have examples where helpful. + +- **Keep it efficient.** This often means keep it simple. Fast code is valuable. + +- **Consensus.** Contributions should ideally be approved by multiple reviewers before being merged. Generally, avoid merging multi-chunk changes that do not go through at least one or two iterations/reviews. Except for trivial changes, PRs are seldom ready to merge right away. + +- **Have fun contributing.** Coding is awesome! + +We welcome contributions and appreciate your efforts! However, please open issues to discuss any changes before spending the time preparing a pull request. This will save time, reduce frustration, and help coordinate the work. Thank you! diff --git a/vendor/github.com/mholt/archiver/appveyor.yml b/vendor/github.com/mholt/archiver/appveyor.yml new file mode 100644 index 00000000..2a6d2d8f --- /dev/null +++ b/vendor/github.com/mholt/archiver/appveyor.yml @@ -0,0 +1,31 @@ +version: "{build}" + +clone_folder: c:\gopath\src\github.com\mholt\archiver + +environment: + GOPATH: c:\gopath + CGO_ENABLED: 0 + +stack: go 1.11 + +install: + - go get ./... + - go get golang.org/x/lint/golint + - go get github.com/gordonklaus/ineffassign + - set PATH=%GOPATH%\bin;%PATH% + +build: off + +before_test: + - go version + - go env + +test_script: + - go vet ./... + - go test ./... + - ineffassign . + +after_test: + - golint ./... + +deploy: off diff --git a/vendor/github.com/mholt/archiver/archiver.go b/vendor/github.com/mholt/archiver/archiver.go new file mode 100644 index 00000000..5cfdc78e --- /dev/null +++ b/vendor/github.com/mholt/archiver/archiver.go @@ -0,0 +1,498 @@ +// Package archiver facilitates convenient, cross-platform, high-level archival +// and compression operations for a variety of formats and compression algorithms. +// +// This package and its dependencies are written in pure Go (not cgo) and +// have no external dependencies, so they should run on all major platforms. +// (It also comes with a command for CLI use in the cmd/arc folder.) +// +// Each supported format or algorithm has a unique type definition that +// implements the interfaces corresponding to the tasks they perform. For +// example, the Tar type implements Reader, Writer, Archiver, Unarchiver, +// Walker, and several other interfaces. +// +// The most common functions are implemented at the package level for +// convenience: Archive, Unarchive, Walk, Extract, CompressFile, and +// DecompressFile. With these, the format type is chosen implicitly, +// and a sane default configuration is used. +// +// To customize a format's configuration, create an instance of its struct +// with its fields set to the desired values. You can also use and customize +// the handy Default* (replace the wildcard with the format's type name) +// for a quick, one-off instance of the format's type. +// +// To obtain a new instance of a format's struct with the default config, use +// the provided New*() functions. This is not required, however. An empty +// struct of any type, for example &Zip{} is perfectly valid, so you may +// create the structs manually, too. The examples on this page show how +// either may be done. +// +// See the examples in this package for an idea of how to wield this package +// for common tasks. Most of the examples which are specific to a certain +// format type, for example Zip, can be applied to other types that implement +// the same interfaces. 
For example, using Zip is very similar to using Tar +// or TarGz (etc), and using Gz is very similar to using Sz or Xz (etc). +// +// When creating archives or compressing files using a specific instance of +// the format's type, the name of the output file MUST match that of the +// format, to prevent confusion later on. If you absolutely need a different +// file extension, you may rename the file afterward. +// +// Values in this package are NOT safe for concurrent use. There is no +// performance benefit of reusing them, and since they may contain important +// state (especially while walking, reading, or writing), it is NOT +// recommended to reuse values from this package or change their configuration +// after they are in use. +package archiver + +import ( + "fmt" + "io" + "os" + "path" + "path/filepath" + "runtime" + "strings" +) + +// Archiver is a type that can create an archive file +// from a list of source file names. +type Archiver interface { + ExtensionChecker + + // Archive adds all the files or folders in sources + // to an archive to be created at destination. Files + // are added to the root of the archive, and directories + // are walked and recursively added, preserving folder + // structure. + Archive(sources []string, destination string) error +} + +// ExtensionChecker validates file extensions +type ExtensionChecker interface { + CheckExt(name string) error +} + +// Unarchiver is a type that can extract archive files +// into a folder. +type Unarchiver interface { + Unarchive(source, destination string) error +} + +// Writer can write discrete byte streams of files to +// an output stream. +type Writer interface { + Create(out io.Writer) error + Write(f File) error + Close() error +} + +// Reader can read discrete byte streams of files from +// an input stream. +type Reader interface { + Open(in io.Reader, size int64) error + Read() (File, error) + Close() error +} + +// Extractor can extract a specific file from a source +// archive to a specific destination folder on disk. +type Extractor interface { + Extract(source, target, destination string) error +} + +// File provides methods for accessing information about +// or contents of a file within an archive. +type File struct { + os.FileInfo + + // The original header info; depends on + // type of archive -- could be nil, too. + Header interface{} + + // Allow the file contents to be read (and closed) + io.ReadCloser +} + +// FileInfo is an os.FileInfo but optionally with +// a custom name, useful if dealing with files that +// are not actual files on disk, or which have a +// different name in an archive than on disk. +type FileInfo struct { + os.FileInfo + CustomName string +} + +// Name returns fi.CustomName if not empty; +// otherwise it returns fi.FileInfo.Name(). +func (fi FileInfo) Name() string { + if fi.CustomName != "" { + return fi.CustomName + } + return fi.FileInfo.Name() +} + +// ReadFakeCloser is an io.Reader that has +// a no-op close method to satisfy the +// io.ReadCloser interface. +type ReadFakeCloser struct { + io.Reader +} + +// Close implements io.Closer. +func (rfc ReadFakeCloser) Close() error { return nil } + +// Walker can walk an archive file and return information +// about each item in the archive. +type Walker interface { + Walk(archive string, walkFn WalkFunc) error +} + +// WalkFunc is called at each item visited by Walk. +// If an error is returned, the walk may continue +// if the Walker is configured to continue on error. 
+// The sole exception is the error value ErrStopWalk,
+// which stops the walk without an actual error.
+type WalkFunc func(f File) error
+
+// ErrStopWalk signals Walk to break without error.
+var ErrStopWalk = fmt.Errorf("walk stopped")
+
+// Compressor compresses to out what it reads from in.
+// It also ensures a compatible or matching file extension.
+type Compressor interface {
+	ExtensionChecker
+	Compress(in io.Reader, out io.Writer) error
+}
+
+// Decompressor decompresses to out what it reads from in.
+type Decompressor interface {
+	Decompress(in io.Reader, out io.Writer) error
+}
+
+// Matcher is a type that can return whether the given
+// file appears to match the implementation's format.
+// Implementations should restore the file's read position
+// to where it was when the method was called.
+type Matcher interface {
+	Match(io.ReadSeeker) (bool, error)
+}
+
+// Archive creates an archive of the source files to a new file at destination.
+// The archive format is chosen implicitly by file extension.
+func Archive(sources []string, destination string) error {
+	aIface, err := ByExtension(destination)
+	if err != nil {
+		return err
+	}
+	a, ok := aIface.(Archiver)
+	if !ok {
+		return fmt.Errorf("format specified by destination filename is not an archive format: %s (%T)", destination, aIface)
+	}
+	return a.Archive(sources, destination)
+}
+
+// Unarchive unarchives the given archive file into the destination folder.
+// The archive format is selected implicitly.
+func Unarchive(source, destination string) error {
+	uaIface, err := ByExtension(source)
+	if err != nil {
+		return err
+	}
+	u, ok := uaIface.(Unarchiver)
+	if !ok {
+		return fmt.Errorf("format specified by source filename is not an archive format: %s (%T)", source, uaIface)
+	}
+	return u.Unarchive(source, destination)
+}
+
+// Walk calls walkFn for each file within the given archive file.
+// The archive format is chosen implicitly.
+func Walk(archive string, walkFn WalkFunc) error {
+	wIface, err := ByExtension(archive)
+	if err != nil {
+		return err
+	}
+	w, ok := wIface.(Walker)
+	if !ok {
+		return fmt.Errorf("format specified by archive filename is not a walker format: %s (%T)", archive, wIface)
+	}
+	return w.Walk(archive, walkFn)
+}
+
+// Extract extracts a single file from the given source archive. If the target
+// is a directory, the entire folder will be extracted into destination. The
+// archive format is chosen implicitly.
+func Extract(source, target, destination string) error {
+	eIface, err := ByExtension(source)
+	if err != nil {
+		return err
+	}
+	e, ok := eIface.(Extractor)
+	if !ok {
+		return fmt.Errorf("format specified by source filename is not an extractor format: %s (%T)", source, eIface)
+	}
+	return e.Extract(source, target, destination)
+}
+
+// CompressFile is a convenience function to simply compress a file.
+// The compression algorithm is selected implicitly based on the
+// destination's extension.
+func CompressFile(source, destination string) error {
+	cIface, err := ByExtension(destination)
+	if err != nil {
+		return err
+	}
+	c, ok := cIface.(Compressor)
+	if !ok {
+		return fmt.Errorf("format specified by destination filename is not a recognized compression algorithm: %s", destination)
+	}
+	return FileCompressor{Compressor: c}.CompressFile(source, destination)
+}
+
+// DecompressFile is a convenience function to simply decompress a file.
+// The compression algorithm is selected implicitly based on the
+// source's extension.
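+//
+// For example, a minimal sketch (the file names are illustrative):
+//
+//	err := archiver.DecompressFile("report.csv.gz", "report.csv")
+//	if err != nil {
+//		log.Fatal(err)
+//	}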
+func DecompressFile(source, destination string) error { + cIface, err := ByExtension(source) + if err != nil { + return err + } + c, ok := cIface.(Decompressor) + if !ok { + return fmt.Errorf("format specified by source filename is not a recognized compression algorithm: %s", source) + } + return FileCompressor{Decompressor: c}.DecompressFile(source, destination) +} + +func fileExists(name string) bool { + _, err := os.Stat(name) + return !os.IsNotExist(err) +} + +func mkdir(dirPath string) error { + err := os.MkdirAll(dirPath, 0755) + if err != nil { + return fmt.Errorf("%s: making directory: %v", dirPath, err) + } + return nil +} + +func writeNewFile(fpath string, in io.Reader, fm os.FileMode) error { + err := os.MkdirAll(filepath.Dir(fpath), 0755) + if err != nil { + return fmt.Errorf("%s: making directory for file: %v", fpath, err) + } + + out, err := os.Create(fpath) + if err != nil { + return fmt.Errorf("%s: creating new file: %v", fpath, err) + } + defer out.Close() + + err = out.Chmod(fm) + if err != nil && runtime.GOOS != "windows" { + return fmt.Errorf("%s: changing file mode: %v", fpath, err) + } + + _, err = io.Copy(out, in) + if err != nil { + return fmt.Errorf("%s: writing file: %v", fpath, err) + } + return nil +} + +func writeNewSymbolicLink(fpath string, target string) error { + err := os.MkdirAll(filepath.Dir(fpath), 0755) + if err != nil { + return fmt.Errorf("%s: making directory for file: %v", fpath, err) + } + + err = os.Symlink(target, fpath) + if err != nil { + return fmt.Errorf("%s: making symbolic link for: %v", fpath, err) + } + + return nil +} + +func writeNewHardLink(fpath string, target string) error { + err := os.MkdirAll(filepath.Dir(fpath), 0755) + if err != nil { + return fmt.Errorf("%s: making directory for file: %v", fpath, err) + } + + err = os.Link(target, fpath) + if err != nil { + return fmt.Errorf("%s: making hard link for: %v", fpath, err) + } + + return nil +} + +// within returns true if sub is within or equal to parent. +func within(parent, sub string) bool { + rel, err := filepath.Rel(parent, sub) + if err != nil { + return false + } + return !strings.Contains(rel, "..") +} + +// multipleTopLevels returns true if the paths do not +// share a common top-level folder. +func multipleTopLevels(paths []string) bool { + if len(paths) < 2 { + return false + } + var lastTop string + for _, p := range paths { + p = strings.TrimPrefix(strings.Replace(p, `\`, "/", -1), "/") + for { + next := path.Dir(p) + if next == "." { + break + } + p = next + } + if lastTop == "" { + lastTop = p + } + if p != lastTop { + return true + } + } + return false +} + +// folderNameFromFileName returns a name for a folder +// that is suitable based on the filename, which will +// be stripped of its extensions. +func folderNameFromFileName(filename string) string { + base := filepath.Base(filename) + firstDot := strings.Index(base, ".") + if firstDot > -1 { + return base[:firstDot] + } + return base +} + +// makeNameInArchive returns the filename for the file given by fpath to be used within +// the archive. sourceInfo is the FileInfo obtained by calling os.Stat on source, and baseDir +// is an optional base directory that becomes the root of the archive. fpath should be the +// unaltered file path of the file given to a filepath.WalkFunc. 
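+//
+// For example (illustrative paths), walking source "testdata" and reaching
+// fpath "testdata/sub/a.txt" with baseDir "top":
+//
+//	info, _ := os.Stat("testdata")
+//	name, _ := makeNameInArchive(info, "testdata", "top", "testdata/sub/a.txt")
+//	// name == "top/testdata/sub/a.txt"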
+func makeNameInArchive(sourceInfo os.FileInfo, source, baseDir, fpath string) (string, error) {
+	name := filepath.Base(fpath) // start with the file or dir name
+	if sourceInfo.IsDir() {
+		// preserve internal directory structure; that's the path components
+		// between the source directory's leaf and this file's leaf
+		dir, err := filepath.Rel(filepath.Dir(source), filepath.Dir(fpath))
+		if err != nil {
+			return "", err
+		}
+		// prepend the internal directory structure to the leaf name,
+		// and convert path separators to forward slashes as per spec
+		name = path.Join(filepath.ToSlash(dir), name)
+	}
+	return path.Join(baseDir, name), nil // prepend the base directory
+}
+
+// NameInArchive returns a name for the file at fpath suitable for
+// the inside of an archive. The source and its associated sourceInfo
+// is the path where walking a directory started, and if no directory
+// was walked, source may == fpath. The returned name is essentially
+// the components of the path between source and fpath, preserving
+// the internal directory structure.
+func NameInArchive(sourceInfo os.FileInfo, source, fpath string) (string, error) {
+	return makeNameInArchive(sourceInfo, source, "", fpath)
+}
+
+// ByExtension returns an archiver and unarchiver, or compressor
+// and decompressor, based on the extension of the filename.
+func ByExtension(filename string) (interface{}, error) {
+	var ec interface{}
+	for _, c := range extCheckers {
+		if err := c.CheckExt(filename); err == nil {
+			ec = c
+			break
+		}
+	}
+	switch ec.(type) {
+	case *Rar:
+		return NewRar(), nil
+	case *Tar:
+		return NewTar(), nil
+	case *TarBz2:
+		return NewTarBz2(), nil
+	case *TarGz:
+		return NewTarGz(), nil
+	case *TarLz4:
+		return NewTarLz4(), nil
+	case *TarSz:
+		return NewTarSz(), nil
+	case *TarXz:
+		return NewTarXz(), nil
+	case *Zip:
+		return NewZip(), nil
+	case *Gz:
+		return NewGz(), nil
+	case *Bz2:
+		return NewBz2(), nil
+	case *Lz4:
+		return NewLz4(), nil
+	case *Snappy:
+		return NewSnappy(), nil
+	case *Xz:
+		return NewXz(), nil
+	}
+	return nil, fmt.Errorf("format unrecognized by filename: %s", filename)
+}
+
+// ByHeader returns the unarchiver value that matches the input's
+// file header. It does not affect the current read position.
+func ByHeader(input io.ReadSeeker) (Unarchiver, error) {
+	var matcher Matcher
+	for _, m := range matchers {
+		ok, err := m.Match(input)
+		if err != nil {
+			return nil, fmt.Errorf("matching on format %s: %v", m, err)
+		}
+		if ok {
+			matcher = m
+			break
+		}
+	}
+	switch matcher.(type) {
+	case *Zip:
+		return NewZip(), nil
+	case *Tar:
+		return NewTar(), nil
+	case *Rar:
+		return NewRar(), nil
+	}
+	return nil, fmt.Errorf("format unrecognized")
+}
+
+// extCheckers is a list of the format implementations
+// that can check extensions. Only to be used for
+// checking extensions - not any archival operations.
+var extCheckers = []ExtensionChecker{
+	&TarBz2{},
+	&TarGz{},
+	&TarLz4{},
+	&TarSz{},
+	&TarXz{},
+	&Rar{},
+	&Tar{},
+	&Zip{},
+	&Gz{},
+	&Bz2{},
+	&Lz4{},
+	&Snappy{},
+	&Xz{},
+}
+
+var matchers = []Matcher{
+	&Rar{},
+	&Tar{},
+	&Zip{},
+}
diff --git a/vendor/github.com/mholt/archiver/build.bash b/vendor/github.com/mholt/archiver/build.bash
new file mode 100644
index 00000000..bc2c3d4a
--- /dev/null
+++ b/vendor/github.com/mholt/archiver/build.bash
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -ex
+
+# This script builds archiver for most common platforms.
+ +export CGO_ENABLED=0 + +cd cmd/arc +GOOS=linux GOARCH=386 go build -o ../../builds/arc_linux_386 +GOOS=linux GOARCH=amd64 go build -o ../../builds/arc_linux_amd64 +GOOS=linux GOARCH=arm go build -o ../../builds/arc_linux_arm7 +GOOS=linux GOARCH=arm64 go build -o ../../builds/arc_linux_arm64 +GOOS=darwin GOARCH=amd64 go build -o ../../builds/arc_mac_amd64 +GOOS=windows GOARCH=amd64 go build -o ../../builds/arc_windows_amd64.exe +GOOS=freebsd GOARCH=amd64 go build -o ../../builds/arc_freebsd_amd64 +GOOS=openbsd GOARCH=amd64 go build -o ../../builds/arc_openbsd_amd64 +cd ../.. diff --git a/vendor/github.com/mholt/archiver/bz2.go b/vendor/github.com/mholt/archiver/bz2.go new file mode 100644 index 00000000..2eb4ac2b --- /dev/null +++ b/vendor/github.com/mholt/archiver/bz2.go @@ -0,0 +1,64 @@ +package archiver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/dsnet/compress/bzip2" +) + +// Bz2 facilitates bzip2 compression. +type Bz2 struct { + CompressionLevel int +} + +// Compress reads in, compresses it, and writes it to out. +func (bz *Bz2) Compress(in io.Reader, out io.Writer) error { + w, err := bzip2.NewWriter(out, &bzip2.WriterConfig{ + Level: bz.CompressionLevel, + }) + if err != nil { + return err + } + defer w.Close() + _, err = io.Copy(w, in) + return err +} + +// Decompress reads in, decompresses it, and writes it to out. +func (bz *Bz2) Decompress(in io.Reader, out io.Writer) error { + r, err := bzip2.NewReader(in, nil) + if err != nil { + return err + } + defer r.Close() + _, err = io.Copy(out, r) + return err +} + +// CheckExt ensures the file extension matches the format. +func (bz *Bz2) CheckExt(filename string) error { + if filepath.Ext(filename) != ".bz2" { + return fmt.Errorf("filename must have a .bz2 extension") + } + return nil +} + +func (bz *Bz2) String() string { return "bz2" } + +// NewBz2 returns a new, default instance ready to be customized and used. +func NewBz2() *Bz2 { + return &Bz2{ + CompressionLevel: bzip2.DefaultCompression, + } +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Compressor(new(Bz2)) + _ = Decompressor(new(Bz2)) +) + +// DefaultBz2 is a default instance that is conveniently ready to use. +var DefaultBz2 = NewBz2() diff --git a/vendor/github.com/mholt/archiver/filecompressor.go b/vendor/github.com/mholt/archiver/filecompressor.go new file mode 100644 index 00000000..ab1fd3b8 --- /dev/null +++ b/vendor/github.com/mholt/archiver/filecompressor.go @@ -0,0 +1,67 @@ +package archiver + +import ( + "fmt" + "os" +) + +// FileCompressor can compress and decompress single files. +type FileCompressor struct { + Compressor + Decompressor + + // Whether to overwrite existing files when creating files. + OverwriteExisting bool +} + +// CompressFile reads the source file and compresses it to destination. +// The destination must have a matching extension. +func (fc FileCompressor) CompressFile(source, destination string) error { + if err := fc.CheckExt(destination); err != nil { + return err + } + if fc.Compressor == nil { + return fmt.Errorf("no compressor specified") + } + if !fc.OverwriteExisting && fileExists(destination) { + return fmt.Errorf("file exists: %s", destination) + } + + in, err := os.Open(source) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(destination) + if err != nil { + return err + } + defer out.Close() + + return fc.Compress(in, out) +} + +// DecompressFile reads the source file and decompresses it to destination. 
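+//
+// For example, a minimal sketch (the file names are illustrative):
+//
+//	fc := archiver.FileCompressor{Decompressor: archiver.NewGz()}
+//	err := fc.DecompressFile("data.csv.gz", "data.csv")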
+func (fc FileCompressor) DecompressFile(source, destination string) error { + if fc.Decompressor == nil { + return fmt.Errorf("no decompressor specified") + } + if !fc.OverwriteExisting && fileExists(destination) { + return fmt.Errorf("file exists: %s", destination) + } + + in, err := os.Open(source) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(destination) + if err != nil { + return err + } + defer out.Close() + + return fc.Decompress(in, out) +} diff --git a/vendor/github.com/mholt/archiver/gz.go b/vendor/github.com/mholt/archiver/gz.go new file mode 100644 index 00000000..73671745 --- /dev/null +++ b/vendor/github.com/mholt/archiver/gz.go @@ -0,0 +1,61 @@ +package archiver + +import ( + "compress/gzip" + "fmt" + "io" + "path/filepath" +) + +// Gz facilitates gzip compression. +type Gz struct { + CompressionLevel int +} + +// Compress reads in, compresses it, and writes it to out. +func (gz *Gz) Compress(in io.Reader, out io.Writer) error { + w, err := gzip.NewWriterLevel(out, gz.CompressionLevel) + if err != nil { + return err + } + defer w.Close() + _, err = io.Copy(w, in) + return err +} + +// Decompress reads in, decompresses it, and writes it to out. +func (gz *Gz) Decompress(in io.Reader, out io.Writer) error { + r, err := gzip.NewReader(in) + if err != nil { + return err + } + defer r.Close() + _, err = io.Copy(out, r) + return err +} + +// CheckExt ensures the file extension matches the format. +func (gz *Gz) CheckExt(filename string) error { + if filepath.Ext(filename) != ".gz" { + return fmt.Errorf("filename must have a .gz extension") + } + return nil +} + +func (gz *Gz) String() string { return "gz" } + +// NewGz returns a new, default instance ready to be customized and used. +func NewGz() *Gz { + return &Gz{ + CompressionLevel: gzip.DefaultCompression, + } +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Compressor(new(Gz)) + _ = Decompressor(new(Gz)) +) + +// DefaultGz is a default instance that is conveniently ready to use. +var DefaultGz = NewGz() diff --git a/vendor/github.com/mholt/archiver/lz4.go b/vendor/github.com/mholt/archiver/lz4.go new file mode 100644 index 00000000..daff631d --- /dev/null +++ b/vendor/github.com/mholt/archiver/lz4.go @@ -0,0 +1,56 @@ +package archiver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/pierrec/lz4" +) + +// Lz4 facilitates LZ4 compression. +type Lz4 struct { + CompressionLevel int +} + +// Compress reads in, compresses it, and writes it to out. +func (lz *Lz4) Compress(in io.Reader, out io.Writer) error { + w := lz4.NewWriter(out) + w.Header.CompressionLevel = lz.CompressionLevel + defer w.Close() + _, err := io.Copy(w, in) + return err +} + +// Decompress reads in, decompresses it, and writes it to out. +func (lz *Lz4) Decompress(in io.Reader, out io.Writer) error { + r := lz4.NewReader(in) + _, err := io.Copy(out, r) + return err +} + +// CheckExt ensures the file extension matches the format. +func (lz *Lz4) CheckExt(filename string) error { + if filepath.Ext(filename) != ".lz4" { + return fmt.Errorf("filename must have a .lz4 extension") + } + return nil +} + +func (lz *Lz4) String() string { return "lz4" } + +// NewLz4 returns a new, default instance ready to be customized and used. +func NewLz4() *Lz4 { + return &Lz4{ + CompressionLevel: 9, // https://github.com/lz4/lz4/blob/1b819bfd633ae285df2dfe1b0589e1ec064f2873/lib/lz4hc.h#L48 + } +} + +// Compile-time checks to ensure type implements desired interfaces. 
+var ( + _ = Compressor(new(Lz4)) + _ = Decompressor(new(Lz4)) +) + +// DefaultLz4 is a default instance that is conveniently ready to use. +var DefaultLz4 = NewLz4() diff --git a/vendor/github.com/mholt/archiver/rar.go b/vendor/github.com/mholt/archiver/rar.go new file mode 100644 index 00000000..62fb900b --- /dev/null +++ b/vendor/github.com/mholt/archiver/rar.go @@ -0,0 +1,390 @@ +package archiver + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/nwaples/rardecode" +) + +// Rar provides facilities for reading RAR archives. +// See https://www.rarlab.com/technote.htm. +type Rar struct { + // Whether to overwrite existing files; if false, + // an error is returned if the file exists. + OverwriteExisting bool + + // Whether to make all the directories necessary + // to create a rar archive in the desired path. + MkdirAll bool + + // A single top-level folder can be implicitly + // created by the Unarchive method if the files + // to be extracted from the archive do not all + // have a common root. This roughly mimics the + // behavior of archival tools integrated into OS + // file browsers which create a subfolder to + // avoid unexpectedly littering the destination + // folder with potentially many files, causing a + // problematic cleanup/organization situation. + // This feature is available for both creation + // and extraction of archives, but may be slightly + // inefficient with lots and lots of files, + // especially on extraction. + ImplicitTopLevelFolder bool + + // If true, errors encountered during reading + // or writing a single file will be logged and + // the operation will continue on remaining files. + ContinueOnError bool + + // The password to open archives (optional). + Password string + + rr *rardecode.Reader // underlying stream reader + rc *rardecode.ReadCloser // supports multi-volume archives (files only) +} + +// CheckExt ensures the file extension matches the format. +func (*Rar) CheckExt(filename string) error { + if !strings.HasSuffix(filename, ".rar") { + return fmt.Errorf("filename must have a .rar extension") + } + return nil +} + +// Unarchive unpacks the .rar file at source to destination. +// Destination will be treated as a folder name. It supports +// multi-volume archives. +func (r *Rar) Unarchive(source, destination string) error { + if !fileExists(destination) && r.MkdirAll { + err := mkdir(destination) + if err != nil { + return fmt.Errorf("preparing destination: %v", err) + } + } + + // if the files in the archive do not all share a common + // root, then make sure we extract to a single subfolder + // rather than potentially littering the destination... + if r.ImplicitTopLevelFolder { + var err error + destination, err = r.addTopLevelFolder(source, destination) + if err != nil { + return fmt.Errorf("scanning source archive: %v", err) + } + } + + err := r.OpenFile(source) + if err != nil { + return fmt.Errorf("opening rar archive for reading: %v", err) + } + defer r.Close() + + for { + err := r.unrarNext(destination) + if err == io.EOF { + break + } + if err != nil { + if r.ContinueOnError { + log.Printf("[ERROR] Reading file in rar archive: %v", err) + continue + } + return fmt.Errorf("reading file in rar archive: %v", err) + } + } + + return nil +} + +// addTopLevelFolder scans the files contained inside +// the tarball named sourceArchive and returns a modified +// destination if all the files do not share the same +// top-level folder. 
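+//
+// For example (illustrative names): if "photos.rar" holds the top-level
+// entries "a/" and "b/", extraction to "out" is redirected to "out/photos"
+// so the two trees do not litter the destination folder.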
+func (r *Rar) addTopLevelFolder(sourceArchive, destination string) (string, error) { + file, err := os.Open(sourceArchive) + if err != nil { + return "", fmt.Errorf("opening source archive: %v", err) + } + defer file.Close() + + rc, err := rardecode.NewReader(file, r.Password) + if err != nil { + return "", fmt.Errorf("creating archive reader: %v", err) + } + + var files []string + for { + hdr, err := rc.Next() + if err == io.EOF { + break + } + if err != nil { + return "", fmt.Errorf("scanning tarball's file listing: %v", err) + } + files = append(files, hdr.Name) + } + + if multipleTopLevels(files) { + destination = filepath.Join(destination, folderNameFromFileName(sourceArchive)) + } + + return destination, nil +} + +func (r *Rar) unrarNext(to string) error { + f, err := r.Read() + if err != nil { + return err // don't wrap error; calling loop must break on io.EOF + } + header, ok := f.Header.(*rardecode.FileHeader) + if !ok { + return fmt.Errorf("expected header to be *rardecode.FileHeader but was %T", f.Header) + } + return r.unrarFile(f, filepath.Join(to, header.Name)) +} + +func (r *Rar) unrarFile(f File, to string) error { + // do not overwrite existing files, if configured + if !f.IsDir() && !r.OverwriteExisting && fileExists(to) { + return fmt.Errorf("file already exists: %s", to) + } + + hdr, ok := f.Header.(*rardecode.FileHeader) + if !ok { + return fmt.Errorf("expected header to be *rardecode.FileHeader but was %T", f.Header) + } + + // if files come before their containing folders, then we must + // create their folders before writing the file + err := mkdir(filepath.Dir(to)) + if err != nil { + return fmt.Errorf("making parent directories: %v", err) + } + + return writeNewFile(to, r.rr, hdr.Mode()) +} + +// OpenFile opens filename for reading. This method supports +// multi-volume archives, whereas Open does not (but Open +// supports any stream, not just files). +func (r *Rar) OpenFile(filename string) error { + if r.rr != nil { + return fmt.Errorf("rar archive is already open for reading") + } + var err error + r.rc, err = rardecode.OpenReader(filename, r.Password) + if err != nil { + return err + } + r.rr = &r.rc.Reader + return nil +} + +// Open opens t for reading an archive from +// in. The size parameter is not used. +func (r *Rar) Open(in io.Reader, size int64) error { + if r.rr != nil { + return fmt.Errorf("rar archive is already open for reading") + } + var err error + r.rr, err = rardecode.NewReader(in, r.Password) + return err +} + +// Read reads the next file from t, which must have +// already been opened for reading. If there are no +// more files, the error is io.EOF. The File must +// be closed when finished reading from it. +func (r *Rar) Read() (File, error) { + if r.rr == nil { + return File{}, fmt.Errorf("rar archive is not open") + } + + hdr, err := r.rr.Next() + if err != nil { + return File{}, err // don't wrap error; preserve io.EOF + } + + file := File{ + FileInfo: rarFileInfo{hdr}, + Header: hdr, + ReadCloser: ReadFakeCloser{r.rr}, + } + + return file, nil +} + +// Close closes the rar archive(s) opened by Create and Open. +func (r *Rar) Close() error { + var err error + if r.rc != nil { + rc := r.rc + r.rc = nil + err = rc.Close() + } + if r.rr != nil { + r.rr = nil + } + return err +} + +// Walk calls walkFn for each visited item in archive. 
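+//
+// For example, a minimal sketch (the archive name is illustrative):
+//
+//	err := archiver.NewRar().Walk("backup.rar", func(f archiver.File) error {
+//		fmt.Println(f.Name())
+//		return nil
+//	})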
+func (r *Rar) Walk(archive string, walkFn WalkFunc) error { + file, err := os.Open(archive) + if err != nil { + return fmt.Errorf("opening archive file: %v", err) + } + defer file.Close() + + err = r.Open(file, 0) + if err != nil { + return fmt.Errorf("opening archive: %v", err) + } + defer r.Close() + + for { + f, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + if r.ContinueOnError { + log.Printf("[ERROR] Opening next file: %v", err) + continue + } + return fmt.Errorf("opening next file: %v", err) + } + err = walkFn(f) + if err != nil { + if err == ErrStopWalk { + break + } + if r.ContinueOnError { + log.Printf("[ERROR] Walking %s: %v", f.Name(), err) + continue + } + return fmt.Errorf("walking %s: %v", f.Name(), err) + } + } + + return nil +} + +// Extract extracts a single file from the rar archive. +// If the target is a directory, the entire folder will +// be extracted into destination. +func (r *Rar) Extract(source, target, destination string) error { + // target refers to a path inside the archive, which should be clean also + target = path.Clean(target) + + // if the target ends up being a directory, then + // we will continue walking and extracting files + // until we are no longer within that directory + var targetDirPath string + + return r.Walk(source, func(f File) error { + th, ok := f.Header.(*rardecode.FileHeader) + if !ok { + return fmt.Errorf("expected header to be *rardecode.FileHeader but was %T", f.Header) + } + + // importantly, cleaning the path strips tailing slash, + // which must be appended to folders within the archive + name := path.Clean(th.Name) + if f.IsDir() && target == name { + targetDirPath = path.Dir(name) + } + + if within(target, th.Name) { + // either this is the exact file we want, or is + // in the directory we want to extract + + // build the filename we will extract to + end, err := filepath.Rel(targetDirPath, th.Name) + if err != nil { + return fmt.Errorf("relativizing paths: %v", err) + } + joined := filepath.Join(destination, end) + + err = r.unrarFile(f, joined) + if err != nil { + return fmt.Errorf("extracting file %s: %v", th.Name, err) + } + + // if our target was not a directory, stop walk + if targetDirPath == "" { + return ErrStopWalk + } + } else if targetDirPath != "" { + // finished walking the entire directory + return ErrStopWalk + } + + return nil + }) +} + +// Match returns true if the format of file matches this +// type's format. It should not affect reader position. +func (*Rar) Match(file io.ReadSeeker) (bool, error) { + currentPos, err := file.Seek(0, io.SeekCurrent) + if err != nil { + return false, err + } + _, err = file.Seek(0, 0) + if err != nil { + return false, err + } + defer file.Seek(currentPos, io.SeekStart) + + buf := make([]byte, 8) + if n, err := file.Read(buf); err != nil || n < 8 { + return false, nil + } + hasTarHeader := bytes.Equal(buf[:7], []byte("Rar!\x1a\x07\x00")) || // ver 1.5 + bytes.Equal(buf, []byte("Rar!\x1a\x07\x01\x00")) // ver 5.0 + return hasTarHeader, nil +} + +func (r *Rar) String() string { return "rar" } + +// NewRar returns a new, default instance ready to be customized and used. 
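+//
+// For example (illustrative values):
+//
+//	r := archiver.NewRar()
+//	r.OverwriteExisting = true
+//	r.Password = "secret"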
+func NewRar() *Rar { + return &Rar{ + MkdirAll: true, + } +} + +type rarFileInfo struct { + fh *rardecode.FileHeader +} + +func (rfi rarFileInfo) Name() string { return rfi.fh.Name } +func (rfi rarFileInfo) Size() int64 { return rfi.fh.UnPackedSize } +func (rfi rarFileInfo) Mode() os.FileMode { return rfi.fh.Mode() } +func (rfi rarFileInfo) ModTime() time.Time { return rfi.fh.ModificationTime } +func (rfi rarFileInfo) IsDir() bool { return rfi.fh.IsDir } +func (rfi rarFileInfo) Sys() interface{} { return nil } + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Reader(new(Rar)) + _ = Unarchiver(new(Rar)) + _ = Walker(new(Rar)) + _ = Extractor(new(Rar)) + _ = Matcher(new(Rar)) + _ = ExtensionChecker(new(Rar)) + _ = os.FileInfo(rarFileInfo{}) +) + +// DefaultRar is a default instance that is conveniently ready to use. +var DefaultRar = NewRar() diff --git a/vendor/github.com/mholt/archiver/sz.go b/vendor/github.com/mholt/archiver/sz.go new file mode 100644 index 00000000..39c5865e --- /dev/null +++ b/vendor/github.com/mholt/archiver/sz.go @@ -0,0 +1,51 @@ +package archiver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/golang/snappy" +) + +// Snappy facilitates Snappy compression. +type Snappy struct{} + +// Compress reads in, compresses it, and writes it to out. +func (s *Snappy) Compress(in io.Reader, out io.Writer) error { + w := snappy.NewWriter(out) + defer w.Close() + _, err := io.Copy(w, in) + return err +} + +// Decompress reads in, decompresses it, and writes it to out. +func (s *Snappy) Decompress(in io.Reader, out io.Writer) error { + r := snappy.NewReader(in) + _, err := io.Copy(out, r) + return err +} + +// CheckExt ensures the file extension matches the format. +func (s *Snappy) CheckExt(filename string) error { + if filepath.Ext(filename) != ".sz" { + return fmt.Errorf("filename must have a .sz extension") + } + return nil +} + +func (s *Snappy) String() string { return "sz" } + +// NewSnappy returns a new, default instance ready to be customized and used. +func NewSnappy() *Snappy { + return new(Snappy) +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Compressor(new(Snappy)) + _ = Decompressor(new(Snappy)) +) + +// DefaultSnappy is a default instance that is conveniently ready to use. +var DefaultSnappy = NewSnappy() diff --git a/vendor/github.com/mholt/archiver/tar.go b/vendor/github.com/mholt/archiver/tar.go new file mode 100644 index 00000000..780adc16 --- /dev/null +++ b/vendor/github.com/mholt/archiver/tar.go @@ -0,0 +1,605 @@ +package archiver + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "os" + "path" + "path/filepath" + "strconv" + "strings" +) + +// Tar provides facilities for operating TAR archives. +// See http://www.gnu.org/software/tar/manual/html_node/Standard.html. +type Tar struct { + // Whether to overwrite existing files; if false, + // an error is returned if the file exists. + OverwriteExisting bool + + // Whether to make all the directories necessary + // to create a tar archive in the desired path. + MkdirAll bool + + // A single top-level folder can be implicitly + // created by the Archive or Unarchive methods + // if the files to be added to the archive + // or the files to be extracted from the archive + // do not all have a common root. 
This roughly + // mimics the behavior of archival tools integrated + // into OS file browsers which create a subfolder + // to avoid unexpectedly littering the destination + // folder with potentially many files, causing a + // problematic cleanup/organization situation. + // This feature is available for both creation + // and extraction of archives, but may be slightly + // inefficient with lots and lots of files, + // especially on extraction. + ImplicitTopLevelFolder bool + + // If true, errors encountered during reading + // or writing a single file will be logged and + // the operation will continue on remaining files. + ContinueOnError bool + + tw *tar.Writer + tr *tar.Reader + + readerWrapFn func(io.Reader) (io.Reader, error) + writerWrapFn func(io.Writer) (io.Writer, error) + cleanupWrapFn func() +} + +// CheckExt ensures the file extension matches the format. +func (*Tar) CheckExt(filename string) error { + if !strings.HasSuffix(filename, ".tar") { + return fmt.Errorf("filename must have a .tar extension") + } + return nil +} + +// Archive creates a tarball file at destination containing +// the files listed in sources. The destination must end with +// ".tar". File paths can be those of regular files or +// directories; directories will be recursively added. +func (t *Tar) Archive(sources []string, destination string) error { + err := t.CheckExt(destination) + if t.writerWrapFn == nil && err != nil { + return fmt.Errorf("checking extension: %v", err) + } + if !t.OverwriteExisting && fileExists(destination) { + return fmt.Errorf("file already exists: %s", destination) + } + + // make the folder to contain the resulting archive + // if it does not already exist + destDir := filepath.Dir(destination) + if t.MkdirAll && !fileExists(destDir) { + err := mkdir(destDir) + if err != nil { + return fmt.Errorf("making folder for destination: %v", err) + } + } + + out, err := os.Create(destination) + if err != nil { + return fmt.Errorf("creating %s: %v", destination, err) + } + defer out.Close() + + err = t.Create(out) + if err != nil { + return fmt.Errorf("creating tar: %v", err) + } + defer t.Close() + + var topLevelFolder string + if t.ImplicitTopLevelFolder && multipleTopLevels(sources) { + topLevelFolder = folderNameFromFileName(destination) + } + + for _, source := range sources { + err := t.writeWalk(source, topLevelFolder, destination) + if err != nil { + return fmt.Errorf("walking %s: %v", source, err) + } + } + + return nil +} + +// Unarchive unpacks the .tar file at source to destination. +// Destination will be treated as a folder name. +func (t *Tar) Unarchive(source, destination string) error { + if !fileExists(destination) && t.MkdirAll { + err := mkdir(destination) + if err != nil { + return fmt.Errorf("preparing destination: %v", err) + } + } + + // if the files in the archive do not all share a common + // root, then make sure we extract to a single subfolder + // rather than potentially littering the destination... 
+ if t.ImplicitTopLevelFolder { + var err error + destination, err = t.addTopLevelFolder(source, destination) + if err != nil { + return fmt.Errorf("scanning source archive: %v", err) + } + } + + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("opening source archive: %v", err) + } + defer file.Close() + + err = t.Open(file, 0) + if err != nil { + return fmt.Errorf("opening tar archive for reading: %v", err) + } + defer t.Close() + + for { + err := t.untarNext(destination) + if err == io.EOF { + break + } + if err != nil { + if t.ContinueOnError { + log.Printf("[ERROR] Reading file in tar archive: %v", err) + continue + } + return fmt.Errorf("reading file in tar archive: %v", err) + } + } + + return nil +} + +// addTopLevelFolder scans the files contained inside +// the tarball named sourceArchive and returns a modified +// destination if all the files do not share the same +// top-level folder. +func (t *Tar) addTopLevelFolder(sourceArchive, destination string) (string, error) { + file, err := os.Open(sourceArchive) + if err != nil { + return "", fmt.Errorf("opening source archive: %v", err) + } + defer file.Close() + + // if the reader is to be wrapped, ensure we do that now + // or we will not be able to read the archive successfully + reader := io.Reader(file) + if t.readerWrapFn != nil { + reader, err = t.readerWrapFn(reader) + if err != nil { + return "", fmt.Errorf("wrapping reader: %v", err) + } + } + if t.cleanupWrapFn != nil { + defer t.cleanupWrapFn() + } + + tr := tar.NewReader(reader) + + var files []string + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return "", fmt.Errorf("scanning tarball's file listing: %v", err) + } + files = append(files, hdr.Name) + } + + if multipleTopLevels(files) { + destination = filepath.Join(destination, folderNameFromFileName(sourceArchive)) + } + + return destination, nil +} + +func (t *Tar) untarNext(to string) error { + f, err := t.Read() + if err != nil { + return err // don't wrap error; calling loop must break on io.EOF + } + header, ok := f.Header.(*tar.Header) + if !ok { + return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header) + } + return t.untarFile(f, filepath.Join(to, header.Name)) +} + +func (t *Tar) untarFile(f File, to string) error { + // do not overwrite existing files, if configured + if !f.IsDir() && !t.OverwriteExisting && fileExists(to) { + return fmt.Errorf("file already exists: %s", to) + } + + hdr, ok := f.Header.(*tar.Header) + if !ok { + return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header) + } + + switch hdr.Typeflag { + case tar.TypeDir: + return mkdir(to) + case tar.TypeReg, tar.TypeRegA, tar.TypeChar, tar.TypeBlock, tar.TypeFifo: + return writeNewFile(to, f, f.Mode()) + case tar.TypeSymlink: + return writeNewSymbolicLink(to, hdr.Linkname) + case tar.TypeLink: + return writeNewHardLink(to, filepath.Join(to, hdr.Linkname)) + case tar.TypeXGlobalHeader: + return nil // ignore the pax global header from git-generated tarballs + default: + return fmt.Errorf("%s: unknown type flag: %c", hdr.Name, hdr.Typeflag) + } +} + +func (t *Tar) writeWalk(source, topLevelFolder, destination string) error { + sourceInfo, err := os.Stat(source) + if err != nil { + return fmt.Errorf("%s: stat: %v", source, err) + } + destAbs, err := filepath.Abs(destination) + if err != nil { + return fmt.Errorf("%s: getting absolute path of destination %s: %v", source, destination, err) + } + + return filepath.Walk(source, func(fpath string, info 
os.FileInfo, err error) error { + handleErr := func(err error) error { + if t.ContinueOnError { + log.Printf("[ERROR] Walking %s: %v", fpath, err) + return nil + } + return err + } + if err != nil { + return handleErr(fmt.Errorf("traversing %s: %v", fpath, err)) + } + if info == nil { + return handleErr(fmt.Errorf("no file info")) + } + + // make sure we do not copy our output file into itself + fpathAbs, err := filepath.Abs(fpath) + if err != nil { + return handleErr(fmt.Errorf("%s: getting absolute path: %v", fpath, err)) + } + if within(fpathAbs, destAbs) { + return nil + } + + // build the name to be used within the archive + nameInArchive, err := makeNameInArchive(sourceInfo, source, topLevelFolder, fpath) + if err != nil { + return handleErr(err) + } + + file, err := os.Open(fpath) + if err != nil { + return handleErr(fmt.Errorf("%s: opening: %v", fpath, err)) + } + defer file.Close() + + err = t.Write(File{ + FileInfo: FileInfo{ + FileInfo: info, + CustomName: nameInArchive, + }, + ReadCloser: file, + }) + if err != nil { + return handleErr(fmt.Errorf("%s: writing: %s", fpath, err)) + } + + return nil + }) +} + +// Create opens t for writing a tar archive to out. +func (t *Tar) Create(out io.Writer) error { + if t.tw != nil { + return fmt.Errorf("tar archive is already created for writing") + } + + // wrapping writers allows us to output + // compressed tarballs, for example + if t.writerWrapFn != nil { + var err error + out, err = t.writerWrapFn(out) + if err != nil { + return fmt.Errorf("wrapping writer: %v", err) + } + } + + t.tw = tar.NewWriter(out) + return nil +} + +// Write writes f to t, which must have been opened for writing first. +func (t *Tar) Write(f File) error { + if t.tw == nil { + return fmt.Errorf("tar archive was not created for writing first") + } + if f.FileInfo == nil { + return fmt.Errorf("no file info") + } + if f.FileInfo.Name() == "" { + return fmt.Errorf("missing file name") + } + + hdr, err := tar.FileInfoHeader(f, f.Name()) + if err != nil { + return fmt.Errorf("%s: making header: %v", f.Name(), err) + } + + err = t.tw.WriteHeader(hdr) + if err != nil { + return fmt.Errorf("%s: writing header: %v", hdr.Name, err) + } + + if f.IsDir() { + return nil + } + + if hdr.Typeflag == tar.TypeReg { + if f.ReadCloser == nil { + return fmt.Errorf("%s: no way to read file contents", f.Name()) + } + _, err := io.Copy(t.tw, f) + if err != nil { + return fmt.Errorf("%s: copying contents: %v", f.Name(), err) + } + } + + return nil +} + +// Open opens t for reading an archive from +// in. The size parameter is not used. +func (t *Tar) Open(in io.Reader, size int64) error { + if t.tr != nil { + return fmt.Errorf("tar archive is already open for reading") + } + // wrapping readers allows us to open compressed tarballs + if t.readerWrapFn != nil { + var err error + in, err = t.readerWrapFn(in) + if err != nil { + return fmt.Errorf("wrapping file reader: %v", err) + } + } + t.tr = tar.NewReader(in) + return nil +} + +// Read reads the next file from t, which must have +// already been opened for reading. If there are no +// more files, the error is io.EOF. The File must +// be closed when finished reading from it. 
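+//
+// For example, a typical read loop (sketch):
+//
+//	for {
+//		f, err := t.Read()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// ... use f here ...
+//		f.Close()
+//	}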
+func (t *Tar) Read() (File, error) { + if t.tr == nil { + return File{}, fmt.Errorf("tar archive is not open") + } + + hdr, err := t.tr.Next() + if err != nil { + return File{}, err // don't wrap error; preserve io.EOF + } + + file := File{ + FileInfo: hdr.FileInfo(), + Header: hdr, + ReadCloser: ReadFakeCloser{t.tr}, + } + + return file, nil +} + +// Close closes the tar archive(s) opened by Create and Open. +func (t *Tar) Close() error { + var err error + if t.tr != nil { + t.tr = nil + } + if t.tw != nil { + tw := t.tw + t.tw = nil + err = tw.Close() + } + // make sure cleanup of "Reader/Writer wrapper" + // (say that ten times fast) happens AFTER the + // underlying stream is closed + if t.cleanupWrapFn != nil { + t.cleanupWrapFn() + } + return err +} + +// Walk calls walkFn for each visited item in archive. +func (t *Tar) Walk(archive string, walkFn WalkFunc) error { + file, err := os.Open(archive) + if err != nil { + return fmt.Errorf("opening archive file: %v", err) + } + defer file.Close() + + err = t.Open(file, 0) + if err != nil { + return fmt.Errorf("opening archive: %v", err) + } + defer t.Close() + + for { + f, err := t.Read() + if err == io.EOF { + break + } + if err != nil { + if t.ContinueOnError { + log.Printf("[ERROR] Opening next file: %v", err) + continue + } + return fmt.Errorf("opening next file: %v", err) + } + err = walkFn(f) + if err != nil { + if err == ErrStopWalk { + break + } + if t.ContinueOnError { + log.Printf("[ERROR] Walking %s: %v", f.Name(), err) + continue + } + return fmt.Errorf("walking %s: %v", f.Name(), err) + } + } + + return nil +} + +// Extract extracts a single file from the tar archive. +// If the target is a directory, the entire folder will +// be extracted into destination. +func (t *Tar) Extract(source, target, destination string) error { + // target refers to a path inside the archive, which should be clean also + target = path.Clean(target) + + // if the target ends up being a directory, then + // we will continue walking and extracting files + // until we are no longer within that directory + var targetDirPath string + + return t.Walk(source, func(f File) error { + th, ok := f.Header.(*tar.Header) + if !ok { + return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header) + } + + // importantly, cleaning the path strips tailing slash, + // which must be appended to folders within the archive + name := path.Clean(th.Name) + if f.IsDir() && target == name { + targetDirPath = path.Dir(name) + } + + if within(target, th.Name) { + // either this is the exact file we want, or is + // in the directory we want to extract + + // build the filename we will extract to + end, err := filepath.Rel(targetDirPath, th.Name) + if err != nil { + return fmt.Errorf("relativizing paths: %v", err) + } + joined := filepath.Join(destination, end) + + err = t.untarFile(f, joined) + if err != nil { + return fmt.Errorf("extracting file %s: %v", th.Name, err) + } + + // if our target was not a directory, stop walk + if targetDirPath == "" { + return ErrStopWalk + } + } else if targetDirPath != "" { + // finished walking the entire directory + return ErrStopWalk + } + + return nil + }) +} + +// Match returns true if the format of file matches this +// type's format. It should not affect reader position. 
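+//
+// For example, a minimal sketch using the ready-made default instance on an
+// already-open file:
+//
+//	ok, err := archiver.DefaultTar.Match(file)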
+func (*Tar) Match(file io.ReadSeeker) (bool, error) {
+	currentPos, err := file.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return false, err
+	}
+	_, err = file.Seek(0, 0)
+	if err != nil {
+		return false, err
+	}
+	defer file.Seek(currentPos, io.SeekStart)
+
+	buf := make([]byte, tarBlockSize)
+	if _, err = io.ReadFull(file, buf); err != nil {
+		return false, nil
+	}
+	return hasTarHeader(buf), nil
+}
+
+// hasTarHeader reports whether buf starts with a valid tar header. buf must
+// contain at least 512 bytes; if it does not, this always returns false.
+func hasTarHeader(buf []byte) bool {
+	if len(buf) < tarBlockSize {
+		return false
+	}
+
+	b := buf[148:156]
+	b = bytes.Trim(b, " \x00") // clean up all spaces and null bytes
+	if len(b) == 0 {
+		return false // unknown format
+	}
+	hdrSum, err := strconv.ParseUint(string(b), 8, 64)
+	if err != nil {
+		return false
+	}
+
+	// According to the official Go archive/tar package, Sun tar uses signed
+	// byte values, so this calculates both the signed and unsigned sums
+	var usum uint64
+	var sum int64
+	for i, c := range buf {
+		if 148 <= i && i < 156 {
+			c = ' ' // checksum field itself is counted as blanks
+		}
+		usum += uint64(uint8(c))
+		sum += int64(int8(c))
+	}
+
+	if hdrSum != usum && int64(hdrSum) != sum {
+		return false // invalid checksum
+	}
+
+	return true
+}
+
+func (t *Tar) String() string { return "tar" }
+
+// NewTar returns a new, default instance ready to be customized and used.
+func NewTar() *Tar {
+	return &Tar{
+		MkdirAll: true,
+	}
+}
+
+const tarBlockSize = 512
+
+// Compile-time checks to ensure type implements desired interfaces.
+var (
+	_ = Reader(new(Tar))
+	_ = Writer(new(Tar))
+	_ = Archiver(new(Tar))
+	_ = Unarchiver(new(Tar))
+	_ = Walker(new(Tar))
+	_ = Extractor(new(Tar))
+	_ = Matcher(new(Tar))
+	_ = ExtensionChecker(new(Tar))
+)
+
+// DefaultTar is a default instance that is conveniently ready to use.
+var DefaultTar = NewTar()
diff --git a/vendor/github.com/mholt/archiver/tarbz2.go b/vendor/github.com/mholt/archiver/tarbz2.go
new file mode 100644
index 00000000..e5870a7d
--- /dev/null
+++ b/vendor/github.com/mholt/archiver/tarbz2.go
@@ -0,0 +1,126 @@
+package archiver
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/dsnet/compress/bzip2"
+)
+
+// TarBz2 facilitates bzip2 compression
+// (https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf)
+// of tarball archives.
+type TarBz2 struct {
+	*Tar
+
+	CompressionLevel int
+}
+
+// CheckExt ensures the file extension matches the format.
+func (*TarBz2) CheckExt(filename string) error {
+	if !strings.HasSuffix(filename, ".tar.bz2") &&
+		!strings.HasSuffix(filename, ".tbz2") {
+		return fmt.Errorf("filename must have a .tar.bz2 or .tbz2 extension")
+	}
+	return nil
+}
+
+// Archive creates a compressed tar file at destination
+// containing the files listed in sources. The destination
+// must end with ".tar.bz2" or ".tbz2". File paths can be
+// those of regular files or directories; directories will
+// be recursively added.
+func (tbz2 *TarBz2) Archive(sources []string, destination string) error {
+	err := tbz2.CheckExt(destination)
+	if err != nil {
+		return fmt.Errorf("output %s", err.Error())
+	}
+	tbz2.wrapWriter()
+	return tbz2.Tar.Archive(sources, destination)
+}
+
+// Unarchive unpacks the compressed tarball at
+// source to destination. Destination will be
+// treated as a folder name.
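+//
+// For example, a minimal sketch (the names are illustrative):
+//
+//	err := archiver.NewTarBz2().Unarchive("backup.tar.bz2", "restored")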
+func (tbz2 *TarBz2) Unarchive(source, destination string) error { + tbz2.wrapReader() + return tbz2.Tar.Unarchive(source, destination) +} + +// Walk calls walkFn for each visited item in archive. +func (tbz2 *TarBz2) Walk(archive string, walkFn WalkFunc) error { + tbz2.wrapReader() + return tbz2.Tar.Walk(archive, walkFn) +} + +// Create opens tbz2 for writing a compressed +// tar archive to out. +func (tbz2 *TarBz2) Create(out io.Writer) error { + tbz2.wrapWriter() + return tbz2.Tar.Create(out) +} + +// Open opens t for reading a compressed archive from +// in. The size parameter is not used. +func (tbz2 *TarBz2) Open(in io.Reader, size int64) error { + tbz2.wrapReader() + return tbz2.Tar.Open(in, size) +} + +// Extract extracts a single file from the tar archive. +// If the target is a directory, the entire folder will +// be extracted into destination. +func (tbz2 *TarBz2) Extract(source, target, destination string) error { + tbz2.wrapReader() + return tbz2.Tar.Extract(source, target, destination) +} + +func (tbz2 *TarBz2) wrapWriter() { + var bz2w *bzip2.Writer + tbz2.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) { + var err error + bz2w, err = bzip2.NewWriter(w, &bzip2.WriterConfig{ + Level: tbz2.CompressionLevel, + }) + return bz2w, err + } + tbz2.Tar.cleanupWrapFn = func() { + bz2w.Close() + } +} + +func (tbz2 *TarBz2) wrapReader() { + var bz2r *bzip2.Reader + tbz2.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) { + var err error + bz2r, err = bzip2.NewReader(r, nil) + return bz2r, err + } + tbz2.Tar.cleanupWrapFn = func() { + bz2r.Close() + } +} + +func (tbz2 *TarBz2) String() string { return "tar.bz2" } + +// NewTarBz2 returns a new, default instance ready to be customized and used. +func NewTarBz2() *TarBz2 { + return &TarBz2{ + CompressionLevel: bzip2.DefaultCompression, + Tar: NewTar(), + } +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Reader(new(TarBz2)) + _ = Writer(new(TarBz2)) + _ = Archiver(new(TarBz2)) + _ = Unarchiver(new(TarBz2)) + _ = Walker(new(TarBz2)) + _ = Extractor(new(TarBz2)) +) + +// DefaultTarBz2 is a convenient archiver ready to use. +var DefaultTarBz2 = NewTarBz2() diff --git a/vendor/github.com/mholt/archiver/targz.go b/vendor/github.com/mholt/archiver/targz.go new file mode 100644 index 00000000..311c8262 --- /dev/null +++ b/vendor/github.com/mholt/archiver/targz.go @@ -0,0 +1,124 @@ +package archiver + +import ( + "compress/gzip" + "fmt" + "io" + "strings" +) + +// TarGz facilitates gzip compression +// (RFC 1952) of tarball archives. +type TarGz struct { + *Tar + + // The compression level to use, as described + // in the compress/gzip package. + CompressionLevel int +} + +// CheckExt ensures the file extension matches the format. +func (*TarGz) CheckExt(filename string) error { + if !strings.HasSuffix(filename, ".tar.gz") && + !strings.HasSuffix(filename, ".tgz") { + return fmt.Errorf("filename must have a .tar.gz or .tgz extension") + } + return nil +} + +// Archive creates a compressed tar file at destination +// containing the files listed in sources. The destination +// must end with ".tar.gz" or ".tgz". File paths can be +// those of regular files or directories; directories will +// be recursively added. 
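+//
+// For example, a minimal sketch (the paths are illustrative):
+//
+//	tgz := archiver.NewTarGz()
+//	err := tgz.Archive([]string{"site"}, "site.tar.gz")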
+func (tgz *TarGz) Archive(sources []string, destination string) error {
+	err := tgz.CheckExt(destination)
+	if err != nil {
+		return fmt.Errorf("output %s", err.Error())
+	}
+	tgz.wrapWriter()
+	return tgz.Tar.Archive(sources, destination)
+}
+
+// Unarchive unpacks the compressed tarball at
+// source to destination. Destination will be
+// treated as a folder name.
+func (tgz *TarGz) Unarchive(source, destination string) error {
+	tgz.wrapReader()
+	return tgz.Tar.Unarchive(source, destination)
+}
+
+// Walk calls walkFn for each visited item in archive.
+func (tgz *TarGz) Walk(archive string, walkFn WalkFunc) error {
+	tgz.wrapReader()
+	return tgz.Tar.Walk(archive, walkFn)
+}
+
+// Create opens tgz for writing a compressed
+// tar archive to out.
+func (tgz *TarGz) Create(out io.Writer) error {
+	tgz.wrapWriter()
+	return tgz.Tar.Create(out)
+}
+
+// Open opens tgz for reading a compressed archive from
+// in. The size parameter is not used.
+func (tgz *TarGz) Open(in io.Reader, size int64) error {
+	tgz.wrapReader()
+	return tgz.Tar.Open(in, size)
+}
+
+// Extract extracts a single file from the tar archive.
+// If the target is a directory, the entire folder will
+// be extracted into destination.
+func (tgz *TarGz) Extract(source, target, destination string) error {
+	tgz.wrapReader()
+	return tgz.Tar.Extract(source, target, destination)
+}
+
+func (tgz *TarGz) wrapWriter() {
+	var gzw *gzip.Writer
+	tgz.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) {
+		var err error
+		gzw, err = gzip.NewWriterLevel(w, tgz.CompressionLevel)
+		return gzw, err
+	}
+	tgz.Tar.cleanupWrapFn = func() {
+		gzw.Close()
+	}
+}
+
+func (tgz *TarGz) wrapReader() {
+	var gzr *gzip.Reader
+	tgz.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) {
+		var err error
+		gzr, err = gzip.NewReader(r)
+		return gzr, err
+	}
+	tgz.Tar.cleanupWrapFn = func() {
+		gzr.Close()
+	}
+}
+
+func (tgz *TarGz) String() string { return "tar.gz" }
+
+// NewTarGz returns a new, default instance ready to be customized and used.
+func NewTarGz() *TarGz {
+	return &TarGz{
+		CompressionLevel: gzip.DefaultCompression,
+		Tar:              NewTar(),
+	}
+}
+
+// Compile-time checks to ensure type implements desired interfaces.
+var (
+	_ = Reader(new(TarGz))
+	_ = Writer(new(TarGz))
+	_ = Archiver(new(TarGz))
+	_ = Unarchiver(new(TarGz))
+	_ = Walker(new(TarGz))
+	_ = Extractor(new(TarGz))
+)
+
+// DefaultTarGz is a convenient archiver ready to use.
+var DefaultTarGz = NewTarGz()
diff --git a/vendor/github.com/mholt/archiver/tarlz4.go b/vendor/github.com/mholt/archiver/tarlz4.go
new file mode 100644
index 00000000..4a178f69
--- /dev/null
+++ b/vendor/github.com/mholt/archiver/tarlz4.go
@@ -0,0 +1,122 @@
+package archiver
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/pierrec/lz4"
+)
+
+// TarLz4 facilitates lz4 compression
+// (https://github.com/lz4/lz4/tree/master/doc)
+// of tarball archives.
+type TarLz4 struct {
+	*Tar
+
+	// The compression level to use when writing.
+	// Minimum 0 (fast compression), maximum 12
+	// (most space savings).
+	CompressionLevel int
+}
+
+// CheckExt ensures the file extension matches the format.
+func (*TarLz4) CheckExt(filename string) error {
+	if !strings.HasSuffix(filename, ".tar.lz4") &&
+		!strings.HasSuffix(filename, ".tlz4") {
+
+		return fmt.Errorf("filename must have a .tar.lz4 or .tlz4 extension")
+	}
+	return nil
+}
+
+// Archive creates a compressed tar file at destination
+// containing the files listed in sources. The destination
+// must end with ".tar.lz4" or ".tlz4".
File paths can be +// those of regular files or directories; directories will +// be recursively added. +func (tlz4 *TarLz4) Archive(sources []string, destination string) error { + err := tlz4.CheckExt(destination) + if err != nil { + return fmt.Errorf("output %s", err.Error()) + } + tlz4.wrapWriter() + return tlz4.Tar.Archive(sources, destination) +} + +// Unarchive unpacks the compressed tarball at +// source to destination. Destination will be +// treated as a folder name. +func (tlz4 *TarLz4) Unarchive(source, destination string) error { + tlz4.wrapReader() + return tlz4.Tar.Unarchive(source, destination) +} + +// Walk calls walkFn for each visited item in archive. +func (tlz4 *TarLz4) Walk(archive string, walkFn WalkFunc) error { + tlz4.wrapReader() + return tlz4.Tar.Walk(archive, walkFn) +} + +// Create opens tlz4 for writing a compressed +// tar archive to out. +func (tlz4 *TarLz4) Create(out io.Writer) error { + tlz4.wrapWriter() + return tlz4.Tar.Create(out) +} + +// Open opens t for reading a compressed archive from +// in. The size parameter is not used. +func (tlz4 *TarLz4) Open(in io.Reader, size int64) error { + tlz4.wrapReader() + return tlz4.Tar.Open(in, size) +} + +// Extract extracts a single file from the tar archive. +// If the target is a directory, the entire folder will +// be extracted into destination. +func (tlz4 *TarLz4) Extract(source, target, destination string) error { + tlz4.wrapReader() + return tlz4.Tar.Extract(source, target, destination) +} + +func (tlz4 *TarLz4) wrapWriter() { + var lz4w *lz4.Writer + tlz4.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) { + lz4w = lz4.NewWriter(w) + lz4w.Header.CompressionLevel = tlz4.CompressionLevel + return lz4w, nil + } + tlz4.Tar.cleanupWrapFn = func() { + lz4w.Close() + } +} + +func (tlz4 *TarLz4) wrapReader() { + tlz4.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) { + return lz4.NewReader(r), nil + } +} + +func (tlz4 *TarLz4) String() string { return "tar.lz4" } + +// NewTarLz4 returns a new, default instance ready to be customized and used. +func NewTarLz4() *TarLz4 { + return &TarLz4{ + CompressionLevel: 9, // https://github.com/lz4/lz4/blob/1b819bfd633ae285df2dfe1b0589e1ec064f2873/lib/lz4hc.h#L48 + Tar: NewTar(), + } +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Reader(new(TarLz4)) + _ = Writer(new(TarLz4)) + _ = Archiver(new(TarLz4)) + _ = Unarchiver(new(TarLz4)) + _ = Walker(new(TarLz4)) + _ = Extractor(new(TarLz4)) +) + +// DefaultTarLz4 is a convenient archiver ready to use. +var DefaultTarLz4 = NewTarLz4() diff --git a/vendor/github.com/mholt/archiver/tarsz.go b/vendor/github.com/mholt/archiver/tarsz.go new file mode 100644 index 00000000..0569e664 --- /dev/null +++ b/vendor/github.com/mholt/archiver/tarsz.go @@ -0,0 +1,114 @@ +package archiver + +import ( + "fmt" + "io" + "strings" + + "github.com/golang/snappy" +) + +// TarSz facilitates Snappy compression +// (https://github.com/google/snappy) +// of tarball archives. +type TarSz struct { + *Tar +} + +// CheckExt ensures the file extension matches the format. +func (*TarSz) CheckExt(filename string) error { + if !strings.HasSuffix(filename, ".tar.sz") && + !strings.HasSuffix(filename, ".tsz") { + return fmt.Errorf("filename must have a .tar.sz or .tsz extension") + } + return nil +} + +// Archive creates a compressed tar file at destination +// containing the files listed in sources. The destination +// must end with ".tar.sz" or ".tsz". 
File paths can be +// those of regular files or directories; directories will +// be recursively added. +func (tsz *TarSz) Archive(sources []string, destination string) error { + err := tsz.CheckExt(destination) + if err != nil { + return fmt.Errorf("output %s", err.Error()) + } + tsz.wrapWriter() + return tsz.Tar.Archive(sources, destination) +} + +// Unarchive unpacks the compressed tarball at +// source to destination. Destination will be +// treated as a folder name. +func (tsz *TarSz) Unarchive(source, destination string) error { + tsz.wrapReader() + return tsz.Tar.Unarchive(source, destination) +} + +// Walk calls walkFn for each visited item in archive. +func (tsz *TarSz) Walk(archive string, walkFn WalkFunc) error { + tsz.wrapReader() + return tsz.Tar.Walk(archive, walkFn) +} + +// Create opens tsz for writing a compressed +// tar archive to out. +func (tsz *TarSz) Create(out io.Writer) error { + tsz.wrapWriter() + return tsz.Tar.Create(out) +} + +// Open opens t for reading a compressed archive from +// in. The size parameter is not used. +func (tsz *TarSz) Open(in io.Reader, size int64) error { + tsz.wrapReader() + return tsz.Tar.Open(in, size) +} + +// Extract extracts a single file from the tar archive. +// If the target is a directory, the entire folder will +// be extracted into destination. +func (tsz *TarSz) Extract(source, target, destination string) error { + tsz.wrapReader() + return tsz.Tar.Extract(source, target, destination) +} + +func (tsz *TarSz) wrapWriter() { + var sw *snappy.Writer + tsz.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) { + sw = snappy.NewWriter(w) + return sw, nil + } + tsz.Tar.cleanupWrapFn = func() { + sw.Close() + } +} + +func (tsz *TarSz) wrapReader() { + tsz.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) { + return snappy.NewReader(r), nil + } +} + +func (tsz *TarSz) String() string { return "tar.sz" } + +// NewTarSz returns a new, default instance ready to be customized and used. +func NewTarSz() *TarSz { + return &TarSz{ + Tar: NewTar(), + } +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Reader(new(TarSz)) + _ = Writer(new(TarSz)) + _ = Archiver(new(TarSz)) + _ = Unarchiver(new(TarSz)) + _ = Walker(new(TarSz)) + _ = Extractor(new(TarSz)) +) + +// DefaultTarSz is a convenient archiver ready to use. +var DefaultTarSz = NewTarSz() diff --git a/vendor/github.com/mholt/archiver/tarxz.go b/vendor/github.com/mholt/archiver/tarxz.go new file mode 100644 index 00000000..5679a067 --- /dev/null +++ b/vendor/github.com/mholt/archiver/tarxz.go @@ -0,0 +1,119 @@ +package archiver + +import ( + "fmt" + "io" + "strings" + + "github.com/ulikunitz/xz" + fastxz "github.com/xi2/xz" +) + +// TarXz facilitates xz compression +// (https://tukaani.org/xz/format.html) +// of tarball archives. +type TarXz struct { + *Tar +} + +// CheckExt ensures the file extension matches the format. +func (*TarXz) CheckExt(filename string) error { + if !strings.HasSuffix(filename, ".tar.xz") && + !strings.HasSuffix(filename, ".txz") { + return fmt.Errorf("filename must have a .tar.xz or .txz extension") + } + return nil +} + +// Archive creates a compressed tar file at destination +// containing the files listed in sources. The destination +// must end with ".tar.xz" or ".txz". File paths can be +// those of regular files or directories; directories will +// be recursively added. 
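+//
+// Note that, per the imports above, archiving compresses with
+// github.com/ulikunitz/xz, while reading back uses the
+// github.com/xi2/xz decoder (imported as fastxz). An illustrative
+// call, with placeholder paths and the error ignored, would be:
+//
+//	_ = NewTarXz().Archive([]string{"logs"}, "logs.tar.xz")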
+func (txz *TarXz) Archive(sources []string, destination string) error { + err := txz.CheckExt(destination) + if err != nil { + return fmt.Errorf("output %s", err.Error()) + } + txz.wrapWriter() + return txz.Tar.Archive(sources, destination) +} + +// Unarchive unpacks the compressed tarball at +// source to destination. Destination will be +// treated as a folder name. +func (txz *TarXz) Unarchive(source, destination string) error { + txz.wrapReader() + return txz.Tar.Unarchive(source, destination) +} + +// Walk calls walkFn for each visited item in archive. +func (txz *TarXz) Walk(archive string, walkFn WalkFunc) error { + txz.wrapReader() + return txz.Tar.Walk(archive, walkFn) +} + +// Create opens txz for writing a compressed +// tar archive to out. +func (txz *TarXz) Create(out io.Writer) error { + txz.wrapWriter() + return txz.Tar.Create(out) +} + +// Open opens t for reading a compressed archive from +// in. The size parameter is not used. +func (txz *TarXz) Open(in io.Reader, size int64) error { + txz.wrapReader() + return txz.Tar.Open(in, size) +} + +// Extract extracts a single file from the tar archive. +// If the target is a directory, the entire folder will +// be extracted into destination. +func (txz *TarXz) Extract(source, target, destination string) error { + txz.wrapReader() + return txz.Tar.Extract(source, target, destination) +} + +func (txz *TarXz) wrapWriter() { + var xzw *xz.Writer + txz.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) { + var err error + xzw, err = xz.NewWriter(w) + return xzw, err + } + txz.Tar.cleanupWrapFn = func() { + xzw.Close() + } +} + +func (txz *TarXz) wrapReader() { + var xzr *fastxz.Reader + txz.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) { + var err error + xzr, err = fastxz.NewReader(r, 0) + return xzr, err + } +} + +func (txz *TarXz) String() string { return "tar.xz" } + +// NewTarXz returns a new, default instance ready to be customized and used. +func NewTarXz() *TarXz { + return &TarXz{ + Tar: NewTar(), + } +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Reader(new(TarXz)) + _ = Writer(new(TarXz)) + _ = Archiver(new(TarXz)) + _ = Unarchiver(new(TarXz)) + _ = Walker(new(TarXz)) + _ = Extractor(new(TarXz)) +) + +// DefaultTarXz is a convenient archiver ready to use. +var DefaultTarXz = NewTarXz() diff --git a/vendor/github.com/mholt/archiver/xz.go b/vendor/github.com/mholt/archiver/xz.go new file mode 100644 index 00000000..c60d5eae --- /dev/null +++ b/vendor/github.com/mholt/archiver/xz.go @@ -0,0 +1,58 @@ +package archiver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/ulikunitz/xz" + fastxz "github.com/xi2/xz" +) + +// Xz facilitates XZ compression. +type Xz struct{} + +// Compress reads in, compresses it, and writes it to out. +func (x *Xz) Compress(in io.Reader, out io.Writer) error { + w, err := xz.NewWriter(out) + if err != nil { + return err + } + defer w.Close() + _, err = io.Copy(w, in) + return err +} + +// Decompress reads in, decompresses it, and writes it to out. +func (x *Xz) Decompress(in io.Reader, out io.Writer) error { + r, err := fastxz.NewReader(in, 0) + if err != nil { + return err + } + _, err = io.Copy(out, r) + return err +} + +// CheckExt ensures the file extension matches the format. 
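+// For example, CheckExt("data.xz") returns nil, while
+// CheckExt("data.gz") returns a descriptive error.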
+func (x *Xz) CheckExt(filename string) error { + if filepath.Ext(filename) != ".xz" { + return fmt.Errorf("filename must have a .xz extension") + } + return nil +} + +func (x *Xz) String() string { return "xz" } + +// NewXz returns a new, default instance ready to be customized and used. +func NewXz() *Xz { + return new(Xz) +} + +// Compile-time checks to ensure type implements desired interfaces. +var ( + _ = Compressor(new(Xz)) + _ = Decompressor(new(Xz)) +) + +// DefaultXz is a default instance that is conveniently ready to use. +var DefaultXz = NewXz() diff --git a/vendor/github.com/mholt/archiver/zip.go b/vendor/github.com/mholt/archiver/zip.go new file mode 100644 index 00000000..afa47921 --- /dev/null +++ b/vendor/github.com/mholt/archiver/zip.go @@ -0,0 +1,575 @@ +package archiver + +import ( + "archive/zip" + "bytes" + "compress/flate" + "fmt" + "io" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +// Zip provides facilities for operating ZIP archives. +// See https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT. +type Zip struct { + // The compression level to use, as described + // in the compress/flate package. + CompressionLevel int + + // Whether to overwrite existing files; if false, + // an error is returned if the file exists. + OverwriteExisting bool + + // Whether to make all the directories necessary + // to create a zip archive in the desired path. + MkdirAll bool + + // If enabled, selective compression will only + // compress files which are not already in a + // compressed format; this is decided based + // simply on file extension. + SelectiveCompression bool + + // A single top-level folder can be implicitly + // created by the Archive or Unarchive methods + // if the files to be added to the archive + // or the files to be extracted from the archive + // do not all have a common root. This roughly + // mimics the behavior of archival tools integrated + // into OS file browsers which create a subfolder + // to avoid unexpectedly littering the destination + // folder with potentially many files, causing a + // problematic cleanup/organization situation. + // This feature is available for both creation + // and extraction of archives, but may be slightly + // inefficient with lots and lots of files, + // especially on extraction. + ImplicitTopLevelFolder bool + + // If true, errors encountered during reading + // or writing a single file will be logged and + // the operation will continue on remaining files. + ContinueOnError bool + + zw *zip.Writer + zr *zip.Reader + ridx int +} + +// CheckExt ensures the file extension matches the format. +func (*Zip) CheckExt(filename string) error { + if !strings.HasSuffix(filename, ".zip") { + return fmt.Errorf("filename must have a .zip extension") + } + return nil +} + +// Archive creates a .zip file at destination containing +// the files listed in sources. The destination must end +// with ".zip". File paths can be those of regular files +// or directories. Regular files are stored at the 'root' +// of the archive, and directories are recursively added. 
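+//
+// A minimal illustrative call (placeholder paths, error handling
+// omitted):
+//
+//	_ = DefaultZip.Archive([]string{"site", "readme.txt"}, "site.zip")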
+func (z *Zip) Archive(sources []string, destination string) error { + err := z.CheckExt(destination) + if err != nil { + return fmt.Errorf("checking extension: %v", err) + } + if !z.OverwriteExisting && fileExists(destination) { + return fmt.Errorf("file already exists: %s", destination) + } + + // make the folder to contain the resulting archive + // if it does not already exist + destDir := filepath.Dir(destination) + if z.MkdirAll && !fileExists(destDir) { + err := mkdir(destDir) + if err != nil { + return fmt.Errorf("making folder for destination: %v", err) + } + } + + out, err := os.Create(destination) + if err != nil { + return fmt.Errorf("creating %s: %v", destination, err) + } + defer out.Close() + + err = z.Create(out) + if err != nil { + return fmt.Errorf("creating zip: %v", err) + } + defer z.Close() + + var topLevelFolder string + if z.ImplicitTopLevelFolder && multipleTopLevels(sources) { + topLevelFolder = folderNameFromFileName(destination) + } + + for _, source := range sources { + err := z.writeWalk(source, topLevelFolder, destination) + if err != nil { + return fmt.Errorf("walking %s: %v", source, err) + } + } + + return nil +} + +// Unarchive unpacks the .zip file at source to destination. +// Destination will be treated as a folder name. +func (z *Zip) Unarchive(source, destination string) error { + if !fileExists(destination) && z.MkdirAll { + err := mkdir(destination) + if err != nil { + return fmt.Errorf("preparing destination: %v", err) + } + } + + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("opening source file: %v", err) + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return fmt.Errorf("statting source file: %v", err) + } + + err = z.Open(file, fileInfo.Size()) + if err != nil { + return fmt.Errorf("opening zip archive for reading: %v", err) + } + defer z.Close() + + // if the files in the archive do not all share a common + // root, then make sure we extract to a single subfolder + // rather than potentially littering the destination... 
+ if z.ImplicitTopLevelFolder { + files := make([]string, len(z.zr.File)) + for i := range z.zr.File { + files[i] = z.zr.File[i].Name + } + if multipleTopLevels(files) { + destination = filepath.Join(destination, folderNameFromFileName(source)) + } + } + + for { + err := z.extractNext(destination) + if err == io.EOF { + break + } + if err != nil { + if z.ContinueOnError { + log.Printf("[ERROR] Reading file in zip archive: %v", err) + continue + } + return fmt.Errorf("reading file in zip archive: %v", err) + } + } + + return nil +} + +func (z *Zip) extractNext(to string) error { + f, err := z.Read() + if err != nil { + return err // don't wrap error; calling loop must break on io.EOF + } + defer f.Close() + header, ok := f.Header.(zip.FileHeader) + if !ok { + return fmt.Errorf("expected header to be zip.FileHeader but was %T", f.Header) + } + return z.extractFile(f, filepath.Join(to, header.Name)) +} + +func (z *Zip) extractFile(f File, to string) error { + // if a directory, no content; simply make the directory and return + if f.IsDir() { + return mkdir(to) + } + + // do not overwrite existing files, if configured + if !z.OverwriteExisting && fileExists(to) { + return fmt.Errorf("file already exists: %s", to) + } + + return writeNewFile(to, f, f.Mode()) +} + +func (z *Zip) writeWalk(source, topLevelFolder, destination string) error { + sourceInfo, err := os.Stat(source) + if err != nil { + return fmt.Errorf("%s: stat: %v", source, err) + } + destAbs, err := filepath.Abs(destination) + if err != nil { + return fmt.Errorf("%s: getting absolute path of destination %s: %v", source, destination, err) + } + + return filepath.Walk(source, func(fpath string, info os.FileInfo, err error) error { + handleErr := func(err error) error { + if z.ContinueOnError { + log.Printf("[ERROR] Walking %s: %v", fpath, err) + return nil + } + return err + } + if err != nil { + return handleErr(fmt.Errorf("traversing %s: %v", fpath, err)) + } + if info == nil { + return handleErr(fmt.Errorf("%s: no file info", fpath)) + } + + // make sure we do not copy the output file into the output + // file; that results in an infinite loop and disk exhaustion! + fpathAbs, err := filepath.Abs(fpath) + if err != nil { + return handleErr(fmt.Errorf("%s: getting absolute path: %v", fpath, err)) + } + if within(fpathAbs, destAbs) { + return nil + } + + // build the name to be used within the archive + nameInArchive, err := makeNameInArchive(sourceInfo, source, topLevelFolder, fpath) + if err != nil { + return handleErr(err) + } + + file, err := os.Open(fpath) + if err != nil { + return handleErr(fmt.Errorf("%s: opening: %v", fpath, err)) + } + defer file.Close() + + err = z.Write(File{ + FileInfo: FileInfo{ + FileInfo: info, + CustomName: nameInArchive, + }, + ReadCloser: file, + }) + if err != nil { + return handleErr(fmt.Errorf("%s: writing: %s", fpath, err)) + } + + return nil + }) +} + +// Create opens z for writing a ZIP archive to out. +func (z *Zip) Create(out io.Writer) error { + if z.zw != nil { + return fmt.Errorf("zip archive is already created for writing") + } + z.zw = zip.NewWriter(out) + if z.CompressionLevel != flate.DefaultCompression { + z.zw.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) { + return flate.NewWriter(out, z.CompressionLevel) + }) + } + return nil +} + +// Write writes f to z, which must have been opened for writing first. 
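+//
+// A rough sketch of the streaming Create/Write/Close sequence,
+// mirroring what writeWalk does above (out, info and fh are
+// placeholders for an io.Writer, an os.FileInfo and an open file):
+//
+//	z := NewZip()
+//	_ = z.Create(out)
+//	_ = z.Write(File{
+//		FileInfo:   FileInfo{FileInfo: info, CustomName: "dir/file.txt"},
+//		ReadCloser: fh,
+//	})
+//	_ = z.Close()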
+func (z *Zip) Write(f File) error { + if z.zw == nil { + return fmt.Errorf("zip archive was not created for writing first") + } + if f.FileInfo == nil { + return fmt.Errorf("no file info") + } + if f.FileInfo.Name() == "" { + return fmt.Errorf("missing file name") + } + + header, err := zip.FileInfoHeader(f) + if err != nil { + return fmt.Errorf("%s: getting header: %v", f.Name(), err) + } + + if f.IsDir() { + header.Name += "/" // required - strangely no mention of this in zip spec? but is in godoc... + header.Method = zip.Store + } else { + ext := strings.ToLower(path.Ext(header.Name)) + if _, ok := compressedFormats[ext]; ok && z.SelectiveCompression { + header.Method = zip.Store + } else { + header.Method = zip.Deflate + } + } + + writer, err := z.zw.CreateHeader(header) + if err != nil { + return fmt.Errorf("%s: making header: %v", f.Name(), err) + } + + if f.IsDir() { + return nil + } + + if header.Mode().IsRegular() { + if f.ReadCloser == nil { + return fmt.Errorf("%s: no way to read file contents", f.Name()) + } + _, err := io.Copy(writer, f) + if err != nil { + return fmt.Errorf("%s: copying contents: %v", f.Name(), err) + } + } + + return nil +} + +// Open opens z for reading an archive from in, +// which is expected to have the given size and +// which must be an io.ReaderAt. +func (z *Zip) Open(in io.Reader, size int64) error { + inRdrAt, ok := in.(io.ReaderAt) + if !ok { + return fmt.Errorf("reader must be io.ReaderAt") + } + if z.zr != nil { + return fmt.Errorf("zip archive is already open for reading") + } + var err error + z.zr, err = zip.NewReader(inRdrAt, size) + if err != nil { + return fmt.Errorf("creating reader: %v", err) + } + z.ridx = 0 + return nil +} + +// Read reads the next file from z, which must have +// already been opened for reading. If there are no +// more files, the error is io.EOF. The File must +// be closed when finished reading from it. +func (z *Zip) Read() (File, error) { + if z.zr == nil { + return File{}, fmt.Errorf("zip archive is not open") + } + if z.ridx >= len(z.zr.File) { + return File{}, io.EOF + } + + // access the file and increment counter so that + // if there is an error processing this file, the + // caller can still iterate to the next file + zf := z.zr.File[z.ridx] + z.ridx++ + + file := File{ + FileInfo: zf.FileInfo(), + Header: zf.FileHeader, + } + + rc, err := zf.Open() + if err != nil { + return file, fmt.Errorf("%s: open compressed file: %v", zf.Name, err) + } + file.ReadCloser = rc + + return file, nil +} + +// Close closes the zip archive(s) opened by Create and Open. +func (z *Zip) Close() error { + if z.zr != nil { + z.zr = nil + } + if z.zw != nil { + zw := z.zw + z.zw = nil + return zw.Close() + } + return nil +} + +// Walk calls walkFn for each visited item in archive. 
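+// Returning ErrStopWalk from walkFn ends the walk early without
+// error, as Extract below relies on. An illustrative sketch:
+//
+//	_ = DefaultZip.Walk("site.zip", func(f File) error {
+//		fmt.Println(f.Name())
+//		return nil
+//	})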
+func (z *Zip) Walk(archive string, walkFn WalkFunc) error {
+	zr, err := zip.OpenReader(archive)
+	if err != nil {
+		return fmt.Errorf("opening zip reader: %v", err)
+	}
+	defer zr.Close()
+
+	for _, zf := range zr.File {
+		zfrc, err := zf.Open()
+		if err != nil {
+			// zfrc is nil when Open fails, so there is nothing to close
+			if z.ContinueOnError {
+				log.Printf("[ERROR] Opening %s: %v", zf.Name, err)
+				continue
+			}
+			return fmt.Errorf("opening %s: %v", zf.Name, err)
+		}
+
+		err = walkFn(File{
+			FileInfo:   zf.FileInfo(),
+			Header:     zf.FileHeader,
+			ReadCloser: zfrc,
+		})
+		zfrc.Close()
+		if err != nil {
+			if err == ErrStopWalk {
+				break
+			}
+			if z.ContinueOnError {
+				log.Printf("[ERROR] Walking %s: %v", zf.Name, err)
+				continue
+			}
+			return fmt.Errorf("walking %s: %v", zf.Name, err)
+		}
+	}
+
+	return nil
+}
+
+// Extract extracts a single file from the zip archive.
+// If the target is a directory, the entire folder will
+// be extracted into destination.
+func (z *Zip) Extract(source, target, destination string) error {
+	// target refers to a path inside the archive, which should be clean also
+	target = path.Clean(target)
+
+	// if the target ends up being a directory, then
+	// we will continue walking and extracting files
+	// until we are no longer within that directory
+	var targetDirPath string
+
+	return z.Walk(source, func(f File) error {
+		zfh, ok := f.Header.(zip.FileHeader)
+		if !ok {
+			return fmt.Errorf("expected header to be zip.FileHeader but was %T", f.Header)
+		}
+
+		// importantly, cleaning the path strips the trailing slash,
+		// which must be appended to folders within the archive
+		name := path.Clean(zfh.Name)
+		if f.IsDir() && target == name {
+			targetDirPath = path.Dir(name)
+		}
+
+		if within(target, zfh.Name) {
+			// either this is the exact file we want, or is
+			// in the directory we want to extract
+
+			// build the filename we will extract to
+			end, err := filepath.Rel(targetDirPath, zfh.Name)
+			if err != nil {
+				return fmt.Errorf("relativizing paths: %v", err)
+			}
+			joined := filepath.Join(destination, end)
+
+			err = z.extractFile(f, joined)
+			if err != nil {
+				return fmt.Errorf("extracting file %s: %v", zfh.Name, err)
+			}
+
+			// if our target was not a directory, stop walk
+			if targetDirPath == "" {
+				return ErrStopWalk
+			}
+		} else if targetDirPath != "" {
+			// finished walking the entire directory
+			return ErrStopWalk
+		}
+
+		return nil
+	})
+}
+
+// Match returns true if the format of file matches this
+// type's format. It should not affect reader position.
+func (*Zip) Match(file io.ReadSeeker) (bool, error) {
+	currentPos, err := file.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return false, err
+	}
+	_, err = file.Seek(0, 0)
+	if err != nil {
+		return false, err
+	}
+	defer file.Seek(currentPos, io.SeekStart)
+
+	buf := make([]byte, 4)
+	if n, err := file.Read(buf); err != nil || n < 4 {
+		return false, nil
+	}
+	return bytes.Equal(buf, []byte("PK\x03\x04")), nil
+}
+
+func (z *Zip) String() string { return "zip" }
+
+// NewZip returns a new, default instance ready to be customized and used.
+func NewZip() *Zip {
+	return &Zip{
+		CompressionLevel:     flate.DefaultCompression,
+		MkdirAll:             true,
+		SelectiveCompression: true,
+	}
+}
+
+// Compile-time checks to ensure type implements desired interfaces.
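+// (Each conversion below fails to compile if *Zip ever stops
+// satisfying one of these interfaces.)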
+var ( + _ = Reader(new(Zip)) + _ = Writer(new(Zip)) + _ = Archiver(new(Zip)) + _ = Unarchiver(new(Zip)) + _ = Walker(new(Zip)) + _ = Extractor(new(Zip)) + _ = Matcher(new(Zip)) + _ = ExtensionChecker(new(Zip)) +) + +// compressedFormats is a (non-exhaustive) set of lowercased +// file extensions for formats that are typically already +// compressed. Compressing files that are already compressed +// is inefficient, so use this set of extension to avoid that. +var compressedFormats = map[string]struct{}{ + ".7z": {}, + ".avi": {}, + ".br": {}, + ".bz2": {}, + ".cab": {}, + ".docx": {}, + ".gif": {}, + ".gz": {}, + ".jar": {}, + ".jpeg": {}, + ".jpg": {}, + ".lz": {}, + ".lz4": {}, + ".lzma": {}, + ".m4v": {}, + ".mov": {}, + ".mp3": {}, + ".mp4": {}, + ".mpeg": {}, + ".mpg": {}, + ".png": {}, + ".pptx": {}, + ".rar": {}, + ".sz": {}, + ".tbz2": {}, + ".tgz": {}, + ".tsz": {}, + ".txz": {}, + ".xlsx": {}, + ".xz": {}, + ".zip": {}, + ".zipx": {}, +} + +// DefaultZip is a default instance that is conveniently ready to use. +var DefaultZip = NewZip() diff --git a/vendor/github.com/nwaples/rardecode/LICENSE b/vendor/github.com/nwaples/rardecode/LICENSE new file mode 100644 index 00000000..0050f92d --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Nicholas Waples +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/nwaples/rardecode/README.md b/vendor/github.com/nwaples/rardecode/README.md new file mode 100644 index 00000000..513464c2 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/README.md @@ -0,0 +1,4 @@ +# rardecode +[![GoDoc](https://godoc.org/github.com/nwaples/rardecode?status.svg)](https://godoc.org/github.com/nwaples/rardecode) + +A go package for reading RAR archives. 
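+
+A rough usage sketch (`archive.rar` is a placeholder and error
+handling is elided; see the package godoc for the full API):
+
+```go
+rc, err := rardecode.OpenReader("archive.rar", "password")
+if err != nil { /* handle */ }
+defer rc.Close()
+for {
+	hdr, err := rc.Next()
+	if err == io.EOF {
+		break
+	}
+	if err != nil { /* handle */ }
+	fmt.Println(hdr.Name)
+	// rc also implements io.Reader for the current file's contents
+}
+```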
diff --git a/vendor/github.com/nwaples/rardecode/archive.go b/vendor/github.com/nwaples/rardecode/archive.go
new file mode 100644
index 00000000..34e4ac28
--- /dev/null
+++ b/vendor/github.com/nwaples/rardecode/archive.go
@@ -0,0 +1,342 @@
+package rardecode
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+const (
+	maxSfxSize = 0x100000 // maximum number of bytes to read when searching for RAR signature
+	sigPrefix  = "Rar!\x1A\x07"
+
+	fileFmt15 = iota + 1 // Version 1.5 archive file format
+	fileFmt50            // Version 5.0 archive file format
+)
+
+var (
+	errNoSig              = errors.New("rardecode: RAR signature not found")
+	errVerMismatch        = errors.New("rardecode: volume version mismatch")
+	errCorruptHeader      = errors.New("rardecode: corrupt block header")
+	errCorruptFileHeader  = errors.New("rardecode: corrupt file header")
+	errBadHeaderCrc       = errors.New("rardecode: bad header crc")
+	errUnknownArc         = errors.New("rardecode: unknown archive version")
+	errUnknownDecoder     = errors.New("rardecode: unknown decoder version")
+	errUnsupportedDecoder = errors.New("rardecode: unsupported decoder version")
+	errArchiveContinues   = errors.New("rardecode: archive continues in next volume")
+	errArchiveEnd         = errors.New("rardecode: archive end reached")
+	errDecoderOutOfData   = errors.New("rardecode: decoder expected more data than is in packed file")
+
+	reDigits = regexp.MustCompile(`\d+`)
+)
+
+type readBuf []byte
+
+func (b *readBuf) byte() byte {
+	v := (*b)[0]
+	*b = (*b)[1:]
+	return v
+}
+
+func (b *readBuf) uint16() uint16 {
+	v := uint16((*b)[0]) | uint16((*b)[1])<<8
+	*b = (*b)[2:]
+	return v
+}
+
+func (b *readBuf) uint32() uint32 {
+	v := uint32((*b)[0]) | uint32((*b)[1])<<8 | uint32((*b)[2])<<16 | uint32((*b)[3])<<24
+	*b = (*b)[4:]
+	return v
+}
+
+func (b *readBuf) bytes(n int) []byte {
+	v := (*b)[:n]
+	*b = (*b)[n:]
+	return v
+}
+
+func (b *readBuf) uvarint() uint64 {
+	var x uint64
+	var s uint
+	for i, n := range *b {
+		if n < 0x80 {
+			*b = (*b)[i+1:]
+			return x | uint64(n)<<s
+		}
+		x |= uint64(n&0x7f) << s
+		s += 7
+	}
+	// if we run out of bytes, just return 0
+	*b = (*b)[len(*b):]
+	return 0
+}
+
+// readFull wraps io.ReadFull to return io.ErrUnexpectedEOF instead
+// of io.EOF when 0 bytes are read.
+func readFull(r io.Reader, buf []byte) error {
+	_, err := io.ReadFull(r, buf)
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return err
+}
+
+// findSig searches for the RAR signature and version at the beginning of a file.
+// It searches no more than maxSfxSize bytes.
+func findSig(br *bufio.Reader) (int, error) {
+	for n := 0; n <= maxSfxSize; {
+		b, err := br.ReadSlice(sigPrefix[0])
+		n += len(b)
+		if err == bufio.ErrBufferFull {
+			continue
+		} else if err != nil {
+			if err == io.EOF {
+				err = errNoSig
+			}
+			return 0, err
+		}
+
+		// peek the remainder of the signature plus the version byte(s)
+		b, err = br.Peek(len(sigPrefix) + 1)
+		if err != nil {
+			if err == io.EOF {
+				err = errNoSig
+			}
+			return 0, err
+		}
+		if !bytes.HasPrefix(b, []byte(sigPrefix[1:])) {
+			continue
+		}
+
+		var ver int
+		switch b[len(sigPrefix)-1] {
+		case 0:
+			ver = fileFmt15
+			b = b[:len(sigPrefix)]
+		case 1:
+			if b[len(sigPrefix)] != 0 {
+				continue
+			}
+			ver = fileFmt50
+		default:
+			continue
+		}
+		if _, err = br.Discard(len(b)); err != nil {
+			return 0, err
+		}
+		return ver, nil
+	}
+	return 0, errNoSig
+}
+
+// volume extends a fileBlockReader to be used across multiple
+// files in a multi-volume archive.
+type volume struct {
+	fileBlockReader
+	f     *os.File      // current file handle
+	br    *bufio.Reader // buffered reader for current volume file
+	dir   string        // volume directory
+	file  string        // current volume file name (not including directory)
+	files []string      // full path names of volume files processed
+	num   int           // volume number
+	old   bool          // uses old naming scheme
+}
+
+// openFile opens the file named file in the volume directory.
+func (v *volume) openFile(file string) error {
+	f, err := os.Open(v.dir + file)
+	if err != nil {
+		return err
+	}
+	v.f = f
+	v.file = file
+	return nil
+}
+
+// nextNewVolName returns the next volume file name for archives
+// using the new (numbered volume) naming scheme.
+func nextNewVolName(file string) string {
+	// find the indices of all runs of digits in the file name
+	m := reDigits.FindAllStringIndex(file, -1)
+	if l := len(m); l > 1 {
+		// More than 1 match so assume name.part###of###.rar style.
+		// Take the last 2 matches where the first is the volume number.
+		m = m[l-2 : l]
+		if strings.Contains(file[m[0][1]:m[1][0]], ".") || !strings.Contains(file[:m[0][0]], ".") {
+			// Didn't match above style as volume had '.' between the two numbers or didn't have a '.'
+			// before the first match. Use the second number as volume number.
+			m = m[1:]
+		}
+	}
+	// extract and increment volume number
+	lo, hi := m[0][0], m[0][1]
+	n, err := strconv.Atoi(file[lo:hi])
+	if err != nil {
+		n = 0
+	} else {
+		n++
+	}
+	// volume number must use at least the same number of characters as previous volume
+	vol := fmt.Sprintf("%0"+fmt.Sprint(hi-lo)+"d", n)
+	file = file[:lo] + vol + file[hi:]
+	return file
+}
+
+func nextOldVolName(file string) string {
+	// old style volume naming
+	i := strings.LastIndex(file, ".")
+	// For old style naming if 2nd and 3rd character of file extension is not a digit replace
+	// with "00" and ignore any trailing characters.
+	if len(file) < i+4 || file[i+2] < '0' || file[i+2] > '9' || file[i+3] < '0' || file[i+3] > '9' {
+		file = file[:i+2] + "00"
+		return file
+	}
+	// get file extension
+	b := []byte(file[i+1:])
+	// start incrementing volume number digits from rightmost
+	for j := 2; j >= 0; j-- {
+		if b[j] != '9' {
+			b[j]++
+			break
+		}
+		// digit overflow
+		if j == 0 {
+			// last character before '.'
+ b[j] = 'A' + } else { + // set to '0' and loop to next character + b[j] = '0' + } + } + file = file[:i+1] + string(b) + return file +} + +// openNextFile opens the next volume file in the archive. +func (v *volume) openNextFile() error { + file := v.file + if v.num == 0 { + // check file extensions + i := strings.LastIndex(file, ".") + if i < 0 { + // no file extension, add one + file += ".rar" + } else { + ext := strings.ToLower(file[i+1:]) + // replace with .rar for empty extensions & self extracting archives + if ext == "" || ext == "exe" || ext == "sfx" { + file = file[:i+1] + "rar" + } + } + if a, ok := v.fileBlockReader.(*archive15); ok { + v.old = a.old + } + // new naming scheme must have volume number in filename + if !v.old { + if reDigits.FindStringIndex(file) != nil { + // found digits, try using new naming scheme + err := v.openFile(nextNewVolName(file)) + if err != nil && os.IsNotExist(err) { + // file didn't exist, try old naming scheme + oldErr := v.openFile(nextOldVolName(file)) + if oldErr == nil || !os.IsNotExist(err) { + v.old = true + return oldErr + } + } + return err + } + v.old = true + } + } + // new style volume naming + if !v.old { + file = nextNewVolName(file) + } else { + file = nextOldVolName(file) + } + return v.openFile(file) +} + +func (v *volume) next() (*fileBlockHeader, error) { + for { + var atEOF bool + + h, err := v.fileBlockReader.next() + switch err { + case errArchiveContinues: + case io.EOF: + // Read all of volume without finding an end block. The only way + // to tell if the archive continues is to try to open the next volume. + atEOF = true + default: + return h, err + } + + v.f.Close() + err = v.openNextFile() // Open next volume file + if err != nil { + if atEOF && os.IsNotExist(err) { + // volume not found so assume that the archive has ended + return nil, io.EOF + } + return nil, err + } + v.num++ + v.br.Reset(v.f) + ver, err := findSig(v.br) + if err != nil { + return nil, err + } + if v.version() != ver { + return nil, errVerMismatch + } + v.files = append(v.files, v.dir+v.file) + v.reset() // reset encryption + } +} + +func (v *volume) Close() error { + // may be nil if os.Open fails in next() + if v.f == nil { + return nil + } + return v.f.Close() +} + +func openVolume(name, password string) (*volume, error) { + var err error + v := new(volume) + v.dir, v.file = filepath.Split(name) + v.f, err = os.Open(name) + if err != nil { + return nil, err + } + v.br = bufio.NewReader(v.f) + v.fileBlockReader, err = newFileBlockReader(v.br, password) + if err != nil { + v.f.Close() + return nil, err + } + v.files = append(v.files, name) + return v, nil +} + +func newFileBlockReader(br *bufio.Reader, pass string) (fileBlockReader, error) { + runes := []rune(pass) + if len(runes) > maxPassword { + pass = string(runes[:maxPassword]) + } + ver, err := findSig(br) + if err != nil { + return nil, err + } + switch ver { + case fileFmt15: + return newArchive15(br, pass), nil + case fileFmt50: + return newArchive50(br, pass), nil + } + return nil, errUnknownArc +} diff --git a/vendor/github.com/nwaples/rardecode/archive15.go b/vendor/github.com/nwaples/rardecode/archive15.go new file mode 100644 index 00000000..260176c0 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/archive15.go @@ -0,0 +1,468 @@ +package rardecode + +import ( + "bufio" + "bytes" + "crypto/sha1" + "errors" + "hash" + "hash/crc32" + "io" + "io/ioutil" + "strconv" + "strings" + "time" + "unicode/utf16" +) + +const ( + // block types + blockArc = 0x73 + blockFile = 0x74 + 
blockService = 0x7a + blockEnd = 0x7b + + // block flags + blockHasData = 0x8000 + + // archive block flags + arcVolume = 0x0001 + arcSolid = 0x0008 + arcNewNaming = 0x0010 + arcEncrypted = 0x0080 + + // file block flags + fileSplitBefore = 0x0001 + fileSplitAfter = 0x0002 + fileEncrypted = 0x0004 + fileSolid = 0x0010 + fileWindowMask = 0x00e0 + fileLargeData = 0x0100 + fileUnicode = 0x0200 + fileSalt = 0x0400 + fileVersion = 0x0800 + fileExtTime = 0x1000 + + // end block flags + endArcNotLast = 0x0001 + + saltSize = 8 // size of salt for calculating AES keys + cacheSize30 = 4 // number of AES keys to cache + hashRounds = 0x40000 +) + +var ( + errMultipleDecoders = errors.New("rardecode: multiple decoders in a single archive not supported") +) + +type blockHeader15 struct { + htype byte // block header type + flags uint16 + data readBuf // header data + dataSize int64 // size of extra block data +} + +// fileHash32 implements fileChecksum for 32-bit hashes +type fileHash32 struct { + hash.Hash32 // hash to write file contents to + sum uint32 // 32bit checksum for file +} + +func (h *fileHash32) valid() bool { + return h.sum == h.Sum32() +} + +// archive15 implements fileBlockReader for RAR 1.5 file format archives +type archive15 struct { + byteReader // reader for current block data + v *bufio.Reader // reader for current archive volume + dec decoder // current decoder + decVer byte // current decoder version + multi bool // archive is multi-volume + old bool // archive uses old naming scheme + solid bool // archive is a solid archive + encrypted bool + pass []uint16 // password in UTF-16 + checksum fileHash32 // file checksum + buf readBuf // temporary buffer + keyCache [cacheSize30]struct { // cache of previously calculated decryption keys + salt []byte + key []byte + iv []byte + } +} + +// Calculates the key and iv for AES decryption given a password and salt. +func calcAes30Params(pass []uint16, salt []byte) (key, iv []byte) { + p := make([]byte, 0, len(pass)*2+len(salt)) + for _, v := range pass { + p = append(p, byte(v), byte(v>>8)) + } + p = append(p, salt...) + + hash := sha1.New() + iv = make([]byte, 16) + s := make([]byte, 0, hash.Size()) + for i := 0; i < hashRounds; i++ { + hash.Write(p) + hash.Write([]byte{byte(i), byte(i >> 8), byte(i >> 16)}) + if i%(hashRounds/16) == 0 { + s = hash.Sum(s[:0]) + iv[i/(hashRounds/16)] = s[4*4+3] + } + } + key = hash.Sum(s[:0]) + key = key[:16] + + for k := key; len(k) >= 4; k = k[4:] { + k[0], k[1], k[2], k[3] = k[3], k[2], k[1], k[0] + } + return key, iv +} + +// parseDosTime converts a 32bit DOS time value to time.Time +func parseDosTime(t uint32) time.Time { + n := int(t) + sec := n & 0x1f << 1 + min := n >> 5 & 0x3f + hr := n >> 11 & 0x1f + day := n >> 16 & 0x1f + mon := time.Month(n >> 21 & 0x0f) + yr := n>>25&0x7f + 1980 + return time.Date(yr, mon, day, hr, min, sec, 0, time.Local) +} + +// decodeName decodes a non-unicode filename from a file header. 
+func decodeName(buf []byte) string { + i := bytes.IndexByte(buf, 0) + if i < 0 { + return string(buf) // filename is UTF-8 + } + + name := buf[:i] + encName := readBuf(buf[i+1:]) + if len(encName) < 2 { + return "" // invalid encoding + } + highByte := uint16(encName.byte()) << 8 + flags := encName.byte() + flagBits := 8 + var wchars []uint16 // decoded characters are UTF-16 + for len(wchars) < len(name) && len(encName) > 0 { + if flagBits == 0 { + flags = encName.byte() + flagBits = 8 + if len(encName) == 0 { + break + } + } + switch flags >> 6 { + case 0: + wchars = append(wchars, uint16(encName.byte())) + case 1: + wchars = append(wchars, uint16(encName.byte())|highByte) + case 2: + if len(encName) < 2 { + break + } + wchars = append(wchars, encName.uint16()) + case 3: + n := encName.byte() + b := name[len(wchars):] + if l := int(n&0x7f) + 2; l < len(b) { + b = b[:l] + } + if n&0x80 > 0 { + if len(encName) < 1 { + break + } + ec := encName.byte() + for _, c := range b { + wchars = append(wchars, uint16(c+ec)|highByte) + } + } else { + for _, c := range b { + wchars = append(wchars, uint16(c)) + } + } + } + flags <<= 2 + flagBits -= 2 + } + return string(utf16.Decode(wchars)) +} + +// readExtTimes reads and parses the optional extra time field from the file header. +func readExtTimes(f *fileBlockHeader, b *readBuf) { + if len(*b) < 2 { + return // invalid, not enough data + } + flags := b.uint16() + + ts := []*time.Time{&f.ModificationTime, &f.CreationTime, &f.AccessTime} + + for i, t := range ts { + n := flags >> uint((3-i)*4) + if n&0x8 == 0 { + continue + } + if i != 0 { // ModificationTime already read so skip + if len(*b) < 4 { + return // invalid, not enough data + } + *t = parseDosTime(b.uint32()) + } + if n&0x4 > 0 { + *t = t.Add(time.Second) + } + n &= 0x3 + if n == 0 { + continue + } + if len(*b) < int(n) { + return // invalid, not enough data + } + // add extra time data in 100's of nanoseconds + d := time.Duration(0) + for j := 3 - n; j < n; j++ { + d |= time.Duration(b.byte()) << (j * 8) + } + d *= 100 + *t = t.Add(d) + } +} + +func (a *archive15) getKeys(salt []byte) (key, iv []byte) { + // check cache of keys + for _, v := range a.keyCache { + if bytes.Equal(v.salt[:], salt) { + return v.key, v.iv + } + } + key, iv = calcAes30Params(a.pass, salt) + + // save a copy in the cache + copy(a.keyCache[1:], a.keyCache[:]) + a.keyCache[0].salt = append([]byte(nil), salt...) 
// copy so byte slice can be reused + a.keyCache[0].key = key + a.keyCache[0].iv = iv + + return key, iv +} + +func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) { + f := new(fileBlockHeader) + + f.first = h.flags&fileSplitBefore == 0 + f.last = h.flags&fileSplitAfter == 0 + + f.solid = h.flags&fileSolid > 0 + f.IsDir = h.flags&fileWindowMask == fileWindowMask + if !f.IsDir { + f.winSize = uint(h.flags&fileWindowMask)>>5 + 16 + } + + b := h.data + if len(b) < 21 { + return nil, errCorruptFileHeader + } + + f.PackedSize = h.dataSize + f.UnPackedSize = int64(b.uint32()) + f.HostOS = b.byte() + 1 + if f.HostOS > HostOSBeOS { + f.HostOS = HostOSUnknown + } + a.checksum.sum = b.uint32() + + f.ModificationTime = parseDosTime(b.uint32()) + unpackver := b.byte() // decoder version + method := b.byte() - 0x30 // decryption method + namesize := int(b.uint16()) + f.Attributes = int64(b.uint32()) + if h.flags&fileLargeData > 0 { + if len(b) < 8 { + return nil, errCorruptFileHeader + } + _ = b.uint32() // already read large PackedSize in readBlockHeader + f.UnPackedSize |= int64(b.uint32()) << 32 + f.UnKnownSize = f.UnPackedSize == -1 + } else if int32(f.UnPackedSize) == -1 { + f.UnKnownSize = true + f.UnPackedSize = -1 + } + if len(b) < namesize { + return nil, errCorruptFileHeader + } + name := b.bytes(namesize) + if h.flags&fileUnicode == 0 { + f.Name = string(name) + } else { + f.Name = decodeName(name) + } + // Rar 4.x uses '\' as file separator + f.Name = strings.Replace(f.Name, "\\", "/", -1) + + if h.flags&fileVersion > 0 { + // file version is stored as ';n' appended to file name + i := strings.LastIndex(f.Name, ";") + if i > 0 { + j, err := strconv.Atoi(f.Name[i+1:]) + if err == nil && j >= 0 { + f.Version = j + f.Name = f.Name[:i] + } + } + } + + var salt []byte + if h.flags&fileSalt > 0 { + if len(b) < saltSize { + return nil, errCorruptFileHeader + } + salt = b.bytes(saltSize) + } + if h.flags&fileExtTime > 0 { + readExtTimes(f, &b) + } + + if !f.first { + return f, nil + } + // fields only needed for first block in a file + if h.flags&fileEncrypted > 0 && len(salt) == saltSize { + f.key, f.iv = a.getKeys(salt) + } + a.checksum.Reset() + f.cksum = &a.checksum + if method == 0 { + return f, nil + } + if a.dec == nil { + switch unpackver { + case 15, 20, 26: + return nil, errUnsupportedDecoder + case 29: + a.dec = new(decoder29) + default: + return nil, errUnknownDecoder + } + a.decVer = unpackver + } else if a.decVer != unpackver { + return nil, errMultipleDecoders + } + f.decoder = a.dec + return f, nil +} + +// readBlockHeader returns the next block header in the archive. +// It will return io.EOF if there were no bytes read. 
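+//
+// As the code below reads it, each header starts with a fixed 7 byte
+// prologue: a little-endian uint16 CRC, a type byte, a uint16 flags
+// field and a uint16 total header size, followed by size-7 bytes of
+// header data (all decrypted first when the archive is encrypted).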
+func (a *archive15) readBlockHeader() (*blockHeader15, error) { + var err error + b := a.buf[:7] + r := io.Reader(a.v) + if a.encrypted { + salt := a.buf[:saltSize] + _, err = io.ReadFull(r, salt) + if err != nil { + return nil, err + } + key, iv := a.getKeys(salt) + r = newAesDecryptReader(r, key, iv) + err = readFull(r, b) + } else { + _, err = io.ReadFull(r, b) + } + if err != nil { + return nil, err + } + + crc := b.uint16() + hash := crc32.NewIEEE() + hash.Write(b) + h := new(blockHeader15) + h.htype = b.byte() + h.flags = b.uint16() + size := b.uint16() + if size < 7 { + return nil, errCorruptHeader + } + size -= 7 + if int(size) > cap(a.buf) { + a.buf = readBuf(make([]byte, size)) + } + h.data = a.buf[:size] + if err := readFull(r, h.data); err != nil { + return nil, err + } + hash.Write(h.data) + if crc != uint16(hash.Sum32()) { + return nil, errBadHeaderCrc + } + if h.flags&blockHasData > 0 { + if len(h.data) < 4 { + return nil, errCorruptHeader + } + h.dataSize = int64(h.data.uint32()) + } + if (h.htype == blockService || h.htype == blockFile) && h.flags&fileLargeData > 0 { + if len(h.data) < 25 { + return nil, errCorruptHeader + } + b := h.data[21:25] + h.dataSize |= int64(b.uint32()) << 32 + } + return h, nil +} + +// next advances to the next file block in the archive +func (a *archive15) next() (*fileBlockHeader, error) { + for { + // could return an io.EOF here as 1.5 archives may not have an end block. + h, err := a.readBlockHeader() + if err != nil { + return nil, err + } + a.byteReader = limitByteReader(a.v, h.dataSize) // reader for block data + + switch h.htype { + case blockFile: + return a.parseFileHeader(h) + case blockArc: + a.encrypted = h.flags&arcEncrypted > 0 + a.multi = h.flags&arcVolume > 0 + a.old = h.flags&arcNewNaming == 0 + a.solid = h.flags&arcSolid > 0 + case blockEnd: + if h.flags&endArcNotLast == 0 || !a.multi { + return nil, errArchiveEnd + } + return nil, errArchiveContinues + default: + _, err = io.Copy(ioutil.Discard, a.byteReader) + } + if err != nil { + return nil, err + } + } +} + +func (a *archive15) version() int { return fileFmt15 } + +func (a *archive15) reset() { + a.encrypted = false // reset encryption when opening new volume file +} + +func (a *archive15) isSolid() bool { + return a.solid +} + +// newArchive15 creates a new fileBlockReader for a Version 1.5 archive +func newArchive15(r *bufio.Reader, password string) fileBlockReader { + a := new(archive15) + a.v = r + a.pass = utf16.Encode([]rune(password)) // convert to UTF-16 + a.checksum.Hash32 = crc32.NewIEEE() + a.buf = readBuf(make([]byte, 100)) + return a +} diff --git a/vendor/github.com/nwaples/rardecode/archive50.go b/vendor/github.com/nwaples/rardecode/archive50.go new file mode 100644 index 00000000..1d8f850d --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/archive50.go @@ -0,0 +1,475 @@ +package rardecode + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/sha256" + "errors" + "hash" + "hash/crc32" + "io" + "io/ioutil" + "time" +) + +const ( + // block types + block5Arc = 1 + block5File = 2 + block5Service = 3 + block5Encrypt = 4 + block5End = 5 + + // block flags + block5HasExtra = 0x0001 + block5HasData = 0x0002 + block5DataNotFirst = 0x0008 + block5DataNotLast = 0x0010 + + // end block flags + endArc5NotLast = 0x0001 + + // archive encryption block flags + enc5CheckPresent = 0x0001 // password check data is present + + // main archive block flags + arc5MultiVol = 0x0001 + arc5Solid = 0x0004 + + // file block flags + file5IsDir = 0x0001 + file5HasUnixMtime = 
0x0002 + file5HasCRC32 = 0x0004 + file5UnpSizeUnknown = 0x0008 + + // file encryption record flags + file5EncCheckPresent = 0x0001 // password check data is present + file5EncUseMac = 0x0002 // use MAC instead of plain checksum + + cacheSize50 = 4 + maxPbkdf2Salt = 64 + pwCheckSize = 8 + maxKdfCount = 24 + + minHeaderSize = 7 +) + +var ( + errBadPassword = errors.New("rardecode: incorrect password") + errCorruptEncrypt = errors.New("rardecode: corrupt encryption data") + errUnknownEncMethod = errors.New("rardecode: unknown encryption method") +) + +type extra struct { + ftype uint64 // field type + data readBuf // field data +} + +type blockHeader50 struct { + htype uint64 // block type + flags uint64 + data readBuf // block header data + extra []extra // extra fields + dataSize int64 // size of block data +} + +// leHash32 wraps a hash.Hash32 to return the result of Sum in little +// endian format. +type leHash32 struct { + hash.Hash32 +} + +func (h leHash32) Sum(b []byte) []byte { + s := h.Sum32() + return append(b, byte(s), byte(s>>8), byte(s>>16), byte(s>>24)) +} + +func newLittleEndianCRC32() hash.Hash32 { + return leHash32{crc32.NewIEEE()} +} + +// hash50 implements fileChecksum for RAR 5 archives +type hash50 struct { + hash.Hash // hash file data is written to + sum []byte // file checksum + key []byte // if present used with hmac in calculating checksum from hash +} + +func (h *hash50) valid() bool { + sum := h.Sum(nil) + if len(h.key) > 0 { + mac := hmac.New(sha256.New, h.key) + mac.Write(sum) + sum = mac.Sum(sum[:0]) + if len(h.sum) == 4 { + // CRC32 + for i, v := range sum[4:] { + sum[i&3] ^= v + } + sum = sum[:4] + } + } + return bytes.Equal(sum, h.sum) +} + +// archive50 implements fileBlockReader for RAR 5 file format archives +type archive50 struct { + byteReader // reader for current block data + v *bufio.Reader // reader for current archive volume + pass []byte + blockKey []byte // key used to encrypt blocks + multi bool // archive is multi-volume + solid bool // is a solid archive + checksum hash50 // file checksum + dec decoder // optional decoder used to unpack file + buf readBuf // temporary buffer + keyCache [cacheSize50]struct { // encryption key cache + kdfCount int + salt []byte + keys [][]byte + } +} + +// calcKeys50 calculates the keys used in RAR 5 archive processing. +// The returned slice of byte slices contains 3 keys. +// Key 0 is used for block or file decryption. +// Key 1 is optionally used for file checksum calculation. +// Key 2 is optionally used for password checking. +func calcKeys50(pass, salt []byte, kdfCount int) [][]byte { + if len(salt) > maxPbkdf2Salt { + salt = salt[:maxPbkdf2Salt] + } + keys := make([][]byte, 3) + if len(keys) == 0 { + return keys + } + + prf := hmac.New(sha256.New, pass) + prf.Write(salt) + prf.Write([]byte{0, 0, 0, 1}) + + t := prf.Sum(nil) + u := append([]byte(nil), t...) + + kdfCount-- + + for i, iter := range []int{kdfCount, 16, 16} { + for iter > 0 { + prf.Reset() + prf.Write(u) + u = prf.Sum(u[:0]) + for j := range u { + t[j] ^= u[j] + } + iter-- + } + keys[i] = append([]byte(nil), t...) + } + + pwcheck := keys[2] + for i, v := range pwcheck[pwCheckSize:] { + pwcheck[i&(pwCheckSize-1)] ^= v + } + keys[2] = pwcheck[:pwCheckSize] + + return keys +} + +// getKeys reads kdfcount and salt from b and returns the corresponding encryption keys. 
+func (a *archive50) getKeys(b *readBuf) (keys [][]byte, err error) { + if len(*b) < 17 { + return nil, errCorruptEncrypt + } + // read kdf count and salt + kdfCount := int(b.byte()) + if kdfCount > maxKdfCount { + return nil, errCorruptEncrypt + } + kdfCount = 1 << uint(kdfCount) + salt := b.bytes(16) + + // check cache of keys for match + for _, v := range a.keyCache { + if kdfCount == v.kdfCount && bytes.Equal(salt, v.salt) { + return v.keys, nil + } + } + // not found, calculate keys + keys = calcKeys50(a.pass, salt, kdfCount) + + // store in cache + copy(a.keyCache[1:], a.keyCache[:]) + a.keyCache[0].kdfCount = kdfCount + a.keyCache[0].salt = append([]byte(nil), salt...) + a.keyCache[0].keys = keys + + return keys, nil +} + +// checkPassword calculates if a password is correct given password check data and keys. +func checkPassword(b *readBuf, keys [][]byte) error { + if len(*b) < 12 { + return nil // not enough bytes, ignore for the moment + } + pwcheck := b.bytes(8) + sum := b.bytes(4) + csum := sha256.Sum256(pwcheck) + if bytes.Equal(sum, csum[:len(sum)]) && !bytes.Equal(pwcheck, keys[2]) { + return errBadPassword + } + return nil +} + +// parseFileEncryptionRecord processes the optional file encryption record from a file header. +func (a *archive50) parseFileEncryptionRecord(b readBuf, f *fileBlockHeader) error { + if ver := b.uvarint(); ver != 0 { + return errUnknownEncMethod + } + flags := b.uvarint() + + keys, err := a.getKeys(&b) + if err != nil { + return err + } + + f.key = keys[0] + if len(b) < 16 { + return errCorruptEncrypt + } + f.iv = b.bytes(16) + + if flags&file5EncCheckPresent > 0 { + if err := checkPassword(&b, keys); err != nil { + return err + } + } + if flags&file5EncUseMac > 0 { + a.checksum.key = keys[1] + } + return nil +} + +func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) { + a.checksum.sum = nil + a.checksum.key = nil + + f := new(fileBlockHeader) + + f.first = h.flags&block5DataNotFirst == 0 + f.last = h.flags&block5DataNotLast == 0 + + flags := h.data.uvarint() // file flags + f.IsDir = flags&file5IsDir > 0 + f.UnKnownSize = flags&file5UnpSizeUnknown > 0 + f.UnPackedSize = int64(h.data.uvarint()) + f.PackedSize = h.dataSize + f.Attributes = int64(h.data.uvarint()) + if flags&file5HasUnixMtime > 0 { + if len(h.data) < 4 { + return nil, errCorruptFileHeader + } + f.ModificationTime = time.Unix(int64(h.data.uint32()), 0) + } + if flags&file5HasCRC32 > 0 { + if len(h.data) < 4 { + return nil, errCorruptFileHeader + } + a.checksum.sum = append([]byte(nil), h.data.bytes(4)...) 
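+		// The hash is only (re)initialized for the first block of a
+		// file; continuation blocks in later volumes keep writing
+		// into the same running hash.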
+ if f.first { + a.checksum.Hash = newLittleEndianCRC32() + f.cksum = &a.checksum + } + } + + flags = h.data.uvarint() // compression flags + f.solid = flags&0x0040 > 0 + f.winSize = uint(flags&0x3C00)>>10 + 17 + method := (flags >> 7) & 7 // compression method (0 == none) + if f.first && method != 0 { + unpackver := flags & 0x003f + if unpackver != 0 { + return nil, errUnknownDecoder + } + if a.dec == nil { + a.dec = new(decoder50) + } + f.decoder = a.dec + } + switch h.data.uvarint() { + case 0: + f.HostOS = HostOSWindows + case 1: + f.HostOS = HostOSUnix + default: + f.HostOS = HostOSUnknown + } + nlen := int(h.data.uvarint()) + if len(h.data) < nlen { + return nil, errCorruptFileHeader + } + f.Name = string(h.data.bytes(nlen)) + + // parse optional extra records + for _, e := range h.extra { + var err error + switch e.ftype { + case 1: // encryption + err = a.parseFileEncryptionRecord(e.data, f) + case 2: + // TODO: hash + case 3: + // TODO: time + case 4: // version + _ = e.data.uvarint() // ignore flags field + f.Version = int(e.data.uvarint()) + case 5: + // TODO: redirection + case 6: + // TODO: owner + } + if err != nil { + return nil, err + } + } + return f, nil +} + +// parseEncryptionBlock calculates the key for block encryption. +func (a *archive50) parseEncryptionBlock(b readBuf) error { + if ver := b.uvarint(); ver != 0 { + return errUnknownEncMethod + } + flags := b.uvarint() + keys, err := a.getKeys(&b) + if err != nil { + return err + } + if flags&enc5CheckPresent > 0 { + if err := checkPassword(&b, keys); err != nil { + return err + } + } + a.blockKey = keys[0] + return nil +} + +func (a *archive50) readBlockHeader() (*blockHeader50, error) { + r := io.Reader(a.v) + if a.blockKey != nil { + // block is encrypted + iv := a.buf[:16] + if err := readFull(r, iv); err != nil { + return nil, err + } + r = newAesDecryptReader(r, a.blockKey, iv) + } + + b := a.buf[:minHeaderSize] + if err := readFull(r, b); err != nil { + return nil, err + } + crc := b.uint32() + + hash := crc32.NewIEEE() + hash.Write(b) + + size := int(b.uvarint()) // header size + if size > cap(a.buf) { + a.buf = readBuf(make([]byte, size)) + } else { + a.buf = a.buf[:size] + } + n := copy(a.buf, b) // copy left over bytes + if err := readFull(r, a.buf[n:]); err != nil { // read rest of header + return nil, err + } + + // check header crc + hash.Write(a.buf[n:]) + if crc != hash.Sum32() { + return nil, errBadHeaderCrc + } + + b = a.buf + h := new(blockHeader50) + h.htype = b.uvarint() + h.flags = b.uvarint() + + var extraSize int + if h.flags&block5HasExtra > 0 { + extraSize = int(b.uvarint()) + } + if h.flags&block5HasData > 0 { + h.dataSize = int64(b.uvarint()) + } + if len(b) < extraSize { + return nil, errCorruptHeader + } + h.data = b.bytes(len(b) - extraSize) + + // read header extra records + for len(b) > 0 { + size = int(b.uvarint()) + if len(b) < size { + return nil, errCorruptHeader + } + data := readBuf(b.bytes(size)) + ftype := data.uvarint() + h.extra = append(h.extra, extra{ftype, data}) + } + + return h, nil +} + +// next advances to the next file block in the archive +func (a *archive50) next() (*fileBlockHeader, error) { + for { + h, err := a.readBlockHeader() + if err != nil { + return nil, err + } + a.byteReader = limitByteReader(a.v, h.dataSize) + switch h.htype { + case block5File: + return a.parseFileHeader(h) + case block5Arc: + flags := h.data.uvarint() + a.multi = flags&arc5MultiVol > 0 + a.solid = flags&arc5Solid > 0 + case block5Encrypt: + err = a.parseEncryptionBlock(h.data) + case 
block5End: + flags := h.data.uvarint() + if flags&endArc5NotLast == 0 || !a.multi { + return nil, errArchiveEnd + } + return nil, errArchiveContinues + default: + // discard block data + _, err = io.Copy(ioutil.Discard, a.byteReader) + } + if err != nil { + return nil, err + } + } +} + +func (a *archive50) version() int { return fileFmt50 } + +func (a *archive50) reset() { + a.blockKey = nil // reset encryption when opening new volume file +} + +func (a *archive50) isSolid() bool { + return a.solid +} + +// newArchive50 creates a new fileBlockReader for a Version 5 archive. +func newArchive50(r *bufio.Reader, password string) fileBlockReader { + a := new(archive50) + a.v = r + a.pass = []byte(password) + a.buf = make([]byte, 100) + return a +} diff --git a/vendor/github.com/nwaples/rardecode/bit_reader.go b/vendor/github.com/nwaples/rardecode/bit_reader.go new file mode 100644 index 00000000..9b284efa --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/bit_reader.go @@ -0,0 +1,119 @@ +package rardecode + +import "io" + +type bitReader interface { + readBits(n uint) (int, error) // read n bits of data + unreadBits(n uint) // revert the reading of the last n bits read +} + +type limitedBitReader struct { + br bitReader + n int + err error // error to return if br returns EOF before all n bits have been read +} + +// limitBitReader returns a bitReader that reads from br and stops with io.EOF after n bits. +// If br returns an io.EOF before reading n bits, err is returned. +func limitBitReader(br bitReader, n int, err error) bitReader { + return &limitedBitReader{br, n, err} +} + +func (l *limitedBitReader) readBits(n uint) (int, error) { + if int(n) > l.n { + return 0, io.EOF + } + v, err := l.br.readBits(n) + if err == nil { + l.n -= int(n) + } else if err == io.EOF { + err = l.err + } + return v, err +} + +func (l *limitedBitReader) unreadBits(n uint) { + l.n += int(n) + l.br.unreadBits(n) +} + +// rarBitReader wraps an io.ByteReader to perform various bit and byte +// reading utility functions used in RAR file processing. +type rarBitReader struct { + r io.ByteReader + v int + n uint +} + +func (r *rarBitReader) reset(br io.ByteReader) { + r.r = br + r.n = 0 + r.v = 0 +} + +func (r *rarBitReader) readBits(n uint) (int, error) { + for n > r.n { + c, err := r.r.ReadByte() + if err != nil { + return 0, err + } + r.v = r.v<<8 | int(c) + r.n += 8 + } + r.n -= n + return (r.v >> r.n) & ((1 << n) - 1), nil +} + +func (r *rarBitReader) unreadBits(n uint) { + r.n += n +} + +// alignByte aligns the current bit reading input to the next byte boundary. +func (r *rarBitReader) alignByte() { + r.n -= r.n % 8 +} + +// readUint32 reads a RAR V3 encoded uint32 +func (r *rarBitReader) readUint32() (uint32, error) { + n, err := r.readBits(2) + if err != nil { + return 0, err + } + if n != 1 { + n, err = r.readBits(4 << uint(n)) + return uint32(n), err + } + n, err = r.readBits(4) + if err != nil { + return 0, err + } + if n == 0 { + n, err = r.readBits(8) + n |= -1 << 8 + return uint32(n), err + } + nlow, err := r.readBits(4) + n = n<<4 | nlow + return uint32(n), err +} + +func (r *rarBitReader) ReadByte() (byte, error) { + n, err := r.readBits(8) + return byte(n), err +} + +// readFull reads len(p) bytes into p. If fewer bytes are read an error is returned. 
+func (r *rarBitReader) readFull(p []byte) error { + for i := range p { + c, err := r.ReadByte() + if err != nil { + return err + } + p[i] = c + } + return nil +} + +func newRarBitReader(r io.ByteReader) *rarBitReader { + return &rarBitReader{r: r} +} diff --git a/vendor/github.com/nwaples/rardecode/decode29.go b/vendor/github.com/nwaples/rardecode/decode29.go new file mode 100644 index 00000000..638645e7 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode29.go @@ -0,0 +1,264 @@ +package rardecode + +import ( + "bytes" + "errors" + "io" +) + +const ( + maxCodeSize = 0x10000 + maxUniqueFilters = 1024 +) + +var ( + // Errors marking the end of the decoding block and/or file + endOfFile = errors.New("rardecode: end of file") + endOfBlock = errors.New("rardecode: end of block") + endOfBlockAndFile = errors.New("rardecode: end of block and file") +) + +// decoder29 implements the decoder interface for RAR 3.0 compression (unpack version 29). +// Decode input is broken up into 1 or more blocks. The start of each block specifies +// the decoding algorithm (ppm or lz) and optional data to initialize with. +// Block length is not stored; it is determined only after decoding an end of file and/or +// block marker in the data. +type decoder29 struct { + br *rarBitReader + eof bool // at file eof + fnum int // current filter number (index into filters) + flen []int // filter block length history + filters []v3Filter // list of current filters used by archive encoding + + // current decode function (lz or ppm). + // When called it should perform a single decode operation, and either apply the + // data to the window or return the raw bytes for a filter. + decode func(w *window) ([]byte, error) + + lz lz29Decoder // lz decoder + ppm ppm29Decoder // ppm decoder +} + +// init initializes the decoder for decoding a new file. +func (d *decoder29) init(r io.ByteReader, reset bool) error { + if d.br == nil { + d.br = newRarBitReader(r) + } else { + d.br.reset(r) + } + d.eof = false + if reset { + d.initFilters() + d.lz.reset() + d.ppm.reset() + d.decode = nil + } + if d.decode == nil { + return d.readBlockHeader() + } + return nil +} + +func (d *decoder29) initFilters() { + d.fnum = 0 + d.flen = nil + d.filters = nil +} + +// readVMCode reads the raw bytes for the code/commands used in a vm filter +func readVMCode(br *rarBitReader) ([]byte, error) { + n, err := br.readUint32() + if err != nil { + return nil, err + } + if n > maxCodeSize || n == 0 { + return nil, errInvalidFilter + } + buf := make([]byte, n) + err = br.readFull(buf) + if err != nil { + return nil, err + } + var x byte + for _, c := range buf[1:] { + x ^= c + } + // simple xor checksum on data + if x != buf[0] { + return nil, errInvalidFilter + } + return buf, nil +} + +func (d *decoder29) parseVMFilter(buf []byte) (*filterBlock, error) { + flags := buf[0] + br := newRarBitReader(bytes.NewReader(buf[1:])) + fb := new(filterBlock) + + // Find the filter number which is an index into d.filters. + // If filter number == len(d.filters) it is a new filter to be added.
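+ // Editor's note (illustrative): with len(d.filters) == 2, a decoded value of + // 0 resets the list, 1 or 2 reuse filter index 0 or 1, and 3 (index 2 == + // len(d.filters)) introduces a new filter whose code is read further below.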
+ if flags&0x80 > 0 { + n, err := br.readUint32() + if err != nil { + return nil, err + } + if n == 0 { + d.initFilters() + fb.reset = true + } else { + n-- + if n > maxUniqueFilters { + return nil, errInvalidFilter + } + if int(n) > len(d.filters) { + return nil, errInvalidFilter + } + } + d.fnum = int(n) + } + + // filter offset + n, err := br.readUint32() + if err != nil { + return nil, err + } + if flags&0x40 > 0 { + n += 258 + } + fb.offset = int(n) + + // filter length + if d.fnum == len(d.flen) { + d.flen = append(d.flen, 0) + } + if flags&0x20 > 0 { + n, err = br.readUint32() + if err != nil { + return nil, err + } + //fb.length = int(n) + d.flen[d.fnum] = int(n) + } + fb.length = d.flen[d.fnum] + + // initial register values + r := make(map[int]uint32) + if flags&0x10 > 0 { + bits, err := br.readBits(vmRegs - 1) + if err != nil { + return nil, err + } + for i := 0; i < vmRegs-1; i++ { + if bits&1 > 0 { + r[i], err = br.readUint32() + if err != nil { + return nil, err + } + } + bits >>= 1 + } + } + + // filter is new so read the code for it + if d.fnum == len(d.filters) { + code, err := readVMCode(br) + if err != nil { + return nil, err + } + f, err := getV3Filter(code) + if err != nil { + return nil, err + } + d.filters = append(d.filters, f) + d.flen = append(d.flen, fb.length) + } + + // read global data + var g []byte + if flags&0x08 > 0 { + n, err := br.readUint32() + if err != nil { + return nil, err + } + if n > vmGlobalSize-vmFixedGlobalSize { + return nil, errInvalidFilter + } + g = make([]byte, n) + err = br.readFull(g) + if err != nil { + return nil, err + } + } + + // create filter function + f := d.filters[d.fnum] + fb.filter = func(buf []byte, offset int64) ([]byte, error) { + return f(r, g, buf, offset) + } + + return fb, nil +} + +// readBlockHeader determines and initializes the current decoder for a new decode block. 
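+// A minimal sketch of the block header (editor's note): after aligning to a +// byte boundary, a single bit selects the decoder for the next block: +// +//	0 -> lz   (lz.init reads new huffman code length tables) +//	1 -> ppm  (ppm.init reads the model order and memory size)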
+func (d *decoder29) readBlockHeader() error { + d.br.alignByte() + n, err := d.br.readBits(1) + if err == nil { + if n > 0 { + d.decode = d.ppm.decode + err = d.ppm.init(d.br) + } else { + d.decode = d.lz.decode + err = d.lz.init(d.br) + } + } + if err == io.EOF { + err = errDecoderOutOfData + } + return err + +} + +func (d *decoder29) fill(w *window) ([]*filterBlock, error) { + if d.eof { + return nil, io.EOF + } + + var fl []*filterBlock + + for w.available() > 0 { + b, err := d.decode(w) // perform a single decode operation + if len(b) > 0 && err == nil { + // parse raw data for filter and add to list of filters + var f *filterBlock + f, err = d.parseVMFilter(b) + if f != nil { + // make offset relative to read index (from write index) + f.offset += w.buffered() + fl = append(fl, f) + } + } + + switch err { + case nil: + continue + case endOfBlock: + err = d.readBlockHeader() + if err == nil { + continue + } + case endOfFile: + d.eof = true + err = io.EOF + case endOfBlockAndFile: + d.eof = true + d.decode = nil // clear decoder, it will be setup by next init() + err = io.EOF + case io.EOF: + err = errDecoderOutOfData + } + return fl, err + } + // return filters + return fl, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode29_lz.go b/vendor/github.com/nwaples/rardecode/decode29_lz.go new file mode 100644 index 00000000..94470853 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode29_lz.go @@ -0,0 +1,247 @@ +package rardecode + +const ( + mainSize = 299 + offsetSize = 60 + lowOffsetSize = 17 + lengthSize = 28 + tableSize = mainSize + offsetSize + lowOffsetSize + lengthSize +) + +var ( + lengthBase = [28]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, + 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224} + lengthExtraBits = [28]uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, + 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5} + + offsetBase = [60]int{0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, + 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, + 6144, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, + 131072, 196608, 262144, 327680, 393216, 458752, 524288, + 589824, 655360, 720896, 786432, 851968, 917504, 983040, + 1048576, 1310720, 1572864, 1835008, 2097152, 2359296, 2621440, + 2883584, 3145728, 3407872, 3670016, 3932160} + offsetExtraBits = [60]uint{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, + 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, + 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18} + + shortOffsetBase = [8]int{0, 4, 8, 16, 32, 64, 128, 192} + shortOffsetExtraBits = [8]uint{2, 2, 3, 4, 5, 6, 6, 6} +) + +type lz29Decoder struct { + codeLength [tableSize]byte + + mainDecoder huffmanDecoder + offsetDecoder huffmanDecoder + lowOffsetDecoder huffmanDecoder + lengthDecoder huffmanDecoder + + offset [4]int // history of previous offsets + length int // previous length + lowOffset int + lowOffsetRepeats int + + br *rarBitReader +} + +func (d *lz29Decoder) reset() { + for i := range d.offset { + d.offset[i] = 0 + } + d.length = 0 + for i := range d.codeLength { + d.codeLength[i] = 0 + } +} + +func (d *lz29Decoder) init(br *rarBitReader) error { + d.br = br + d.lowOffset = 0 + d.lowOffsetRepeats = 0 + + n, err := d.br.readBits(1) + if err != nil { + return err + } + addOld := n > 0 + + cl := d.codeLength[:] + if err = readCodeLengthTable(d.br, cl, addOld); err != nil { + return err + } + + d.mainDecoder.init(cl[:mainSize]) + cl = cl[mainSize:] + 
d.offsetDecoder.init(cl[:offsetSize]) + cl = cl[offsetSize:] + d.lowOffsetDecoder.init(cl[:lowOffsetSize]) + cl = cl[lowOffsetSize:] + d.lengthDecoder.init(cl) + + return nil +} + +func (d *lz29Decoder) readFilterData() (b []byte, err error) { + flags, err := d.br.ReadByte() + if err != nil { + return nil, err + } + + n := (int(flags) & 7) + 1 + switch n { + case 7: + n, err = d.br.readBits(8) + n += 7 + if err != nil { + return nil, err + } + case 8: + n, err = d.br.readBits(16) + if err != nil { + return nil, err + } + } + + buf := make([]byte, n+1) + buf[0] = flags + err = d.br.readFull(buf[1:]) + + return buf, err +} + +func (d *lz29Decoder) readEndOfBlock() error { + n, err := d.br.readBits(1) + if err != nil { + return err + } + if n > 0 { + return endOfBlock + } + n, err = d.br.readBits(1) + if err != nil { + return err + } + if n > 0 { + return endOfBlockAndFile + } + return endOfFile +} + +func (d *lz29Decoder) decode(win *window) ([]byte, error) { + sym, err := d.mainDecoder.readSym(d.br) + if err != nil { + return nil, err + } + + switch { + case sym < 256: + // literal + win.writeByte(byte(sym)) + return nil, nil + case sym == 256: + return nil, d.readEndOfBlock() + case sym == 257: + return d.readFilterData() + case sym == 258: + // use previous offset and length + case sym < 263: + i := sym - 259 + offset := d.offset[i] + copy(d.offset[1:i+1], d.offset[:i]) + d.offset[0] = offset + + i, err := d.lengthDecoder.readSym(d.br) + if err != nil { + return nil, err + } + d.length = lengthBase[i] + 2 + bits := lengthExtraBits[i] + if bits > 0 { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + d.length += n + } + case sym < 271: + i := sym - 263 + copy(d.offset[1:], d.offset[:]) + offset := shortOffsetBase[i] + 1 + bits := shortOffsetExtraBits[i] + if bits > 0 { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + offset += n + } + d.offset[0] = offset + + d.length = 2 + default: + i := sym - 271 + d.length = lengthBase[i] + 3 + bits := lengthExtraBits[i] + if bits > 0 { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + d.length += n + } + + i, err = d.offsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + offset := offsetBase[i] + 1 + bits = offsetExtraBits[i] + + switch { + case bits >= 4: + if bits > 4 { + n, err := d.br.readBits(bits - 4) + if err != nil { + return nil, err + } + offset += n << 4 + } + + if d.lowOffsetRepeats > 0 { + d.lowOffsetRepeats-- + offset += d.lowOffset + } else { + n, err := d.lowOffsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + if n == 16 { + d.lowOffsetRepeats = 15 + offset += d.lowOffset + } else { + offset += n + d.lowOffset = n + } + } + case bits > 0: + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + offset += n + } + + if offset >= 0x2000 { + d.length++ + if offset >= 0x40000 { + d.length++ + } + } + copy(d.offset[1:], d.offset[:]) + d.offset[0] = offset + } + win.copyBytes(d.length, d.offset[0]) + return nil, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode29_ppm.go b/vendor/github.com/nwaples/rardecode/decode29_ppm.go new file mode 100644 index 00000000..39c31995 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode29_ppm.go @@ -0,0 +1,132 @@ +package rardecode + +import "io" + +type ppm29Decoder struct { + m model // ppm model + esc byte // escape character + br io.ByteReader +} + +func (d *ppm29Decoder) init(br *rarBitReader) error { + maxOrder, err := br.readBits(7) + if err != nil { + return 
err + } + reset := maxOrder&0x20 > 0 + + // Should have flushed all unread bits from bitReader by now, + // use underlying ByteReader + d.br = br.r + + var maxMB int + if reset { + c, err := d.br.ReadByte() + if err != nil { + return err + } + maxMB = int(c) + 1 + } + + if maxOrder&0x40 > 0 { + d.esc, err = d.br.ReadByte() + if err != nil { + return err + } + } + + maxOrder = (maxOrder & 0x1f) + 1 + if maxOrder > 16 { + maxOrder = 16 + (maxOrder-16)*3 + } + + return d.m.init(d.br, reset, maxOrder, maxMB) +} + +func (d *ppm29Decoder) reset() { + d.esc = 2 +} + +func (d *ppm29Decoder) readFilterData() ([]byte, error) { + c, err := d.m.ReadByte() + if err != nil { + return nil, err + } + n := int(c&7) + 1 + if n == 7 { + b, err := d.m.ReadByte() + if err != nil { + return nil, err + } + n += int(b) + } else if n == 8 { + b, err := d.m.ReadByte() + if err != nil { + return nil, err + } + n = int(b) << 8 + b, err = d.m.ReadByte() + if err != nil { + return nil, err + } + n |= int(b) + } + + n++ + buf := make([]byte, n) + buf[0] = byte(c) + for i := 1; i < n; i++ { + buf[i], err = d.m.ReadByte() + if err != nil { + return nil, err + } + } + return buf, nil +} + +func (d *ppm29Decoder) decode(w *window) ([]byte, error) { + c, err := d.m.ReadByte() + if err != nil { + return nil, err + } + if c != d.esc { + w.writeByte(c) + return nil, nil + } + c, err = d.m.ReadByte() + if err != nil { + return nil, err + } + + switch c { + case 0: + return nil, endOfBlock + case 2: + return nil, endOfBlockAndFile + case 3: + return d.readFilterData() + case 4: + offset := 0 + for i := 0; i < 3; i++ { + c, err = d.m.ReadByte() + if err != nil { + return nil, err + } + offset = offset<<8 | int(c) + } + len, err := d.m.ReadByte() + if err != nil { + return nil, err + } + w.copyBytes(int(len)+32, offset+2) + case 5: + len, err := d.m.ReadByte() + if err != nil { + return nil, err + } + w.copyBytes(int(len)+4, 1) + default: + w.writeByte(d.esc) + } + return nil, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode50.go b/vendor/github.com/nwaples/rardecode/decode50.go new file mode 100644 index 00000000..1939a444 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode50.go @@ -0,0 +1,294 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + mainSize5 = 306 + offsetSize5 = 64 + lowoffsetSize5 = 16 + lengthSize5 = 44 + tableSize5 = mainSize5 + offsetSize5 + lowoffsetSize5 + lengthSize5 +) + +var ( + errUnknownFilter = errors.New("rardecode: unknown V5 filter") + errCorruptDecodeHeader = errors.New("rardecode: corrupt decode header") +) + +// decoder50 implements the decoder interface for RAR 5 compression. +// Decode input is broken up into 1 or more blocks. Each block starts with +// a header containing block length and optional code length tables to initialize +// the huffman decoders with.
+type decoder50 struct { + r io.ByteReader + br bitReader // bit reader for current data block + codeLength [tableSize5]byte + + lastBlock bool // current block is last block in compressed file + + mainDecoder huffmanDecoder + offsetDecoder huffmanDecoder + lowoffsetDecoder huffmanDecoder + lengthDecoder huffmanDecoder + + offset [4]int + length int +} + +func (d *decoder50) init(r io.ByteReader, reset bool) error { + d.r = r + d.lastBlock = false + + if reset { + for i := range d.offset { + d.offset[i] = 0 + } + d.length = 0 + for i := range d.codeLength { + d.codeLength[i] = 0 + } + } + err := d.readBlockHeader() + if err == io.EOF { + return errDecoderOutOfData + } + return err +} + +func (d *decoder50) readBlockHeader() error { + flags, err := d.r.ReadByte() + if err != nil { + return err + } + + bytecount := (flags>>3)&3 + 1 + if bytecount == 4 { + return errCorruptDecodeHeader + } + + hsum, err := d.r.ReadByte() + if err != nil { + return err + } + + blockBits := int(flags)&0x07 + 1 + blockBytes := 0 + sum := 0x5a ^ flags + for i := byte(0); i < bytecount; i++ { + n, err := d.r.ReadByte() + if err != nil { + return err + } + sum ^= n + blockBytes |= int(n) << (i * 8) + } + if sum != hsum { // bad header checksum + return errCorruptDecodeHeader + } + blockBits += (blockBytes - 1) * 8 + + // create bit reader for block + d.br = limitBitReader(newRarBitReader(d.r), blockBits, errDecoderOutOfData) + d.lastBlock = flags&0x40 > 0 + + if flags&0x80 > 0 { + // read new code length tables and reinitialize huffman decoders + cl := d.codeLength[:] + err = readCodeLengthTable(d.br, cl, false) + if err != nil { + return err + } + d.mainDecoder.init(cl[:mainSize5]) + cl = cl[mainSize5:] + d.offsetDecoder.init(cl[:offsetSize5]) + cl = cl[offsetSize5:] + d.lowoffsetDecoder.init(cl[:lowoffsetSize5]) + cl = cl[lowoffsetSize5:] + d.lengthDecoder.init(cl) + } + return nil +} + +func slotToLength(br bitReader, n int) (int, error) { + if n >= 8 { + bits := uint(n/4 - 1) + n = (4 | (n & 3)) << bits + if bits > 0 { + b, err := br.readBits(bits) + if err != nil { + return 0, err + } + n |= b + } + } + n += 2 + return n, nil +} + +// readFilter5Data reads an encoded integer used in V5 filters. +func readFilter5Data(br bitReader) (int, error) { + // TODO: should data really be uint? (for 32bit ints). + // It will be masked later anyway by decode window mask. 
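+ // Editor's note (illustrative): the value is a little-endian variable-length + // integer, a 2-bit count followed by count+1 bytes; e.g. the bits 01 followed + // by the bytes 0x34 0x12 decode to 0x1234.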
+ bytes, err := br.readBits(2) + if err != nil { + return 0, err + } + bytes++ + + var data int + for i := 0; i < bytes; i++ { + n, err := br.readBits(8) + if err != nil { + return 0, err + } + data |= n << (uint(i) * 8) + } + return data, nil +} + +func readFilter(br bitReader) (*filterBlock, error) { + fb := new(filterBlock) + var err error + + fb.offset, err = readFilter5Data(br) + if err != nil { + return nil, err + } + fb.length, err = readFilter5Data(br) + if err != nil { + return nil, err + } + ftype, err := br.readBits(3) + if err != nil { + return nil, err + } + switch ftype { + case 0: + n, err := br.readBits(5) + if err != nil { + return nil, err + } + fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterDelta(n+1, buf) } + case 1: + fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterE8(0xe8, true, buf, offset) } + case 2: + fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterE8(0xe9, true, buf, offset) } + case 3: + fb.filter = filterArm + default: + return nil, errUnknownFilter + } + return fb, nil +} + +func (d *decoder50) decodeSym(win *window, sym int) (*filterBlock, error) { + switch { + case sym < 256: + // literal + win.writeByte(byte(sym)) + return nil, nil + case sym == 256: + f, err := readFilter(d.br) + f.offset += win.buffered() + return f, err + case sym == 257: + // use previous offset and length + case sym < 262: + i := sym - 258 + offset := d.offset[i] + copy(d.offset[1:i+1], d.offset[:i]) + d.offset[0] = offset + + sl, err := d.lengthDecoder.readSym(d.br) + if err != nil { + return nil, err + } + d.length, err = slotToLength(d.br, sl) + if err != nil { + return nil, err + } + default: + length, err := slotToLength(d.br, sym-262) + if err != nil { + return nil, err + } + + offset := 1 + slot, err := d.offsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + if slot < 4 { + offset += slot + } else { + bits := uint(slot/2 - 1) + offset += (2 | (slot & 1)) << bits + + if bits >= 4 { + if bits > 4 { + n, err := d.br.readBits(bits - 4) + if err != nil { + return nil, err + } + offset += n << 4 + } + n, err := d.lowoffsetDecoder.readSym(d.br) + if err != nil { + return nil, err + } + offset += n + } else { + n, err := d.br.readBits(bits) + if err != nil { + return nil, err + } + offset += n + } + } + if offset > 0x100 { + length++ + if offset > 0x2000 { + length++ + if offset > 0x40000 { + length++ + } + } + } + copy(d.offset[1:], d.offset[:]) + d.offset[0] = offset + d.length = length + } + win.copyBytes(d.length, d.offset[0]) + return nil, nil +} + +func (d *decoder50) fill(w *window) ([]*filterBlock, error) { + var fl []*filterBlock + + for w.available() > 0 { + sym, err := d.mainDecoder.readSym(d.br) + if err == nil { + var f *filterBlock + f, err = d.decodeSym(w, sym) + if f != nil { + fl = append(fl, f) + } + } else if err == io.EOF { + // reached end of the block + if d.lastBlock { + return fl, io.EOF + } + err = d.readBlockHeader() + } + if err != nil { + if err == io.EOF { + return fl, errDecoderOutOfData + } + return fl, err + } + } + return fl, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decode_reader.go b/vendor/github.com/nwaples/rardecode/decode_reader.go new file mode 100644 index 00000000..36699f9a --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decode_reader.go @@ -0,0 +1,290 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + minWindowSize = 0x40000 + maxQueuedFilters = 8192 +) + +var ( + errTooManyFilters = errors.New("rardecode: too 
many filters") + errInvalidFilter = errors.New("rardecode: invalid filter") +) + +// filter functions take a byte slice and the current output offset, and +// return transformed data. +type filter func(b []byte, offset int64) ([]byte, error) + +// filterBlock is a block of data to be processed by a filter. +type filterBlock struct { + length int // length of block + offset int // bytes to be read before start of block + reset bool // drop all existing queued filters + filter filter // filter function +} + +// decoder is the interface for decoding compressed data +type decoder interface { + init(r io.ByteReader, reset bool) error // initialize decoder for current file + fill(w *window) ([]*filterBlock, error) // fill window with decoded data, returning any filters +} + +// window is a sliding window buffer. +type window struct { + buf []byte + mask int // buf length mask + r int // index in buf for reads (beginning) + w int // index in buf for writes (end) + l int // length of bytes to be processed by copyBytes + o int // offset of bytes to be processed by copyBytes +} + +// buffered returns the number of bytes yet to be read from window +func (w *window) buffered() int { return (w.w - w.r) & w.mask } + +// available returns the number of bytes that can be written before the window is full +func (w *window) available() int { return (w.r - w.w - 1) & w.mask } + +func (w *window) reset(log2size uint, clear bool) { + size := 1 << log2size + if size < minWindowSize { + size = minWindowSize + } + if size > len(w.buf) { + b := make([]byte, size) + if clear { + w.w = 0 + } else if len(w.buf) > 0 { + n := copy(b, w.buf[w.w:]) + n += copy(b[n:], w.buf[:w.w]) + w.w = n + } + w.buf = b + w.mask = size - 1 + } else if clear { + for i := range w.buf { + w.buf[i] = 0 + } + w.w = 0 + } + w.r = w.w +} + +// writeByte writes c to the end of the window +func (w *window) writeByte(c byte) { + w.buf[w.w] = c + w.w = (w.w + 1) & w.mask +} + +// copyBytes copies len bytes at off distance from the end +// to the end of the window. +func (w *window) copyBytes(len, off int) { + len &= w.mask + + n := w.available() + if len > n { + // if there is not enough space available we copy + // as much as we can and save the offset and length + // of the remaining data to be copied later. + w.l = len - n + w.o = off + len = n + } + + i := (w.w - off) & w.mask + for ; len > 0; len-- { + w.buf[w.w] = w.buf[i] + w.w = (w.w + 1) & w.mask + i = (i + 1) & w.mask + } +} + +// read reads bytes from the beginning of the window into p +func (w *window) read(p []byte) (n int) { + if w.r > w.w { + n = copy(p, w.buf[w.r:]) + w.r = (w.r + n) & w.mask + p = p[n:] + } + if w.r < w.w { + l := copy(p, w.buf[w.r:w.w]) + w.r += l + n += l + } + if w.l > 0 && n > 0 { + // if we have successfully read data, copy any + // leftover data from a previous copyBytes. + l := w.l + w.l = 0 + w.copyBytes(l, w.o) + } + return n +} + +// decodeReader implements io.Reader for decoding compressed data in RAR archives.
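+// A hedged usage sketch (editor's note, not upstream code; br, dec and winsize +// are placeholders): +// +//	var dr decodeReader +//	if err := dr.init(br, dec, winsize, true); err != nil { /* handle */ } +//	n, err := dr.Read(p) // p receives decoded, filtered file data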
+type decodeReader struct { + win window // sliding window buffer used as decode dictionary + dec decoder // decoder being used to unpack file + tot int64 // total bytes read + buf []byte // filter input/output buffer + outbuf []byte // filter output not yet read + err error + filters []*filterBlock // list of filterBlocks, each with offset relative to the previous in the list +} + +func (d *decodeReader) init(r io.ByteReader, dec decoder, winsize uint, reset bool) error { + if reset { + d.filters = nil + } + d.err = nil + d.outbuf = nil + d.tot = 0 + d.win.reset(winsize, reset) + d.dec = dec + return d.dec.init(r, reset) +} + +func (d *decodeReader) readErr() error { + err := d.err + d.err = nil + return err +} + +// queueFilter adds a filterBlock to the end of decodeReader's filters. +func (d *decodeReader) queueFilter(f *filterBlock) error { + if f.reset { + d.filters = nil + } + if len(d.filters) >= maxQueuedFilters { + return errTooManyFilters + } + // make offset relative to previous filter in list + for _, fb := range d.filters { + if f.offset < fb.offset { + // filter block must not start before previous filter + return errInvalidFilter + } + f.offset -= fb.offset + } + // offset & length must be < window size + f.offset &= d.win.mask + f.length &= d.win.mask + d.filters = append(d.filters, f) + return nil +} + +// processFilters processes any filters valid at the current read index +// and stores the output in outbuf. +func (d *decodeReader) processFilters() (err error) { + f := d.filters[0] + if f.offset > 0 { + return nil + } + d.filters = d.filters[1:] + if d.win.buffered() < f.length { + // fill() didn't return enough bytes + err = d.readErr() + if err == nil || err == io.EOF { + return errInvalidFilter + } + return err + } + + if cap(d.buf) < f.length { + d.buf = make([]byte, f.length) + } + d.outbuf = d.buf[:f.length] + n := d.win.read(d.outbuf) + for { + // run filter passing buffer and total bytes read so far + d.outbuf, err = f.filter(d.outbuf, d.tot) + if err != nil { + return err + } + if cap(d.outbuf) > cap(d.buf) { + // Filter returned a bigger buffer, save it for future filters. + d.buf = d.outbuf + } + if len(d.filters) == 0 { + return nil + } + f = d.filters[0] + + if f.offset != 0 { + // next filter not at current offset + f.offset -= n + return nil + } + if f.length != len(d.outbuf) { + return errInvalidFilter + } + d.filters = d.filters[1:] + + if cap(d.outbuf) < cap(d.buf) { + // Filter returned a smaller buffer. Copy it back to the saved buffer + // so the next filter can make use of the larger buffer if needed. + d.outbuf = append(d.buf[:0], d.outbuf...) + } + } +} + +// fill fills the decodeReader's window +func (d *decodeReader) fill() { + if d.err != nil { + return + } + var fl []*filterBlock + fl, d.err = d.dec.fill(&d.win) // fill window using decoder + for _, f := range fl { + err := d.queueFilter(f) + if err != nil { + d.err = err + return + } + } +} + +// Read decodes data and stores it in p.
+func (d *decodeReader) Read(p []byte) (n int, err error) { + if len(d.outbuf) == 0 { + // no filter output, see if we need to create more + if d.win.buffered() == 0 { + // fill empty window + d.fill() + if d.win.buffered() == 0 { + return 0, d.readErr() + } + } else if len(d.filters) > 0 { + f := d.filters[0] + if f.offset == 0 && f.length > d.win.buffered() { + d.fill() // filter at current offset needs more data + } + } + if len(d.filters) > 0 { + if err := d.processFilters(); err != nil { + return 0, err + } + } + } + if len(d.outbuf) > 0 { + // copy filter output into p + n = copy(p, d.outbuf) + d.outbuf = d.outbuf[n:] + } else if len(d.filters) > 0 { + f := d.filters[0] + if f.offset < len(p) { + // only read data up to beginning of next filter + p = p[:f.offset] + } + n = d.win.read(p) // read directly from window + f.offset -= n // adjust first filter offset by bytes just read + } else { + n = d.win.read(p) // read directly from window + } + d.tot += int64(n) + return n, nil +} diff --git a/vendor/github.com/nwaples/rardecode/decrypt_reader.go b/vendor/github.com/nwaples/rardecode/decrypt_reader.go new file mode 100644 index 00000000..bb9f279c --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/decrypt_reader.go @@ -0,0 +1,126 @@ +package rardecode + +import ( + "crypto/aes" + "crypto/cipher" + "io" +) + +// cipherBlockReader implements Block Mode decryption of an io.Reader object. +type cipherBlockReader struct { + r io.Reader + mode cipher.BlockMode + inbuf []byte // input buffer for partial data block + outbuf []byte // output buffer used when output slice < block size + n int // bytes read from outbuf + err error +} + +// read reads and decrypts one or more input blocks into p. +// len(p) must be >= cipher block size. +func (cr *cipherBlockReader) read(p []byte) (n int, err error) { + bs := cr.mode.BlockSize() + // round p down to a multiple of the block size + l := len(p) - len(p)%bs + p = p[:l] + + l = len(cr.inbuf) + if l > 0 { + // copy any buffered input into p + copy(p, cr.inbuf) + cr.inbuf = cr.inbuf[:0] + } + // read data for at least one block + n, err = io.ReadAtLeast(cr.r, p[l:], bs-l) + n += l + p = p[:n] + + l = n % bs + // check if p is a multiple of the cipher block size + if l > 0 { + n -= l + // save trailing partial block to process later + cr.inbuf = append(cr.inbuf, p[n:]...) + p = p[:n] + } + + if err != nil { + if err == io.ErrUnexpectedEOF || err == io.ErrShortBuffer { + // ignore trailing bytes < block size length + err = io.EOF + } + return 0, err + } + cr.mode.CryptBlocks(p, p) // decrypt block(s) + return n, nil +} + +// Read reads and decrypts data into p. +// If the input is not a multiple of the cipher block size, +// the trailing bytes will be ignored. +func (cr *cipherBlockReader) Read(p []byte) (n int, err error) { + for { + if cr.n < len(cr.outbuf) { + // return buffered output + n = copy(p, cr.outbuf[cr.n:]) + cr.n += n + return n, nil + } + if cr.err != nil { + err = cr.err + cr.err = nil + return 0, err + } + if len(p) >= cap(cr.outbuf) { + break + } + // p is not large enough to process a block, use outbuf instead + n, cr.err = cr.read(cr.outbuf[:cap(cr.outbuf)]) + cr.outbuf = cr.outbuf[:n] + cr.n = 0 + } + // read blocks into p + return cr.read(p) +} + +// ReadByte returns the next decrypted byte. 
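+// Editor's note: together with Read above, this makes cipherBlockReader an +// io.ByteReader, so a decrypted volume can feed the byte and bit readers used +// elsewhere in the package, e.g. +// +//	var br io.ByteReader = newAesDecryptReader(r, key, iv) +//	c, err := br.ReadByte() // next plaintext byte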
+func (cr *cipherBlockReader) ReadByte() (byte, error) { + for { + if cr.n < len(cr.outbuf) { + c := cr.outbuf[cr.n] + cr.n++ + return c, nil + } + if cr.err != nil { + err := cr.err + cr.err = nil + return 0, err + } + // refill outbuf + var n int + n, cr.err = cr.read(cr.outbuf[:cap(cr.outbuf)]) + cr.outbuf = cr.outbuf[:n] + cr.n = 0 + } +} + +// newCipherBlockReader returns a cipherBlockReader that decrypts the given io.Reader using +// the provided block mode cipher. +func newCipherBlockReader(r io.Reader, mode cipher.BlockMode) *cipherBlockReader { + cr := &cipherBlockReader{r: r, mode: mode} + cr.outbuf = make([]byte, 0, mode.BlockSize()) + cr.inbuf = make([]byte, 0, mode.BlockSize()) + return cr +} + +// newAesDecryptReader returns a cipherBlockReader that decrypts input from a given io.Reader using AES. +// It will panic if the provided key is invalid. +func newAesDecryptReader(r io.Reader, key, iv []byte) *cipherBlockReader { + block, err := aes.NewCipher(key) + if err != nil { + panic(err) + } + mode := cipher.NewCBCDecrypter(block, iv) + + return newCipherBlockReader(r, mode) +} diff --git a/vendor/github.com/nwaples/rardecode/filters.go b/vendor/github.com/nwaples/rardecode/filters.go new file mode 100644 index 00000000..a9eb0407 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/filters.go @@ -0,0 +1,416 @@ +package rardecode + +import ( + "bytes" + "encoding/binary" + "hash/crc32" + "io" +) + +const ( + fileSize = 0x1000000 + + vmGlobalAddr = 0x3C000 + vmGlobalSize = 0x02000 + vmFixedGlobalSize = 0x40 + + maxUint32 = 1<<32 - 1 +) + +// v3Filter is the interface type for RAR V3 filters. +// v3Filter performs the same function as the filter type, except that it also takes +// the initial register values r, and global data as input for the RAR V3 VM. +type v3Filter func(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) + +var ( + // standardV3Filters is a list of known filters. We can replace the use of a vm + // filter with a custom filter function. 
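+ // Editor's note (illustrative): getV3Filter matches on the pair + // (crc32 of code, len(code)); e.g. a 53-byte program whose CRC32 is + // 0xad576887 is replaced by the native e8FilterV3 below.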
+ standardV3Filters = []struct { + crc uint32 // crc of code byte slice for filter + len int // length of code byte slice for filter + f v3Filter // replacement filter function + }{ + {0xad576887, 53, e8FilterV3}, + {0x3cd7e57e, 57, e8e9FilterV3}, + {0x3769893f, 120, itaniumFilterV3}, + {0x0e06077d, 29, deltaFilterV3}, + {0x1c2c5dc8, 149, filterRGBV3}, + {0xbc85e701, 216, filterAudioV3}, + } + + // itanium filter byte masks + byteMask = []int{4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0} +) + +func filterE8(c byte, v5 bool, buf []byte, offset int64) ([]byte, error) { + off := int32(offset) + for b := buf; len(b) >= 5; { + ch := b[0] + b = b[1:] + off++ + if ch != 0xe8 && ch != c { + continue + } + if v5 { + off %= fileSize + } + addr := int32(binary.LittleEndian.Uint32(b)) + if addr < 0 { + if addr+off >= 0 { + binary.LittleEndian.PutUint32(b, uint32(addr+fileSize)) + } + } else if addr < fileSize { + binary.LittleEndian.PutUint32(b, uint32(addr-off)) + } + off += 4 + b = b[4:] + } + return buf, nil +} + +func e8FilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + return filterE8(0xe8, false, buf, offset) +} + +func e8e9FilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + return filterE8(0xe9, false, buf, offset) +} + +func getBits(buf []byte, pos, count uint) uint32 { + n := binary.LittleEndian.Uint32(buf[pos/8:]) + n >>= pos & 7 + mask := uint32(maxUint32) >> (32 - count) + return n & mask +} + +func setBits(buf []byte, pos, count uint, bits uint32) { + mask := uint32(maxUint32) >> (32 - count) + mask <<= pos & 7 + bits <<= pos & 7 + n := binary.LittleEndian.Uint32(buf[pos/8:]) + n = (n & ^mask) | (bits & mask) + binary.LittleEndian.PutUint32(buf[pos/8:], n) +} + +func itaniumFilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + fileOffset := uint32(offset) >> 4 + + for b := buf; len(b) > 21; b = b[16:] { + c := int(b[0]&0x1f) - 0x10 + if c >= 0 { + mask := byteMask[c] + if mask != 0 { + for i := uint(0); i <= 2; i++ { + if mask&(1<<i) != 0 { + pos := i*41 + 23 + if getBits(b, pos+24, 4) == 5 { + n := getBits(b, pos, 20) + n -= fileOffset + setBits(b, pos, 20, n) + } + } + } + } + } + fileOffset++ + } + return buf, nil +} + +func filterDelta(n int, buf []byte) ([]byte, error) { + var res []byte + l := len(buf) + if cap(buf) >= 2*l { + res = buf[l : 2*l] // use unused capacity + } else { + res = make([]byte, l, 2*l) + } + + i := 0 + for j := 0; j < n; j++ { + var c byte + for k := j; k < len(res); k += n { + c -= buf[i] + i++ + res[k] = c + } + } + return res, nil +} + +func deltaFilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + return filterDelta(int(r[0]), buf) +} + +func abs(n int) int { + if n < 0 { + n = -n + } + return n +} + +func filterRGBV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + width := int(r[0] - 3) + posR := int(r[1]) + if posR < 0 || width < 0 { + return buf, nil + } + + var res []byte + l := len(buf) + if cap(buf) >= 2*l { + res = buf[l : 2*l] // use unused capacity + } else { + res = make([]byte, l, 2*l) + } + + for c := 0; c < 3; c++ { + var prevByte int + for i := c; i < len(res); i += 3 { + var predicted int + upperPos := i - width + if upperPos >= 3 { + upperByte := int(res[upperPos]) + upperLeftByte := int(res[upperPos-3]) + predicted = prevByte + upperByte - upperLeftByte + pa := abs(predicted - prevByte) + pb := abs(predicted - upperByte) + pc := abs(predicted - upperLeftByte) + if pa <= pb && pa <= pc { + predicted = prevByte + } else if pb <= pc { + predicted = upperByte + } else { + predicted = upperLeftByte + } + } else { + predicted = prevByte + } + prevByte = (predicted - int(buf[0])) & 0xFF + res[i] = uint8(prevByte) + buf = buf[1:] + } + + } + for i := posR; i < len(res)-2; i += 3 { + c := res[i+1] + 
res[i] += c + res[i+2] += c + } + return res, nil +} + +func filterAudioV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + var res []byte + l := len(buf) + if cap(buf) >= 2*l { + res = buf[l : 2*l] // use unused capacity + } else { + res = make([]byte, l, 2*l) + } + + chans := int(r[0]) + for c := 0; c < chans; c++ { + var prevByte, byteCount int + var diff [7]int + var d, k [3]int + + for i := c; i < len(res); i += chans { + predicted := prevByte<<3 + k[0]*d[0] + k[1]*d[1] + k[2]*d[2] + predicted = int(int8(predicted >> 3)) + + curByte := int(int8(buf[0])) + buf = buf[1:] + predicted -= curByte + res[i] = uint8(predicted) + + dd := curByte << 3 + diff[0] += abs(dd) + diff[1] += abs(dd - d[0]) + diff[2] += abs(dd + d[0]) + diff[3] += abs(dd - d[1]) + diff[4] += abs(dd + d[1]) + diff[5] += abs(dd - d[2]) + diff[6] += abs(dd + d[2]) + + prevDelta := int(int8(predicted - prevByte)) + prevByte = predicted + d[2] = d[1] + d[1] = prevDelta - d[0] + d[0] = prevDelta + + if byteCount&0x1f == 0 { + min := diff[0] + diff[0] = 0 + n := 0 + for j := 1; j < len(diff); j++ { + if diff[j] < min { + min = diff[j] + n = j + } + diff[j] = 0 + } + n-- + if n >= 0 { + m := n / 2 + if n%2 == 0 { + if k[m] >= -16 { + k[m]-- + } + } else { + if k[m] < 16 { + k[m]++ + } + } + } + } + byteCount++ + } + + } + return res, nil +} + +func filterArm(buf []byte, offset int64) ([]byte, error) { + for i := 0; len(buf)-i > 3; i += 4 { + if buf[i+3] == 0xeb { + n := uint(buf[i]) + n += uint(buf[i+1]) * 0x100 + n += uint(buf[i+2]) * 0x10000 + n -= (uint(offset) + uint(i)) / 4 + buf[i] = byte(n) + buf[i+1] = byte(n >> 8) + buf[i+2] = byte(n >> 16) + } + } + return buf, nil +} + +type vmFilter struct { + execCount uint32 + global []byte + static []byte + code []command +} + +// execute implements v3filter type for VM based RAR 3 filters. 
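+// Sketch of the fixed global memory layout relied on below (editor's note; +// offsets are into vg): 0x1c data/output length, 0x20 output start, 0x24 file +// offset, 0x2c execution count, 0x30 length of global data saved for the next +// run.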
+func (f *vmFilter) execute(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) { + if len(buf) > vmGlobalAddr { + return buf, errInvalidFilter + } + v := newVM(buf) + + // register setup + v.r[3] = vmGlobalAddr + v.r[4] = uint32(len(buf)) + v.r[5] = f.execCount + for i, n := range r { + v.r[i] = n + } + + // vm global data memory block + vg := v.m[vmGlobalAddr : vmGlobalAddr+vmGlobalSize] + + // initialize fixed global memory + for i, n := range v.r[:vmRegs-1] { + binary.LittleEndian.PutUint32(vg[i*4:], n) + } + binary.LittleEndian.PutUint32(vg[0x1c:], uint32(len(buf))) + binary.LittleEndian.PutUint64(vg[0x24:], uint64(offset)) + binary.LittleEndian.PutUint32(vg[0x2c:], f.execCount) + + // registers + v.r[6] = uint32(offset) + + // copy program global memory + var n int + if len(f.global) > 0 { + n = copy(vg[vmFixedGlobalSize:], f.global) // use saved global instead + } else { + n = copy(vg[vmFixedGlobalSize:], global) + } + copy(vg[vmFixedGlobalSize+n:], f.static) + + v.execute(f.code) + + f.execCount++ + + // keep largest global buffer + if cap(global) > cap(f.global) { + f.global = global[:0] + } else if len(f.global) > 0 { + f.global = f.global[:0] + } + + // check for global data to be saved for next program execution + globalSize := binary.LittleEndian.Uint32(vg[0x30:]) + if globalSize > 0 { + if globalSize > vmGlobalSize-vmFixedGlobalSize { + globalSize = vmGlobalSize - vmFixedGlobalSize + } + if cap(f.global) < int(globalSize) { + f.global = make([]byte, globalSize) + } else { + f.global = f.global[:globalSize] + } + copy(f.global, vg[vmFixedGlobalSize:]) + } + + // find program output + length := binary.LittleEndian.Uint32(vg[0x1c:]) & vmMask + start := binary.LittleEndian.Uint32(vg[0x20:]) & vmMask + if start+length > vmSize { + // TODO: error + start = 0 + length = 0 + } + if start != 0 && cap(v.m) > cap(buf) { + // Initial buffer was too small for vm. + // Copy output to beginning of vm memory so that decodeReader + // will re-use the newly allocated vm memory and we will not + // have to reallocate again next time. + copy(v.m, v.m[start:start+length]) + start = 0 + } + return v.m[start : start+length], nil +} + +// getV3Filter returns a V3 filter function from a code byte slice.
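+// Usage sketch (editor's note, not upstream code; regs, global, buf and offset +// are placeholders): +// +//	f, err := getV3Filter(code) // code as returned by readVMCode +//	out, err := f(regs, global, buf, offset) // run the filter on one block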
+func getV3Filter(code []byte) (v3Filter, error) { + // check if filter is a known standard filter + c := crc32.ChecksumIEEE(code) + for _, f := range standardV3Filters { + if f.crc == c && f.len == len(code) { + return f.f, nil + } + } + + // create new vm filter + f := new(vmFilter) + r := newRarBitReader(bytes.NewReader(code[1:])) // skip first xor byte check + + // read static data + n, err := r.readBits(1) + if err != nil { + return nil, err + } + if n > 0 { + m, err := r.readUint32() + if err != nil { + return nil, err + } + f.static = make([]byte, m+1) + err = r.readFull(f.static) + if err != nil { + return nil, err + } + } + + f.code, err = readCommands(r) + if err == io.EOF { + err = nil + } + + return f.execute, err +} diff --git a/vendor/github.com/nwaples/rardecode/huffman.go b/vendor/github.com/nwaples/rardecode/huffman.go new file mode 100644 index 00000000..4acb69d5 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/huffman.go @@ -0,0 +1,208 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + maxCodeLength = 15 // maximum code length in bits + maxQuickBits = 10 + maxQuickSize = 1 << maxQuickBits +) + +var ( + errHuffDecodeFailed = errors.New("rardecode: huffman decode failed") + errInvalidLengthTable = errors.New("rardecode: invalid huffman code length table") +) + +type huffmanDecoder struct { + limit [maxCodeLength + 1]int + pos [maxCodeLength + 1]int + symbol []int + min uint + quickbits uint + quicklen [maxQuickSize]uint + quicksym [maxQuickSize]int +} + +func (h *huffmanDecoder) init(codeLengths []byte) { + var count [maxCodeLength + 1]int + + for _, n := range codeLengths { + if n == 0 { + continue + } + count[n]++ + } + + h.pos[0] = 0 + h.limit[0] = 0 + h.min = 0 + for i := uint(1); i <= maxCodeLength; i++ { + h.limit[i] = h.limit[i-1] + count[i]<<(maxCodeLength-i) + h.pos[i] = h.pos[i-1] + count[i-1] + if h.min == 0 && h.limit[i] > 0 { + h.min = i + } + } + + if cap(h.symbol) >= len(codeLengths) { + h.symbol = h.symbol[:len(codeLengths)] + for i := range h.symbol { + h.symbol[i] = 0 + } + } else { + h.symbol = make([]int, len(codeLengths)) + } + + copy(count[:], h.pos[:]) + for i, n := range codeLengths { + if n != 0 { + h.symbol[count[n]] = i + count[n]++ + } + } + + if len(codeLengths) >= 298 { + h.quickbits = maxQuickBits + } else { + h.quickbits = maxQuickBits - 3 + } + + bits := uint(1) + for i := 0; i < 1<<h.quickbits; i++ { + v := i << (maxCodeLength - h.quickbits) + for v >= h.limit[bits] && bits < maxCodeLength { + bits++ + } + h.quicklen[i] = bits + + dist := v - h.limit[bits-1] + dist >>= (maxCodeLength - bits) + + pos := h.pos[bits] + dist + if pos < len(h.symbol) { + h.quicksym[i] = h.symbol[pos] + } else { + h.quicksym[i] = 0 + } + } +} + +func (h *huffmanDecoder) readSym(r bitReader) (int, error) { + bits := uint(maxCodeLength) + v, err := r.readBits(maxCodeLength) + if err != nil { + if err != io.EOF { + return 0, err + } + // fall back to 1 bit at a time if we read past EOF + for i := uint(1); i <= maxCodeLength; i++ { + b, err := r.readBits(1) + if err != nil { + return 0, err // not enough bits, return error + } + v |= b << (maxCodeLength - i) + if v < h.limit[i] { + bits = i + break + } + } + } else { + if v < h.limit[h.quickbits] { + i := v >> (maxCodeLength - h.quickbits) + r.unreadBits(maxCodeLength - h.quicklen[i]) + return h.quicksym[i], nil + } + + for i, n := range h.limit[h.min:] { + if v < n { + bits = h.min + uint(i) + r.unreadBits(maxCodeLength - bits) + break + } + } + } + + dist := v - h.limit[bits-1] + dist >>= maxCodeLength - bits + + pos := h.pos[bits] + dist + if pos >= 
len(h.symbol) { + return 0, errHuffDecodeFailed + } + + return h.symbol[pos], nil +} + +// readCodeLengthTable reads a new code length table into codeLength from br. +// If addOld is set the old table is added to the new one. +func readCodeLengthTable(br bitReader, codeLength []byte, addOld bool) error { + var bitlength [20]byte + for i := 0; i < len(bitlength); i++ { + n, err := br.readBits(4) + if err != nil { + return err + } + if n == 0xf { + cnt, err := br.readBits(4) + if err != nil { + return err + } + if cnt > 0 { + // array already zeroed, no need to set explicitly + i += cnt + 1 + continue + } + } + bitlength[i] = byte(n) + } + + var bl huffmanDecoder + bl.init(bitlength[:]) + + for i := 0; i < len(codeLength); i++ { + l, err := bl.readSym(br) + if err != nil { + return err + } + + if l < 16 { + if addOld { + codeLength[i] = (codeLength[i] + byte(l)) & 0xf + } else { + codeLength[i] = byte(l) + } + continue + } + + var count int + var value byte + + switch l { + case 16, 18: + count, err = br.readBits(3) + count += 3 + default: + count, err = br.readBits(7) + count += 11 + } + if err != nil { + return err + } + if l < 18 { + if i == 0 { + return errInvalidLengthTable + } + value = codeLength[i-1] + } + for ; count > 0 && i < len(codeLength); i++ { + codeLength[i] = value + count-- + } + i-- + } + return nil +} diff --git a/vendor/github.com/nwaples/rardecode/ppm_model.go b/vendor/github.com/nwaples/rardecode/ppm_model.go new file mode 100644 index 00000000..58a545aa --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/ppm_model.go @@ -0,0 +1,1096 @@ +package rardecode + +import ( + "errors" + "io" +) + +const ( + rangeBottom = 1 << 15 + rangeTop = 1 << 24 + + maxFreq = 124 + + intBits = 7 + periodBits = 7 + binScale = 1 << (intBits + periodBits) + + n0 = 1 + n1 = 4 + n2 = 4 + n3 = 4 + n4 = (128 + 3 - 1*n1 - 2*n2 - 3*n3) / 4 + nIndexes = n0 + n1 + n2 + n3 + n4 + + // memory is allocated in units. A unit contains unitSize number of bytes. + // A unit can store one context or two states.
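+ // Editor's note: a state occupies 6 bytes (sym, freq and a 4-byte succ), + // so with unitSize = 12 one unit holds exactly two states.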
+ unitSize = 12 + + maxUint16 = 1<<16 - 1 + freeMark = -1 +) + +var ( + errCorruptPPM = errors.New("rardecode: corrupt ppm data") + + expEscape = []byte{25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2} + initBinEsc = []uint16{0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051} + + ns2Index [256]byte + ns2BSIndex [256]byte + + // units2Index maps the number of units in a block to a freelist index + units2Index [128 + 1]byte + // index2Units maps a freelist index to the size of the block in units + index2Units [nIndexes]int32 +) + +func init() { + ns2BSIndex[0] = 2 * 0 + ns2BSIndex[1] = 2 * 1 + for i := 2; i < 11; i++ { + ns2BSIndex[i] = 2 * 2 + } + for i := 11; i < 256; i++ { + ns2BSIndex[i] = 2 * 3 + } + + var j, n byte + for i := range ns2Index { + ns2Index[i] = n + if j <= 3 { + n++ + j = n + } else { + j-- + } + } + + var ii byte + var iu, units int32 + for i, n := range []int{n0, n1, n2, n3, n4} { + for j := 0; j < n; j++ { + units += int32(i) + index2Units[ii] = units + for iu <= units { + units2Index[iu] = ii + iu++ + } + ii++ + } + } +} + +type rangeCoder struct { + br io.ByteReader + code uint32 + low uint32 + rnge uint32 +} + +func (r *rangeCoder) init(br io.ByteReader) error { + r.br = br + r.low = 0 + r.rnge = ^uint32(0) + for i := 0; i < 4; i++ { + c, err := r.br.ReadByte() + if err != nil { + return err + } + r.code = r.code<<8 | uint32(c) + } + return nil +} + +func (r *rangeCoder) currentCount(scale uint32) uint32 { + r.rnge /= scale + return (r.code - r.low) / r.rnge +} + +func (r *rangeCoder) normalize() error { + for { + if r.low^(r.low+r.rnge) >= rangeTop { + if r.rnge >= rangeBottom { + return nil + } + r.rnge = -r.low & (rangeBottom - 1) + } + c, err := r.br.ReadByte() + if err != nil { + return err + } + r.code = r.code<<8 | uint32(c) + r.rnge <<= 8 + r.low <<= 8 + } +} + +func (r *rangeCoder) decode(lowCount, highCount uint32) error { + r.low += r.rnge * lowCount + r.rnge *= highCount - lowCount + + return r.normalize() +} + +type see2Context struct { + summ uint16 + shift byte + count byte +} + +func newSee2Context(i uint16) see2Context { + return see2Context{i << (periodBits - 4), (periodBits - 4), 4} +} + +func (s *see2Context) mean() uint32 { + if s == nil { + return 1 + } + n := s.summ >> s.shift + if n == 0 { + return 1 + } + s.summ -= n + return uint32(n) +} + +func (s *see2Context) update() { + if s == nil || s.shift >= periodBits { + return + } + s.count-- + if s.count == 0 { + s.summ += s.summ + s.count = 3 << s.shift + s.shift++ + } +} + +type state struct { + sym byte + freq byte + + // succ can point to a context or byte in memory. + // A context pointer is a positive integer. It is an index into the states + // array that points to the first of two states which the context is + // marshalled into. + // A byte pointer is a negative integer. The magnitude represents the position + // in bytes from the bottom of the memory. As memory is modelled as an array of + // states, this is used to calculate which state, and where in the state the + // byte is stored. + // A zero value represents a nil pointer. + succ int32 +} + +// uint16 return a uint16 stored in the sym and freq fields of a state +func (s state) uint16() uint16 { return uint16(s.sym) | uint16(s.freq)<<8 } + +// setUint16 stores a uint16 in the sym and freq fields of a state +func (s *state) setUint16(n uint16) { s.sym = byte(n); s.freq = byte(n >> 8) } + +// A context is marshalled into a slice of two states. +// The first state contains the number of states, and the suffix pointer. 
+// If there is only one state, the second state contains that state. +// If there is more than one state, the second state contains the summFreq +// and the index to the slice of states. +type context struct { + i int32 // index into the states array for context + s []state // slice of two states representing context + a *subAllocator +} + +// succPtr returns a pointer value for the context to be stored in a state.succ +func (c *context) succPtr() int32 { return c.i } + +func (c *context) numStates() int { return int(c.s[0].uint16()) } + +func (c *context) setNumStates(n int) { c.s[0].setUint16(uint16(n)) } + +func (c *context) statesIndex() int32 { return c.s[1].succ } + +func (c *context) setStatesIndex(n int32) { c.s[1].succ = n } + +func (c *context) suffix() *context { return c.a.succContext(c.s[0].succ) } + +func (c *context) setSuffix(sc *context) { c.s[0].succ = sc.i } + +func (c *context) summFreq() uint16 { return c.s[1].uint16() } + +func (c *context) setSummFreq(f uint16) { c.s[1].setUint16(f) } + +func (c *context) notEq(ctx *context) bool { return c.i != ctx.i } + +func (c *context) states() []state { + if ns := int32(c.s[0].uint16()); ns != 1 { + i := c.s[1].succ + return c.a.states[i : i+ns] + } + return c.s[1:] +} + +// shrinkStates shrinks the state list down to size states +func (c *context) shrinkStates(states []state, size int) []state { + i1 := units2Index[(len(states)+1)>>1] + i2 := units2Index[(size+1)>>1] + + if size == 1 { + // store state in context, and free states block + n := c.statesIndex() + c.s[1] = states[0] + states = c.s[1:] + c.a.addFreeBlock(n, i1) + } else if i1 != i2 { + if n := c.a.removeFreeBlock(i2); n > 0 { + // allocate new block and copy + copy(c.a.states[n:], states[:size]) + states = c.a.states[n:] + // free old block + c.a.addFreeBlock(c.statesIndex(), i1) + c.setStatesIndex(n) + } else { + // split current block, and free units not needed + n = c.statesIndex() + index2Units[i2]<<1 + u := index2Units[i1] - index2Units[i2] + c.a.freeUnits(n, u) + } + } + c.setNumStates(size) + return states[:size] +} + +// expandStates expands the states list by one +func (c *context) expandStates() []state { + states := c.states() + ns := len(states) + if ns == 1 { + s := states[0] + n := c.a.allocUnits(1) + if n == 0 { + return nil + } + c.setStatesIndex(n) + states = c.a.states[n:] + states[0] = s + } else if ns&0x1 == 0 { + u := ns >> 1 + i1 := units2Index[u] + i2 := units2Index[u+1] + if i1 != i2 { + n := c.a.allocUnits(i2) + if n == 0 { + return nil + } + copy(c.a.states[n:], states) + c.a.addFreeBlock(c.statesIndex(), i1) + c.setStatesIndex(n) + states = c.a.states[n:] + } + } + c.setNumStates(ns + 1) + return states[:ns+1] +} + +type subAllocator struct { + // memory for allocation is split into two heaps + + heap1MaxBytes int32 // maximum bytes available in heap1 + heap1Lo int32 // heap1 bottom in number of bytes + heap1Hi int32 // heap1 top in number of bytes + heap2Lo int32 // heap2 bottom index in states + heap2Hi int32 // heap2 top index in states + glueCount int + + // Each freeList entry contains an index into states for the beginning + // of a free block. The first state in that block may contain an index + // to another free block and so on. The size of the free block in units + // (2 states) for that freeList index can be determined from the + // index2Units array. + freeList [nIndexes]int32 + + // Instead of bytes, memory is represented by a slice of states. + // context's are marshalled to and from a pair of states. 
+ // multiple bytes are stored in a state. + states []state +} + +func (a *subAllocator) init(maxMB int) { + bytes := int32(maxMB) << 20 + heap2Units := bytes / 8 / unitSize * 7 + a.heap1MaxBytes = bytes - heap2Units*unitSize + // Add one for the case when bytes are not a multiple of unitSize + heap1Units := a.heap1MaxBytes/unitSize + 1 + // Calculate total size in state's. Add 1 unit so we can reserve the first unit. + // This will allow us to use the zero index as a nil pointer. + n := int(1+heap1Units+heap2Units) * 2 + if cap(a.states) > n { + a.states = a.states[:n] + } else { + a.states = make([]state, n) + } +} + +func (a *subAllocator) restart() { + // Pad heap1 start by 1 unit and enough bytes so that there is no + // gap between heap1 end and heap2 start. + a.heap1Lo = unitSize + (unitSize - a.heap1MaxBytes%unitSize) + a.heap1Hi = unitSize + (a.heap1MaxBytes/unitSize+1)*unitSize + a.heap2Lo = a.heap1Hi / unitSize * 2 + a.heap2Hi = int32(len(a.states)) + a.glueCount = 0 + for i := range a.freeList { + a.freeList[i] = 0 + } + for i := range a.states { + a.states[i] = state{} + } +} + +// pushByte puts a byte on the heap and returns a state.succ index that +// can be used to retrieve it. +func (a *subAllocator) pushByte(c byte) int32 { + si := a.heap1Lo / 6 // state index + oi := a.heap1Lo % 6 // byte position in state + switch oi { + case 0: + a.states[si].sym = c + case 1: + a.states[si].freq = c + default: + n := (uint(oi) - 2) * 8 + mask := ^(uint32(0xFF) << n) + succ := uint32(a.states[si].succ) & mask + succ |= uint32(c) << n + a.states[si].succ = int32(succ) + } + a.heap1Lo++ + if a.heap1Lo >= a.heap1Hi { + return 0 + } + return -a.heap1Lo +} + +// popByte reverses the previous pushByte +func (a *subAllocator) popByte() { a.heap1Lo-- } + +// succByte returns a byte from the heap given a state.succ index +func (a *subAllocator) succByte(i int32) byte { + i = -i + si := i / 6 + oi := i % 6 + switch oi { + case 0: + return a.states[si].sym + case 1: + return a.states[si].freq + default: + n := (uint(oi) - 2) * 8 + succ := uint32(a.states[si].succ) >> n + return byte(succ & 0xff) + } +} + +// succContext returns a context given a state.succ index +func (a *subAllocator) succContext(i int32) *context { + if i <= 0 { + return nil + } + return &context{i: i, s: a.states[i : i+2 : i+2], a: a} +} + +// succIsNil returns whether a state.succ points to nothing +func (a *subAllocator) succIsNil(i int32) bool { return i == 0 } + +// nextByteAddr takes a state.succ value representing a pointer +// to a byte, and returns the next bytes address +func (a *subAllocator) nextByteAddr(n int32) int32 { return n - 1 } + +func (a *subAllocator) removeFreeBlock(i byte) int32 { + n := a.freeList[i] + if n != 0 { + a.freeList[i] = a.states[n].succ + a.states[n] = state{} + } + return n +} + +func (a *subAllocator) addFreeBlock(n int32, i byte) { + a.states[n].succ = a.freeList[i] + a.freeList[i] = n +} + +func (a *subAllocator) freeUnits(n, u int32) { + i := units2Index[u] + if u != index2Units[i] { + i-- + a.addFreeBlock(n, i) + u -= index2Units[i] + n += index2Units[i] << 1 + i = units2Index[u] + } + a.addFreeBlock(n, i) +} + +func (a *subAllocator) glueFreeBlocks() { + var freeIndex int32 + + for i, n := range a.freeList { + s := state{succ: freeMark} + s.setUint16(uint16(index2Units[i])) + for n != 0 { + states := a.states[n:] + states[1].succ = freeIndex + freeIndex = n + n = states[0].succ + states[0] = s + } + a.freeList[i] = 0 + } + + for i := freeIndex; i != 0; i = a.states[i+1].succ { + if 
a.states[i].succ != freeMark { + continue + } + u := int32(a.states[i].uint16()) + states := a.states[i+u<<1:] + for len(states) > 0 && states[0].succ == freeMark { + u += int32(states[0].uint16()) + if u > maxUint16 { + break + } + states[0].succ = 0 + a.states[i].setUint16(uint16(u)) + states = a.states[i+u<<1:] + } + } + + for n := freeIndex; n != 0; n = a.states[n+1].succ { + if a.states[n].succ != freeMark { + continue + } + a.states[n].succ = 0 + u := int32(a.states[n].uint16()) + m := n + for u > 128 { + a.addFreeBlock(m, nIndexes-1) + u -= 128 + m += 256 + } + a.freeUnits(m, u) + } +} + +func (a *subAllocator) allocUnitsRare(index byte) int32 { + if a.glueCount == 0 { + a.glueCount = 255 + a.glueFreeBlocks() + if n := a.removeFreeBlock(index); n > 0 { + return n + } + } + // try to find a larger free block and split it + for i := index + 1; i < nIndexes; i++ { + if n := a.removeFreeBlock(i); n > 0 { + u := index2Units[i] - index2Units[index] + a.freeUnits(n+index2Units[index]<<1, u) + return n + } + } + a.glueCount-- + + // try to allocate units from the top of heap1 + n := a.heap1Hi - index2Units[index]*unitSize + if n > a.heap1Lo { + a.heap1Hi = n + return a.heap1Hi / unitSize * 2 + } + return 0 +} + +func (a *subAllocator) allocUnits(i byte) int32 { + // try to allocate a free block + if n := a.removeFreeBlock(i); n > 0 { + return n + } + // try to allocate from the bottom of heap2 + n := index2Units[i] << 1 + if a.heap2Lo+n <= a.heap2Hi { + lo := a.heap2Lo + a.heap2Lo += n + return lo + } + return a.allocUnitsRare(i) +} + +func (a *subAllocator) newContext(s state, suffix *context) *context { + var n int32 + if a.heap2Lo < a.heap2Hi { + // allocate from top of heap2 + a.heap2Hi -= 2 + n = a.heap2Hi + } else if n = a.removeFreeBlock(1); n == 0 { + if n = a.allocUnitsRare(1); n == 0 { + return nil + } + } + c := &context{i: n, s: a.states[n : n+2 : n+2], a: a} + c.s[0] = state{} + c.setNumStates(1) + c.s[1] = s + if suffix != nil { + c.setSuffix(suffix) + } + return c +} + +func (a *subAllocator) newContextSize(ns int) *context { + c := a.newContext(state{}, nil) + c.setNumStates(ns) + i := units2Index[(ns+1)>>1] + n := a.allocUnits(i) + c.setStatesIndex(n) + return c +} + +type model struct { + maxOrder int + orderFall int + initRL int + runLength int + prevSuccess byte + escCount byte + prevSym byte + initEsc byte + minC *context + maxC *context + rc rangeCoder + a subAllocator + charMask [256]byte + binSumm [128][64]uint16 + see2Cont [25][16]see2Context +} + +func (m *model) restart() { + for i := range m.charMask { + m.charMask[i] = 0 + } + m.escCount = 1 + + if m.maxOrder < 12 { + m.initRL = -m.maxOrder - 1 + } else { + m.initRL = -12 - 1 + } + m.orderFall = m.maxOrder + m.runLength = m.initRL + m.prevSuccess = 0 + + m.a.restart() + + c := m.a.newContextSize(256) + c.setSummFreq(257) + states := c.states() + for i := range states { + states[i] = state{sym: byte(i), freq: 1} + } + m.minC = c + m.maxC = c + m.prevSym = 0 + + for i := range m.binSumm { + for j, esc := range initBinEsc { + n := binScale - esc/(uint16(i)+2) + for k := j; k < len(m.binSumm[i]); k += len(initBinEsc) { + m.binSumm[i][k] = n + } + } + } + + for i := range m.see2Cont { + see := newSee2Context(5*uint16(i) + 10) + for j := range m.see2Cont[i] { + m.see2Cont[i][j] = see + } + } +} + +func (m *model) init(br io.ByteReader, reset bool, maxOrder, maxMB int) error { + err := m.rc.init(br) + if err != nil { + return err + } + if !reset { + if m.minC == nil { + return errCorruptPPM + } + return nil + } + + 
m.a.init(maxMB) + + if maxOrder == 1 { + return errCorruptPPM + } + m.maxOrder = maxOrder + m.restart() + return nil +} + +func (m *model) rescale(s *state) *state { + if s.freq <= maxFreq { + return s + } + c := m.minC + + var summFreq uint16 + + s.freq += 4 + states := c.states() + escFreq := c.summFreq() + 4 + + for i := range states { + f := states[i].freq + escFreq -= uint16(f) + if m.orderFall != 0 { + f++ + } + f >>= 1 + summFreq += uint16(f) + states[i].freq = f + + if i == 0 || f <= states[i-1].freq { + continue + } + j := i - 1 + for j > 0 && f > states[j-1].freq { + j-- + } + t := states[i] + copy(states[j+1:i+1], states[j:i]) + states[j] = t + } + + i := len(states) - 1 + for states[i].freq == 0 { + i-- + escFreq++ + } + if i != len(states)-1 { + states = c.shrinkStates(states, i+1) + } + s = &states[0] + if i == 0 { + for { + s.freq -= s.freq >> 1 + escFreq >>= 1 + if escFreq <= 1 { + return s + } + } + } + summFreq += escFreq - (escFreq >> 1) + c.setSummFreq(summFreq) + return s +} + +func (m *model) decodeBinSymbol() (*state, error) { + c := m.minC + s := &c.states()[0] + + ns := c.suffix().numStates() + i := m.prevSuccess + ns2BSIndex[ns-1] + byte(m.runLength>>26)&0x20 + if m.prevSym >= 64 { + i += 8 + } + if s.sym >= 64 { + i += 2 * 8 + } + bs := &m.binSumm[s.freq-1][i] + mean := (*bs + 1<<(periodBits-2)) >> periodBits + + if m.rc.currentCount(binScale) < uint32(*bs) { + err := m.rc.decode(0, uint32(*bs)) + if s.freq < 128 { + s.freq++ + } + *bs += 1<<intBits - mean + m.prevSuccess = 1 + m.runLength++ + return s, err + } + err := m.rc.decode(uint32(*bs), binScale) + *bs -= mean + m.initEsc = expEscape[*bs>>10] + m.charMask[s.sym] = m.escCount + m.prevSuccess = 0 + return nil, err +} + +func (m *model) decodeSymbol1() (*state, error) { + c := m.minC + states := c.states() + scale := uint32(c.summFreq()) + // protect against divide by zero + // TODO: look at why this happens, may be problem elsewhere + if scale == 0 { + return nil, errCorruptPPM + } + count := m.rc.currentCount(scale) + m.prevSuccess = 0 + + var n uint32 + for i := range states { + s := &states[i] + n += uint32(s.freq) + if n <= count { + continue + } + err := m.rc.decode(n-uint32(s.freq), n) + s.freq += 4 + c.setSummFreq(uint16(scale + 4)) + if i == 0 { + if 2*n > scale { + m.prevSuccess = 1 + m.runLength++ + } + } else { + if s.freq <= states[i-1].freq { + return s, err + } + states[i-1], states[i] = states[i], states[i-1] + s = &states[i-1] + } + return m.rescale(s), err + } + + for _, s := range states { + m.charMask[s.sym] = m.escCount + } + return nil, m.rc.decode(n, scale) +} + +func (m *model) makeEscFreq(c *context, numMasked int) *see2Context { + ns := c.numStates() + if ns == 256 { + return nil + } + diff := ns - numMasked + + var i int + if m.prevSym >= 64 { + i = 8 + } + if diff < c.suffix().numStates()-ns { + i++ + } + if int(c.summFreq()) < 11*ns { + i += 2 + } + if numMasked > diff { + i += 4 + } + return &m.see2Cont[ns2Index[diff-1]][i] +} + +func (m *model) decodeSymbol2(numMasked int) (*state, error) { + c := m.minC + + see := m.makeEscFreq(c, numMasked) + scale := see.mean() + + var i int + var hi uint32 + states := c.states() + sl := make([]*state, len(states)-numMasked) + for j := range sl { + for m.charMask[states[i].sym] == m.escCount { + i++ + } + hi += uint32(states[i].freq) + sl[j] = &states[i] + i++ + } + + scale += hi + count := m.rc.currentCount(scale) + + if count >= scale { + return nil, errCorruptPPM + } + if count >= hi { + err := m.rc.decode(hi, scale) + if see != nil { + see.summ += uint16(scale) + } + for _, s := range sl { + m.charMask[s.sym] = m.escCount + } + return nil, err + } + + hi = uint32(sl[0].freq) + for hi <= count 
{ + sl = sl[1:] + hi += uint32(sl[0].freq) + } + s := sl[0] + + err := m.rc.decode(hi-uint32(s.freq), hi) + + see.update() + + m.escCount++ + m.runLength = m.initRL + + s.freq += 4 + c.setSummFreq(c.summFreq() + 4) + return m.rescale(s), err +} + +func (c *context) findState(sym byte) *state { + var i int + states := c.states() + for i = range states { + if states[i].sym == sym { + break + } + } + return &states[i] +} + +func (m *model) createSuccessors(s, ss *state) *context { + var sl []*state + + if m.orderFall != 0 { + sl = append(sl, s) + } + + c := m.minC + for suff := c.suffix(); suff != nil; suff = c.suffix() { + c = suff + + if ss == nil { + ss = c.findState(s.sym) + } + if ss.succ != s.succ { + c = m.a.succContext(ss.succ) + break + } + sl = append(sl, ss) + ss = nil + } + + if len(sl) == 0 { + return c + } + + var up state + up.sym = m.a.succByte(s.succ) + up.succ = m.a.nextByteAddr(s.succ) + + states := c.states() + if len(states) > 1 { + s = c.findState(up.sym) + + cf := uint16(s.freq) - 1 + s0 := c.summFreq() - uint16(len(states)) - cf + + if 2*cf <= s0 { + if 5*cf > s0 { + up.freq = 2 + } else { + up.freq = 1 + } + } else { + up.freq = byte(1 + (2*cf+3*s0-1)/(2*s0)) + } + } else { + up.freq = states[0].freq + } + + for i := len(sl) - 1; i >= 0; i-- { + c = m.a.newContext(up, c) + if c == nil { + return nil + } + sl[i].succ = c.succPtr() + } + return c +} + +func (m *model) update(s *state) { + if m.orderFall == 0 { + if c := m.a.succContext(s.succ); c != nil { + m.minC = c + m.maxC = c + return + } + } + + if m.escCount == 0 { + m.escCount = 1 + for i := range m.charMask { + m.charMask[i] = 0 + } + } + + var ss *state // matching minC.suffix state + + if s.freq < maxFreq/4 && m.minC.suffix() != nil { + c := m.minC.suffix() + states := c.states() + + var i int + if len(states) > 1 { + for states[i].sym != s.sym { + i++ + } + if i > 0 && states[i].freq >= states[i-1].freq { + states[i-1], states[i] = states[i], states[i-1] + i-- + } + if states[i].freq < maxFreq-9 { + states[i].freq += 2 + c.setSummFreq(c.summFreq() + 2) + } + } else if states[0].freq < 32 { + states[0].freq++ + } + ss = &states[i] // save later for createSuccessors + } + + if m.orderFall == 0 { + c := m.createSuccessors(s, ss) + if c == nil { + m.restart() + } else { + m.minC = c + m.maxC = c + s.succ = c.succPtr() + } + return + } + + succ := m.a.pushByte(s.sym) + if m.a.succIsNil(succ) { + m.restart() + return + } + + var minC *context + if m.a.succIsNil(s.succ) { + s.succ = succ + minC = m.minC + } else { + minC = m.a.succContext(s.succ) + if minC == nil { + minC = m.createSuccessors(s, ss) + if minC == nil { + m.restart() + return + } + } + m.orderFall-- + if m.orderFall == 0 { + succ = minC.succPtr() + if m.maxC.notEq(m.minC) { + m.a.popByte() + } + } + } + + n := m.minC.numStates() + s0 := int(m.minC.summFreq()) - n - int(s.freq-1) + for c := m.maxC; c.notEq(m.minC); c = c.suffix() { + var summFreq uint16 + + states := c.expandStates() + if states == nil { + m.restart() + return + } + if ns := len(states) - 1; ns != 1 { + summFreq = c.summFreq() + if 4*ns <= n && int(summFreq) <= 8*ns { + summFreq += 2 + } + if 2*ns < n { + summFreq++ + } + } else { + p := &states[0] + if p.freq < maxFreq/4-1 { + p.freq += p.freq + } else { + p.freq = maxFreq - 4 + } + summFreq = uint16(p.freq) + uint16(m.initEsc) + if n > 3 { + summFreq++ + } + } + + cf := 2 * int(s.freq) * int(summFreq+6) + sf := s0 + int(summFreq) + var freq byte + if cf >= 6*sf { + switch { + case cf >= 15*sf: + freq = 7 + case cf >= 12*sf: + freq = 
6 + case cf >= 9*sf: + freq = 5 + default: + freq = 4 + } + summFreq += uint16(freq) + } else { + switch { + case cf >= 4*sf: + freq = 3 + case cf > sf: + freq = 2 + default: + freq = 1 + } + summFreq += 3 + } + states[len(states)-1] = state{sym: s.sym, freq: freq, succ: succ} + c.setSummFreq(summFreq) + } + m.minC = minC + m.maxC = minC +} + +func (m *model) ReadByte() (byte, error) { + if m.minC == nil { + return 0, errCorruptPPM + } + var s *state + var err error + if m.minC.numStates() == 1 { + s, err = m.decodeBinSymbol() + } else { + s, err = m.decodeSymbol1() + } + for s == nil && err == nil { + n := m.minC.numStates() + for m.minC.numStates() == n { + m.orderFall++ + m.minC = m.minC.suffix() + if m.minC == nil { + return 0, errCorruptPPM + } + } + s, err = m.decodeSymbol2(n) + } + if err != nil { + return 0, err + } + + // save sym so it doesn't get overwritten by a possible restart() + sym := s.sym + m.update(s) + m.prevSym = sym + return sym, nil +} diff --git a/vendor/github.com/nwaples/rardecode/reader.go b/vendor/github.com/nwaples/rardecode/reader.go new file mode 100644 index 00000000..11adc4fe --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/reader.go @@ -0,0 +1,376 @@ +package rardecode + +import ( + "bufio" + "bytes" + "errors" + "io" + "io/ioutil" + "os" + "time" +) + +// FileHeader HostOS types +const ( + HostOSUnknown = 0 + HostOSMSDOS = 1 + HostOSOS2 = 2 + HostOSWindows = 3 + HostOSUnix = 4 + HostOSMacOS = 5 + HostOSBeOS = 6 +) + +const ( + maxPassword = 128 +) + +var ( + errShortFile = errors.New("rardecode: decoded file too short") + errInvalidFileBlock = errors.New("rardecode: invalid file block") + errUnexpectedArcEnd = errors.New("rardecode: unexpected end of archive") + errBadFileChecksum = errors.New("rardecode: bad file checksum") +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +type limitedReader struct { + r io.Reader + n int64 // bytes remaining + shortErr error // error returned when r returns io.EOF with n > 0 +} + +func (l *limitedReader) Read(p []byte) (int, error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err := l.r.Read(p) + l.n -= int64(n) + if err == io.EOF && l.n > 0 { + return n, l.shortErr + } + return n, err +} + +type limitedByteReader struct { + limitedReader + br io.ByteReader +} + +func (l *limitedByteReader) ReadByte() (byte, error) { + if l.n <= 0 { + return 0, io.EOF + } + c, err := l.br.ReadByte() + if err == nil { + l.n-- + } else if err == io.EOF && l.n > 0 { + return 0, l.shortErr + } + return c, err +} + +// limitByteReader returns a limitedByteReader that reads from r and stops with +// io.EOF after n bytes. +// If r returns an io.EOF before reading n bytes, io.ErrUnexpectedEOF is returned. +func limitByteReader(r byteReader, n int64) *limitedByteReader { + return &limitedByteReader{limitedReader{r, n, io.ErrUnexpectedEOF}, r} +} + +// fileChecksum allows file checksum validations to be performed. +// File contents must first be written to fileChecksum. Then valid is +// called to perform the file checksum calculation to determine +// if the file contents are valid or not. +type fileChecksum interface { + io.Writer + valid() bool +} + +// FileHeader represents a single file in a RAR archive. 
+type FileHeader struct { + Name string // file name using '/' as the directory separator + IsDir bool // is a directory + HostOS byte // Host OS the archive was created on + Attributes int64 // Host OS specific file attributes + PackedSize int64 // packed file size (or first block if the file spans volumes) + UnPackedSize int64 // unpacked file size + UnKnownSize bool // unpacked file size is not known + ModificationTime time.Time // modification time (non-zero if set) + CreationTime time.Time // creation time (non-zero if set) + AccessTime time.Time // access time (non-zero if set) + Version int // file version +} + +// Mode returns an os.FileMode for the file, calculated from the Attributes field. +func (f *FileHeader) Mode() os.FileMode { + var m os.FileMode + + if f.IsDir { + m = os.ModeDir + } + if f.HostOS == HostOSWindows { + if f.IsDir { + m |= 0777 + } else if f.Attributes&1 > 0 { + m |= 0444 // readonly + } else { + m |= 0666 + } + return m + } + // assume unix perms for all remaining os types + m |= os.FileMode(f.Attributes) & os.ModePerm + + // only check other bits on unix host created archives + if f.HostOS != HostOSUnix { + return m + } + + if f.Attributes&0x200 != 0 { + m |= os.ModeSticky + } + if f.Attributes&0x400 != 0 { + m |= os.ModeSetgid + } + if f.Attributes&0x800 != 0 { + m |= os.ModeSetuid + } + + // Check for additional file types. + if f.Attributes&0xF000 == 0xA000 { + m |= os.ModeSymlink + } + return m +} + +// fileBlockHeader represents a file block in a RAR archive. +// Files may comprise one or more file blocks. +// Solid files retain decode tables and dictionary from previous solid files in the archive. +type fileBlockHeader struct { + first bool // first block in file + last bool // last block in file + solid bool // file is solid + winSize uint // log base 2 of decode window size + cksum fileChecksum // file checksum + decoder decoder // decoder to use for file + key []byte // key for AES, non-empty if file encrypted + iv []byte // iv for AES, non-empty if file encrypted + FileHeader +} + +// fileBlockReader provides sequential access to file blocks in a RAR archive. +type fileBlockReader interface { + io.Reader // Reads data from the current file block + io.ByteReader // Reads bytes from the current file block + next() (*fileBlockHeader, error) // reads the next file block header at current position + reset() // resets encryption + isSolid() bool // is archive solid + version() int // returns current archive format version +} + +// packedFileReader provides sequential access to packed files in a RAR archive. +type packedFileReader struct { + r fileBlockReader + h *fileBlockHeader // current file header +} + +// nextBlockInFile reads the next file block in the current file at the current +// archive file position, or returns an error if there is a problem. +// It is invalid to call this when already at the last block in the current file. +func (f *packedFileReader) nextBlockInFile() error { + h, err := f.r.next() + if err != nil { + if err == io.EOF { + // archive ended, but file hasn't + return errUnexpectedArcEnd + } + return err + } + if h.first || h.Name != f.h.Name { + return errInvalidFileBlock + } + f.h = h + return nil +} + +// next advances to the next packed file in the RAR archive. 
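+// It first skips past and discards any remaining blocks and data of the
+// current file, so callers need not drain a file before advancing.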
+func (f *packedFileReader) next() (*fileBlockHeader, error) { + if f.h != nil { + // skip to last block in current file + for !f.h.last { + // discard remaining block data + if _, err := io.Copy(ioutil.Discard, f.r); err != nil { + return nil, err + } + if err := f.nextBlockInFile(); err != nil { + return nil, err + } + } + // discard last block data + if _, err := io.Copy(ioutil.Discard, f.r); err != nil { + return nil, err + } + } + var err error + f.h, err = f.r.next() // get next file block + if err != nil { + if err == errArchiveEnd { + return nil, io.EOF + } + return nil, err + } + if !f.h.first { + return nil, errInvalidFileBlock + } + return f.h, nil +} + +// Read reads the packed data for the current file into p. +func (f *packedFileReader) Read(p []byte) (int, error) { + n, err := f.r.Read(p) // read current block data + for err == io.EOF { // current block empty + if n > 0 { + return n, nil + } + if f.h == nil || f.h.last { + return 0, io.EOF // last block so end of file + } + if err := f.nextBlockInFile(); err != nil { + return 0, err + } + n, err = f.r.Read(p) // read new block data + } + return n, err +} + +func (f *packedFileReader) ReadByte() (byte, error) { + c, err := f.r.ReadByte() // read current block data + for err == io.EOF && f.h != nil && !f.h.last { // current block empty + if err := f.nextBlockInFile(); err != nil { + return 0, err + } + c, err = f.r.ReadByte() // read new block data + } + return c, err +} + +// Reader provides sequential access to files in a RAR archive. +type Reader struct { + r io.Reader // reader for current unpacked file + pr packedFileReader // reader for current packed file + dr decodeReader // reader for decoding and filters if file is compressed + cksum fileChecksum // current file checksum + solidr io.Reader // reader for solid file +} + +// Read reads from the current file in the RAR archive. +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + if err == io.EOF && r.cksum != nil && !r.cksum.valid() { + return n, errBadFileChecksum + } + return n, err +} + +// Next advances to the next file in the archive. +func (r *Reader) Next() (*FileHeader, error) { + if r.solidr != nil { + // solid files must be read fully to update decoder information + if _, err := io.Copy(ioutil.Discard, r.solidr); err != nil { + return nil, err + } + } + + h, err := r.pr.next() // skip to next file + if err != nil { + return nil, err + } + r.solidr = nil + + br := byteReader(&r.pr) // start with packed file reader + + // check for encryption + if len(h.key) > 0 && len(h.iv) > 0 { + br = newAesDecryptReader(br, h.key, h.iv) // decrypt + } + r.r = br + // check for compression + if h.decoder != nil { + err = r.dr.init(br, h.decoder, h.winSize, !h.solid) + if err != nil { + return nil, err + } + r.r = &r.dr + if r.pr.r.isSolid() { + r.solidr = r.r + } + } + if h.UnPackedSize >= 0 && !h.UnKnownSize { + // Limit reading to UnPackedSize as there may be padding + r.r = &limitedReader{r.r, h.UnPackedSize, errShortFile} + } + r.cksum = h.cksum + if r.cksum != nil { + r.r = io.TeeReader(r.r, h.cksum) // write file data to checksum as it is read + } + fh := new(FileHeader) + *fh = h.FileHeader + return fh, nil +} + +func (r *Reader) init(fbr fileBlockReader) { + r.r = bytes.NewReader(nil) // initial reads will always return EOF + r.pr.r = fbr +} + +// NewReader creates a Reader reading from r. +// NewReader only supports single volume archives. +// Multi-volume archives must use OpenReader. 
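+//
+// A minimal usage sketch (illustrative only: src is any io.Reader over the
+// archive bytes, and the empty password suits unencrypted archives):
+//
+//	r, err := rardecode.NewReader(src, "")
+//	for err == nil {
+//		var h *FileHeader
+//		if h, err = r.Next(); err == nil {
+//			_, err = io.Copy(ioutil.Discard, r) // h.Name identifies the file
+//		}
+//	}
+//	// err is io.EOF once every file has been read.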
+func NewReader(r io.Reader, password string) (*Reader, error) { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + fbr, err := newFileBlockReader(br, password) + if err != nil { + return nil, err + } + rr := new(Reader) + rr.init(fbr) + return rr, nil +} + +type ReadCloser struct { + v *volume + Reader +} + +// Close closes the rar file. +func (rc *ReadCloser) Close() error { + return rc.v.Close() +} + +// Volumes returns the volume filenames that have been used in decoding the archive +// up to this point. This will include the current open volume if the archive is still +// being processed. +func (rc *ReadCloser) Volumes() []string { + return rc.v.files +} + +// OpenReader opens a RAR archive specified by the name and returns a ReadCloser. +func OpenReader(name, password string) (*ReadCloser, error) { + v, err := openVolume(name, password) + if err != nil { + return nil, err + } + rc := new(ReadCloser) + rc.v = v + rc.Reader.init(v) + return rc, nil +} diff --git a/vendor/github.com/nwaples/rardecode/vm.go b/vendor/github.com/nwaples/rardecode/vm.go new file mode 100644 index 00000000..fd26a5a0 --- /dev/null +++ b/vendor/github.com/nwaples/rardecode/vm.go @@ -0,0 +1,687 @@ +package rardecode + +import ( + "encoding/binary" + "errors" +) + +const ( + // vm flag bits + flagC = 1 // Carry + flagZ = 2 // Zero + flagS = 0x80000000 // Sign + + maxCommands = 25000000 // maximum number of commands that can be run in a program + + vmRegs = 8 // number of registers + vmSize = 0x40000 // memory size + vmMask = vmSize - 1 +) + +var ( + errInvalidVMInstruction = errors.New("rardecode: invalid vm instruction") +) + +type vm struct { + ip uint32 // instruction pointer + ipMod bool // ip was modified + fl uint32 // flag bits + r [vmRegs]uint32 // registers + m []byte // memory +} + +func (v *vm) setIP(ip uint32) { + v.ip = ip + v.ipMod = true +} + +// execute runs a list of commands on the vm. +func (v *vm) execute(cmd []command) { + v.ip = 0 // reset instruction pointer + for n := 0; n < maxCommands; n++ { + ip := v.ip + if ip >= uint32(len(cmd)) { + return + } + ins := cmd[ip] + ins.f(v, ins.bm, ins.op) // run cpu instruction + if v.ipMod { + // command modified ip, don't increment + v.ipMod = false + } else { + v.ip++ // increment ip for next command + } + } +} + +// newVM creates a new RAR virtual machine using the byte slice as memory. 
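+// The slice is grown (and zero padded) to vmSize+4 bytes when needed, and
+// register 7 is initialised to vmSize, where it serves as the stack pointer
+// for push, pop, call and ret.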
+func newVM(mem []byte) *vm { + v := new(vm) + + if cap(mem) < vmSize+4 { + v.m = make([]byte, vmSize+4) + copy(v.m, mem) + } else { + v.m = mem[:vmSize+4] + for i := len(mem); i < len(v.m); i++ { + v.m[i] = 0 + } + } + v.r[7] = vmSize + return v +} + +type operand interface { + get(v *vm, byteMode bool) uint32 + set(v *vm, byteMode bool, n uint32) +} + +// Immediate Operand +type opI uint32 + +func (op opI) get(v *vm, bm bool) uint32 { return uint32(op) } +func (op opI) set(v *vm, bm bool, n uint32) {} + +// Direct Operand +type opD uint32 + +func (op opD) get(v *vm, byteMode bool) uint32 { + if byteMode { + return uint32(v.m[op]) + } + return binary.LittleEndian.Uint32(v.m[op:]) +} + +func (op opD) set(v *vm, byteMode bool, n uint32) { + if byteMode { + v.m[op] = byte(n) + } else { + binary.LittleEndian.PutUint32(v.m[op:], n) + } +} + +// Register Operand +type opR uint32 + +func (op opR) get(v *vm, byteMode bool) uint32 { + if byteMode { + return v.r[op] & 0xFF + } + return v.r[op] +} + +func (op opR) set(v *vm, byteMode bool, n uint32) { + if byteMode { + v.r[op] = (v.r[op] & 0xFFFFFF00) | (n & 0xFF) + } else { + v.r[op] = n + } +} + +// Register Indirect Operand +type opRI uint32 + +func (op opRI) get(v *vm, byteMode bool) uint32 { + i := v.r[op] & vmMask + if byteMode { + return uint32(v.m[i]) + } + return binary.LittleEndian.Uint32(v.m[i:]) +} +func (op opRI) set(v *vm, byteMode bool, n uint32) { + i := v.r[op] & vmMask + if byteMode { + v.m[i] = byte(n) + } else { + binary.LittleEndian.PutUint32(v.m[i:], n) + } +} + +// Base Plus Index Indirect Operand +type opBI struct { + r uint32 + i uint32 +} + +func (op opBI) get(v *vm, byteMode bool) uint32 { + i := (v.r[op.r] + op.i) & vmMask + if byteMode { + return uint32(v.m[i]) + } + return binary.LittleEndian.Uint32(v.m[i:]) +} +func (op opBI) set(v *vm, byteMode bool, n uint32) { + i := (v.r[op.r] + op.i) & vmMask + if byteMode { + v.m[i] = byte(n) + } else { + binary.LittleEndian.PutUint32(v.m[i:], n) + } +} + +type commandFunc func(v *vm, byteMode bool, op []operand) + +type command struct { + f commandFunc + bm bool // is byte mode + op []operand +} + +var ( + ops = []struct { + f commandFunc + byteMode bool // supports byte mode + nops int // number of operands + jop bool // is a jump op + }{ + {mov, true, 2, false}, + {cmp, true, 2, false}, + {add, true, 2, false}, + {sub, true, 2, false}, + {jz, false, 1, true}, + {jnz, false, 1, true}, + {inc, true, 1, false}, + {dec, true, 1, false}, + {jmp, false, 1, true}, + {xor, true, 2, false}, + {and, true, 2, false}, + {or, true, 2, false}, + {test, true, 2, false}, + {js, false, 1, true}, + {jns, false, 1, true}, + {jb, false, 1, true}, + {jbe, false, 1, true}, + {ja, false, 1, true}, + {jae, false, 1, true}, + {push, false, 1, false}, + {pop, false, 1, false}, + {call, false, 1, true}, + {ret, false, 0, false}, + {not, true, 1, false}, + {shl, true, 2, false}, + {shr, true, 2, false}, + {sar, true, 2, false}, + {neg, true, 1, false}, + {pusha, false, 0, false}, + {popa, false, 0, false}, + {pushf, false, 0, false}, + {popf, false, 0, false}, + {movzx, false, 2, false}, + {movsx, false, 2, false}, + {xchg, true, 2, false}, + {mul, true, 2, false}, + {div, true, 2, false}, + {adc, true, 2, false}, + {sbb, true, 2, false}, + {print, false, 0, false}, + } +) + +func mov(v *vm, bm bool, op []operand) { + op[0].set(v, bm, op[1].get(v, bm)) +} + +func cmp(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + r := v1 - op[1].get(v, bm) + if r == 0 { + v.fl = flagZ + } else { + v.fl = 0 + 
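// An unsigned result greater than the first operand means the subtraction wrapped around (a borrow), so set the carry flag. + 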
if r > v1 { + v.fl = flagC + } + v.fl |= r & flagS + } +} + +func add(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + r := v1 + op[1].get(v, bm) + v.fl = 0 + signBit := uint32(flagS) + if bm { + r &= 0xFF + signBit = 0x80 + } + if r < v1 { + v.fl |= flagC + } + if r == 0 { + v.fl |= flagZ + } else if r&signBit > 0 { + v.fl |= flagS + } + op[0].set(v, bm, r) +} + +func sub(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + r := v1 - op[1].get(v, bm) + v.fl = 0 + + if r == 0 { + v.fl = flagZ + } else { + v.fl = 0 + if r > v1 { + v.fl = flagC + } + v.fl |= r & flagS + } + op[0].set(v, bm, r) +} + +func jz(v *vm, bm bool, op []operand) { + if v.fl&flagZ > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jnz(v *vm, bm bool, op []operand) { + if v.fl&flagZ == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func inc(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) + 1 + if bm { + r &= 0xFF + } + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func dec(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) - 1 + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func jmp(v *vm, bm bool, op []operand) { + v.setIP(op[0].get(v, false)) +} + +func xor(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) ^ op[1].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func and(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) & op[1].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func or(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) | op[1].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func test(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) & op[1].get(v, bm) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } +} + +func js(v *vm, bm bool, op []operand) { + if v.fl&flagS > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jns(v *vm, bm bool, op []operand) { + if v.fl&flagS == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jb(v *vm, bm bool, op []operand) { + if v.fl&flagC > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jbe(v *vm, bm bool, op []operand) { + if v.fl&(flagC|flagZ) > 0 { + v.setIP(op[0].get(v, false)) + } +} + +func ja(v *vm, bm bool, op []operand) { + if v.fl&(flagC|flagZ) == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func jae(v *vm, bm bool, op []operand) { + if v.fl&flagC == 0 { + v.setIP(op[0].get(v, false)) + } +} + +func push(v *vm, bm bool, op []operand) { + v.r[7] -= 4 + opRI(7).set(v, false, op[0].get(v, false)) + +} + +func pop(v *vm, bm bool, op []operand) { + op[0].set(v, false, opRI(7).get(v, false)) + v.r[7] += 4 +} + +func call(v *vm, bm bool, op []operand) { + v.r[7] -= 4 + opRI(7).set(v, false, v.ip+1) + v.setIP(op[0].get(v, false)) +} + +func ret(v *vm, bm bool, op []operand) { + r7 := v.r[7] + if r7 >= vmSize { + v.setIP(0xFFFFFFFF) // trigger end of program + } else { + v.setIP(binary.LittleEndian.Uint32(v.m[r7:])) + v.r[7] += 4 + } +} + +func not(v *vm, bm bool, op []operand) { + op[0].set(v, bm, ^op[0].get(v, bm)) +} + +func shl(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + v2 := op[1].get(v, bm) + r := v1 << v2 + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if (v1<<(v2-1))&0x80000000 > 0 { + v.fl |= flagC + } +} + +func shr(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + v2 := op[1].get(v, bm) + r := 
v1 >> v2 + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if (v1>>(v2-1))&0x1 > 0 { + v.fl |= flagC + } +} + +func sar(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + v2 := op[1].get(v, bm) + r := uint32(int32(v1) >> v2) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if (v1>>(v2-1))&0x1 > 0 { + v.fl |= flagC + } +} + +func neg(v *vm, bm bool, op []operand) { + r := 0 - op[0].get(v, bm) + op[0].set(v, bm, r) + if r == 0 { + v.fl = flagZ + } else { + v.fl = r&flagS | flagC + } +} + +func pusha(v *vm, bm bool, op []operand) { + sp := opD(v.r[7]) + for _, r := range v.r { + sp = (sp - 4) & vmMask + sp.set(v, false, r) + } + v.r[7] = uint32(sp) +} + +func popa(v *vm, bm bool, op []operand) { + sp := opD(v.r[7]) + for i := 7; i >= 0; i-- { + v.r[i] = sp.get(v, false) + sp = (sp + 4) & vmMask + } +} + +func pushf(v *vm, bm bool, op []operand) { + v.r[7] -= 4 + opRI(7).set(v, false, v.fl) +} + +func popf(v *vm, bm bool, op []operand) { + v.fl = opRI(7).get(v, false) + v.r[7] += 4 +} + +func movzx(v *vm, bm bool, op []operand) { + op[0].set(v, false, op[1].get(v, true)) +} + +func movsx(v *vm, bm bool, op []operand) { + op[0].set(v, false, uint32(int8(op[1].get(v, true)))) +} + +func xchg(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + op[0].set(v, bm, op[1].get(v, bm)) + op[1].set(v, bm, v1) +} + +func mul(v *vm, bm bool, op []operand) { + r := op[0].get(v, bm) * op[1].get(v, bm) + op[0].set(v, bm, r) +} + +func div(v *vm, bm bool, op []operand) { + div := op[1].get(v, bm) + if div != 0 { + r := op[0].get(v, bm) / div + op[0].set(v, bm, r) + } +} + +func adc(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + fc := v.fl & flagC + r := v1 + op[1].get(v, bm) + fc + if bm { + r &= 0xFF + } + op[0].set(v, bm, r) + + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if r < v1 || (r == v1 && fc > 0) { + v.fl |= flagC + } +} + +func sbb(v *vm, bm bool, op []operand) { + v1 := op[0].get(v, bm) + fc := v.fl & flagC + r := v1 - op[1].get(v, bm) - fc + if bm { + r &= 0xFF + } + op[0].set(v, bm, r) + + if r == 0 { + v.fl = flagZ + } else { + v.fl = r & flagS + } + if r > v1 || (r == v1 && fc > 0) { + v.fl |= flagC + } +} + +func print(v *vm, bm bool, op []operand) { + // TODO: ignore print for the moment +} + +func decodeArg(br *rarBitReader, byteMode bool) (operand, error) { + n, err := br.readBits(1) + if err != nil { + return nil, err + } + if n > 0 { // Register + n, err = br.readBits(3) + return opR(n), err + } + n, err = br.readBits(1) + if err != nil { + return nil, err + } + if n == 0 { // Immediate + if byteMode { + n, err = br.readBits(8) + } else { + m, err := br.readUint32() + return opI(m), err + } + return opI(n), err + } + n, err = br.readBits(1) + if err != nil { + return nil, err + } + if n == 0 { + // Register Indirect + n, err = br.readBits(3) + return opRI(n), err + } + n, err = br.readBits(1) + if err != nil { + return nil, err + } + if n == 0 { + // Base + Index Indirect + n, err = br.readBits(3) + if err != nil { + return nil, err + } + i, err := br.readUint32() + return opBI{r: uint32(n), i: i}, err + } + // Direct addressing + m, err := br.readUint32() + return opD(m & vmMask), err +} + +func fixJumpOp(op operand, off int) operand { + n, ok := op.(opI) + if !ok { + return op + } + if n >= 256 { + return n - 256 + } + if n >= 136 { + n -= 264 + } else if n >= 16 { + n -= 8 + } else if n >= 8 { + n -= 16 + } + return n + opI(off) +} + +func readCommands(br 
*rarBitReader) ([]command, error) { + var cmds []command + + for { + code, err := br.readBits(4) + if err != nil { + return cmds, err + } + if code&0x08 > 0 { + n, err := br.readBits(2) + if err != nil { + return cmds, err + } + code = (code<<2 | n) - 24 + } + + if code >= len(ops) { + return cmds, errInvalidVMInstruction + } + ins := ops[code] + + var com command + + if ins.byteMode { + n, err := br.readBits(1) + if err != nil { + return cmds, err + } + com.bm = n > 0 + } + com.f = ins.f + + if ins.nops > 0 { + com.op = make([]operand, ins.nops) + com.op[0], err = decodeArg(br, com.bm) + if err != nil { + return cmds, err + } + if ins.nops == 2 { + com.op[1], err = decodeArg(br, com.bm) + if err != nil { + return cmds, err + } + } else if ins.jop { + com.op[0] = fixJumpOp(com.op[0], len(cmds)) + } + } + cmds = append(cmds, com) + } +} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore new file mode 100644 index 00000000..5e987350 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -0,0 +1,34 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml new file mode 100644 index 00000000..fd6c6db7 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -0,0 +1,24 @@ +language: go + +env: + - GO111MODULE=off + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +matrix: + fast_finish: true + allow_failures: + - go: master + +sudo: false + +script: + - go test -v -cpu=2 + - go test -v -cpu=2 -race + - go test -v -cpu=2 -tags noasm + - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 00000000..bd899d83 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md new file mode 100644 index 00000000..4ee388e8 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/README.md @@ -0,0 +1,90 @@ +# lz4 : LZ4 compression in pure Go + +[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) +[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) +[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) + +## Overview + +This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. +The implementation is based on the reference C [one](https://github.com/lz4/lz4). + +## Install + +Assuming you have the go toolchain installed: + +``` +go get github.com/pierrec/lz4 +``` + +There is a command line interface tool to compress and decompress LZ4 files. + +``` +go install github.com/pierrec/lz4/cmd/lz4c +``` + +Usage + +``` +Usage of lz4c: + -version + print the program version + +Subcommands: +Compress the given files or from stdin to stdout. +compress [arguments] [<file name> ...] + -bc + enable block checksum + -l int + compression level (0=fastest) + -sc + disable stream checksum + -size string + block max size [64K,256K,1M,4M] (default "4M") + +Uncompress the given files or from stdin to stdout. +uncompress [arguments] [<file name> ...] + +``` + + +## Example + +``` +// Compress and uncompress an input string. +s := "hello world" +r := strings.NewReader(s) + +// The pipe will uncompress the data from the writer. +pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors + +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. 
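+
+## Block example
+
+Beyond the streaming API, the block functions can be used directly. A minimal,
+hedged sketch (names and sizes are illustrative; per the function docs, a
+compressed size of 0 with a nil error means the input was incompressible):
+
+```
+data := []byte("hello world hello world hello world")
+buf := make([]byte, lz4.CompressBlockBound(len(data)))
+n, _ := lz4.CompressBlock(data, buf, nil)
+if n == 0 {
+	// Incompressible: store data as-is.
+}
+out := make([]byte, len(data))
+m, _ := lz4.UncompressBlock(buf[:n], out)
+_ = out[:m] // the original bytes
+```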
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 00000000..664d9be5 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,413 @@ +package lz4 + +import ( + "encoding/binary" + "math/bits" + "sync" +) + +// blockHash hashes the lower 6 bytes into a value < htSize. +func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src); di >= 0 { + return di, nil + } + return 0, ErrInvalidSourceShortBuffer +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + if len(hashTable) < htSize { + htIface := htPool.Get() + defer htPool.Put(htIface) + hashTable = (*(htIface).(*[htSize]int))[:] + } + // Prove to the compiler the table has at least htSize elements. + // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. + hashTable = hashTable[:htSize] + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. + ref := hashTable[h] + ref2 := hashTable[h2] + hashTable[h] = si + hashTable[h2] = si + 1 + offset := si - ref + + // If offset <= 0 we got an old entry in the hash table. + if offset <= 0 || offset >= winSize || // Out of window. + uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // No match. 
Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref = hashTable[h] + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || + uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref + hashTable[h] = si + + if offset <= 0 || offset >= winSize || + uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 < sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + _ = dst[di] // Bound check elimination. + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + hashTable[h] = si - 2 + } + +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// Pool of hash tables for CompressBlock. +var htPool = sync.Pool{ + New: func() interface{} { + return new([htSize]int) + }, +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + +// CompressBlockHC compresses the source buffer src into the destination dst +// with max search depth (use 0 or negative value for no max). +// +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. 
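+//
+// For example, depth == 64 limits each backward chain search to 64 candidate
+// positions, while depth <= 0 searches the full window.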
+// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + var si, di, anchor int + + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + var hashTable, chainTable [winSize]int + + if depth <= 0 { + depth = winSize + } + + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHashHC(match) + + // Follow the chain until out of window and give the longest match. + mLen := 0 + offset := 0 + for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. + if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop is first non-zero byte. + ml += bits.TrailingZeros64(x) >> 3 + break + } + } + if ml < minMatch || ml <= mLen { + // Match too small (<minMatch) or smaller than the current match. + continue + } + // Found a longer match, keep its position and length. + mLen = ml + offset = si - next + // Try another previous position with the same hash. + try-- + } + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + + // No match found. + if mLen == 0 { + si += 1 + (si-anchor)>>adaptSkipLog + continue + } + + // Match found. + // Update hash/chain tables with overlapping bytes: + // si already hashed, add everything from si+1 up to the match length. + winStart := si + 1 + if ws := si + mLen - winSize; ws > winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHashHC(match) + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + si++ + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // Match length does not include minMatch. + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + anchor = si + + // Encode offset. + di += 2 + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. +lastLiterals: + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. 
+ return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 00000000..bc5e78d4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...) +} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 00000000..44211ad9 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 00000000..43cc14fb --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 00000000..20fef397 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. 
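+ // (CX now holds the token's low 4 bits, the raw match length; the 2-byte
+ // little-endian offset follows the literals just copied.)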
+ // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // whats a good cut off to call memmove? + CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // lit_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 
copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop + + JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 00000000..919888ed --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
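+ // The match overlaps the bytes still being written (mLen > offset), so
+ // the output repeats with period offset: doubling the copied prefix
+ // rebuilds the run in O(log(mLen/offset)) copies instead of one byte
+ // at a time.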
+			bytesToCopy := offset * (mLen / offset)
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += copy(dst[di:di+mLen], expanded[:mLen])
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
new file mode 100644
index 00000000..1c45d181
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/errors.go
@@ -0,0 +1,30 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	rdebug "runtime/debug"
+)
+
+var (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
+	// ErrInvalid is returned when reading an invalid LZ4 archive.
+	ErrInvalid = errors.New("lz4: bad magic number")
+	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
+	ErrBlockDependency = errors.New("lz4: block dependency not supported")
+	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
+	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		if debugFlag {
+			fmt.Fprintln(os.Stderr, r)
+			rdebug.PrintStack()
+		}
+		*e = ErrInvalidSourceShortBuffer
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
new file mode 100644
index 00000000..7a76a6bc
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,223 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
+// (https://github.com/Cyan4973/XXH/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v1       uint32
+	v2       uint32
+	v3       uint32
+	v4       uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v1 = prime1plus2
+	xxh.v2 = prime2
+	xxh.v3 = 0
+	xxh.v4 = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
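decodeBlock above is the pure-Go fallback for the amd64 assembly earlier in this patch, and recoverBlock maps its failures onto ErrInvalidSourceShortBuffer. A hedged sketch of how the exported block API is typically driven; CompressBlockBound, CompressBlock and UncompressBlock are defined elsewhere in this vendored version of github.com/pierrec/lz4, and the signatures below are the ones this version exposes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	data := []byte("hello hello hello hello hello hello hello")

	// Compress a single raw block. CompressBlock requires a caller-provided
	// hash table with winSize (1<<16) entries.
	var ht [1 << 16]int
	buf := make([]byte, lz4.CompressBlockBound(len(data)))
	n, err := lz4.CompressBlock(data, buf, ht[:])
	if err != nil || n == 0 { // n == 0 means the input was incompressible
		log.Fatalf("compress: n=%d err=%v", n, err)
	}

	// Decompress it again. dst must be large enough for the original data;
	// corrupt input or a short dst surfaces as ErrInvalidSourceShortBuffer.
	out := make([]byte, len(data))
	m, err := lz4.UncompressBlock(buf[:n], out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d -> %d -> %d bytes\n", len(data), n, m)
}
```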
+func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. +func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
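Write, Sum32 and ChecksumZero above must agree: feeding a stream incrementally has to produce the same value as the one-shot function. A small example-test sketch of that contract; note xxh32 is an internal package, so this would only compile inside github.com/pierrec/lz4 itself (e.g. as a hypothetical xxh32zero_test.go):

```go
package xxh32

import "fmt"

func ExampleXXHZero() {
	data := []byte("xxh32 streaming equals one-shot")

	// Streaming checksum, fed in two pieces. Reset installs the seed-dependent
	// initial state (Write also does this lazily on first use).
	var h XXHZero
	h.Reset()
	_, _ = h.Write(data[:9])
	_, _ = h.Write(data[9:])

	// The incremental result matches the one-shot checksum.
	fmt.Println(h.Sum32() == ChecksumZero(data))
	// Output: true
}
```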
+func Uint32Zero(x uint32) uint32 { + h := prime5 + 4 + x*prime3 + h = rol17(h) * prime4 + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + return h +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 00000000..a3284bdf --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,116 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +// +package lz4 + +import ( + "math/bits" + "sync" +) + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + frameMagicLegacy uint32 = 0x184C2102 + + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + compressedBlockFlag = 1 << 31 + compressedBlockMask = compressedBlockFlag - 1 + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +const ( + blockSize64K = 1 << (16 + 2*iota) + blockSize256K + blockSize1M + blockSize4M +) + +var ( + // Keep a pool of buffers for each valid block sizes. + bsMapValue = [...]*sync.Pool{ + newBufferPool(2 * blockSize64K), + newBufferPool(2 * blockSize256K), + newBufferPool(2 * blockSize1M), + newBufferPool(2 * blockSize4M), + } +) + +// newBufferPool returns a pool for buffers of the given size. +func newBufferPool(size int) *sync.Pool { + return &sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + } +} + +// getBuffer returns a buffer to its pool. +func getBuffer(size int) []byte { + idx := blockSizeValueToIndex(size) - 4 + return bsMapValue[idx].Get().([]byte) +} + +// putBuffer returns a buffer to its pool. 
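The block-size constants above map frame descriptor IDs 4..7 to 64 KB, 256 KB, 1 MB and 4 MB via size = 1 << (16 + 2*i), and the pool lookup relies on the inverse of that mapping (the helpers appear just below). A standalone sketch of the arithmetic; the helper names here are mine, not the package's:

```go
package main

import (
	"fmt"
	"math/bits"
)

// idToSize mirrors blockSizeIndexToValue below (which takes the ID 0-based;
// the frame descriptor stores it 4-based).
func idToSize(id byte) int { return 1 << (16 + 2*uint(id-4)) }

// sizeToID mirrors blockSizeValueToIndex below.
func sizeToID(size int) byte { return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) }

func main() {
	for id := byte(4); id <= 7; id++ {
		size := idToSize(id)
		fmt.Printf("ID %d <-> %7d bytes (round trip: %d)\n", id, size, sizeToID(size))
	}
}
```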
+func putBuffer(size int, buf []byte) { + if cap(buf) > 0 { + idx := blockSizeValueToIndex(size) - 4 + bsMapValue[idx].Put(buf[:cap(buf)]) + } +} +func blockSizeIndexToValue(i byte) int { + return 1 << (16 + 2*uint(i)) +} +func isValidBlockSize(size int) bool { + const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M + + return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 +} +func blockSizeValueToIndex(size int) byte { + return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition +// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller's responsibility to check them if necessary. +type Header struct { + BlockChecksum bool // Compressed blocks checksum flag. + NoChecksum bool // Frame checksum flag. + BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // Frame total size. It is _not_ computed by the Writer. + CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). + done bool // Header processed flag (Read or Write and checked). +} + +// Reset reset internal status +func (h *Header) Reset() { + h.done = false +} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go new file mode 100644 index 00000000..9a0fb007 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go @@ -0,0 +1,29 @@ +//+build go1.10 + +package lz4 + +import ( + "fmt" + "strings" +) + +func (h Header) String() string { + var s strings.Builder + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go new file mode 100644 index 00000000..12c761a2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go @@ -0,0 +1,29 @@ +//+build !go1.10 + +package lz4 + +import ( + "bytes" + "fmt" +) + +func (h Header) String() string { + var s bytes.Buffer + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 00000000..87dd72bd --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,335 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). 
+// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + r := &Reader{src: src} + return r +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. +func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + buf := z.buf[:] + for { + magic, err := z.readUint32() + if err != nil { + z.pos += 4 + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + if magic == frameMagic { + break + } + if magic>>8 != frameSkipMagic>>8 { + return ErrInvalid + } + skipSize, err := z.readUint32() + if err != nil { + return err + } + z.pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + if err != nil { + return err + } + z.pos += m + } + + // Header. + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.pos += 8 + + b := buf[0] + if v := b >> 6; v != Version { + return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) + } + if b>>5&1 == 0 { + return ErrBlockDependency + } + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + + bmsID := buf[1] >> 4 & 0x7 + if bmsID < 4 || bmsID > 7 { + return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) + } + bSize := blockSizeIndexToValue(bmsID - 4) + z.BlockMaxSize = bSize + + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + if debugFlag { + debug("header block max size id=%d size=%d", bmsID, bSize) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = len(z.data) + + _, _ = z.checksum.Write(buf[0:2]) + + if frameSize { + buf := buf[:8] + if _, err := io.ReadFull(z.src, buf); err != nil { + return err + } + z.Size = binary.LittleEndian.Uint64(buf) + z.pos += 8 + _, _ = z.checksum.Write(buf) + } + + // Header checksum. + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) + } + + z.Header.done = true + if debugFlag { + debug("header read: %v", z.Header) + } + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. 
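readHeader below unpacks the frame descriptor's FLG byte bit by bit: the version in bits 7-6, block independence in bit 5, then the block-checksum, content-size and content-checksum flags. A minimal standalone sketch of that decoding, assuming the bit layout exactly as this package reads it:

```go
package main

import "fmt"

func main() {
	// FLG byte: bits 7-6 version, bit 5 block independence, bit 4 block
	// checksum, bit 3 content size, bit 2 content checksum.
	const flg byte = 0x64 // binary 0110 0100

	fmt.Println("version:          ", flg>>6)
	fmt.Println("block independent:", flg>>5&1 == 1)
	fmt.Println("block checksum:   ", flg>>4&1 == 1)
	fmt.Println("content size:     ", flg>>3&1 == 1)
	fmt.Println("content checksum: ", flg>>2&1 == 1)
}
```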
+func (z *Reader) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } + if !z.Header.done { + if err := z.readHeader(true); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) + } + } + + if len(buf) == 0 { + return 0, nil + } + + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug("reading block from writer") + } + // Reset uncompressed buffer + z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] + + // Block length: 0 = end of frame, highest bit set: uncompressed. + bLen, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if bLen == 0 { + // End of frame reached. + if !z.NoChecksum { + // Validate the frame checksum. + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) + } + + if debugFlag { + debug("raw block size %d", bLen) + } + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + if z.OnBlockDone != nil { + z.OnBlockDone(int(bLen)) + } + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + } else { + // Compressed block. + if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) + } + } + + if !z.NoChecksum { + _, _ = z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + } + z.idx = 0 + } + + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("copied %d bytes to input", n) + } + + return n, nil +} + +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. 
Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.pos = 0 + z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 + z.checksum.Reset() +} + +// readUint32 reads an uint32 into the supplied buffer. +// The idea is to make use of the already allocated buffers avoiding additional allocations. +func (z *Reader) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err +} diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go new file mode 100644 index 00000000..1670a77d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader_legacy.go @@ -0,0 +1,207 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" +) + +// ReaderLegacy implements the LZ4Demo frame decoder. +// The Header is set after the first call to Read(). +type ReaderLegacy struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + lastBlock bool + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest +} + +// NewReaderLegacy returns a new LZ4Demo frame decoder. +// No access to the underlying io.Reader is performed. +func NewReaderLegacy(src io.Reader) *ReaderLegacy { + r := &ReaderLegacy{src: src} + return r +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. +func (z *ReaderLegacy) readLegacyHeader() error { + z.lastBlock = false + magic, err := z.readUint32() + if err != nil { + z.pos += 4 + if err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + if magic != frameMagicLegacy { + return ErrInvalid + } + z.pos += 4 + + // Legacy has fixed 8MB blocksizes + // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame + bSize := blockSize4M * 2 + + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + if debugFlag { + debug("header block max size size=%d", bSize) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = len(z.data) + + z.Header.done = true + if debugFlag { + debug("header read: %v", z.Header) + } + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. 
+// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. +func (z *ReaderLegacy) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } + if !z.Header.done { + if err := z.readLegacyHeader(); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) + } + } + + if len(buf) == 0 { + return 0, nil + } + + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug(" reading block from writer %d %d", z.idx, blockSize4M*2) + } + + // Reset uncompressed buffer + z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] + + bLen, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos) + } + z.pos += 4 + + // Legacy blocks are always compressed, even when detrimental + if debugFlag { + debug(" compressed block size %d", bLen) + } + + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) + } + + z.idx = 0 + + // Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this + // it means we've reached the end... + if n < blockSize4M*2 { + z.lastBlock = true + } + } + + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data)) + } + if z.lastBlock && len(z.data) == z.idx { + return n, io.EOF + } + return n, nil +} + +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *ReaderLegacy) Reset(r io.Reader) { + z.Header = Header{} + z.pos = 0 + z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 +} + +// readUint32 reads an uint32 into the supplied buffer. +// The idea is to make use of the already allocated buffers avoiding additional allocations. 
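With the frame Reader (and its legacy variant) in place, the typical way to consume it is io.Copy, optionally preceded by a forward Seek to skip decompressed output the caller does not need. A hedged usage sketch; the input file name is hypothetical:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	f, err := os.Open("in.lz4") // hypothetical LZ4 frame input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	zr.OnBlockDone = func(n int) { log.Printf("decoded block of %d bytes", n) }

	// Skip the first 4 KiB of *decompressed* output. Only io.SeekCurrent
	// with a non-negative offset is accepted; anything else returns
	// ErrUnsupportedSeek.
	if _, err := zr.Seek(4096, io.SeekCurrent); err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}
```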
+func (z *ReaderLegacy) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 00000000..f066d563 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,422 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + "runtime" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// zResult contains the results of compressing a block. +type zResult struct { + size uint32 // Block header + data []byte // Compressed data + checksum uint32 // Data checksum +} + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + // Handler called when a block has been successfully written out. + // It provides the number of bytes written. + OnBlockDone func(size int) + + buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + dst io.Writer // Destination. + checksum xxh32.XXHZero // Frame checksum. + data []byte // Data to be compressed + buffer for compressed data. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). + + // For concurrency. + c chan chan zResult // Channel for block compression goroutines and writer goroutine. + err error // Any error encountered while writing to the underlying destination. +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + z := new(Writer) + z.Reset(dst) + return z +} + +// WithConcurrency sets the number of concurrent go routines used for compression. +// A negative value sets the concurrency to GOMAXPROCS. +func (z *Writer) WithConcurrency(n int) *Writer { + switch { + case n == 0 || n == 1: + z.c = nil + return z + case n < 0: + n = runtime.GOMAXPROCS(0) + } + z.c = make(chan chan zResult, n) + // Writer goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range z.c { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + res := <-c + n := len(res.data) + if n == 0 { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Write the block. + if err := z.writeUint32(res.size); err != nil && z.err == nil { + z.err = err + } + if _, err := z.dst.Write(res.data); err != nil && z.err == nil { + z.err = err + } + if z.BlockChecksum { + if err := z.writeUint32(res.checksum); err != nil && z.err == nil { + z.err = err + } + } + // It is now safe to release the buffer as no longer in use by any goroutine. + putBuffer(cap(res.data), res.data) + if h := z.OnBlockDone; h != nil { + h(n) + } + close(c) + } + }() + return z +} + +// newBuffers instantiates new buffers which size matches the one in Header. +// The returned buffers are for decompression and compression respectively. 
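WithConcurrency above keeps compressed blocks in order by enqueuing one result channel per block at submission time and letting a single writer goroutine drain the queue FIFO: workers may finish out of order, but their results are consumed in submission order. A standalone sketch of just that pattern (all names here are mine):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	queue := make(chan chan string, 4) // plays the role of Writer.c

	done := make(chan struct{})
	go func() { // single consumer, like the writer goroutine above
		for c := range queue {
			fmt.Println(<-c) // blocks until *this* job's result is ready
		}
		close(done)
	}()

	for i := 0; i < 4; i++ {
		c := make(chan string)
		queue <- c // reserve the output slot in submission order
		go func(i int, c chan string) {
			// Deliberately finish in reverse order.
			time.Sleep(time.Duration(4-i) * 10 * time.Millisecond)
			c <- fmt.Sprintf("job %d", i)
		}(i, c)
	}
	close(queue)
	<-done // prints job 0..3 in order despite out-of-order completion
}
```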
+func (z *Writer) newBuffers() { + bSize := z.Header.BlockMaxSize + buf := getBuffer(bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. +} + +// freeBuffers puts the writer's buffers back to the pool. +func (z *Writer) freeBuffers() { + // Put the buffer back into the pool, if any. + putBuffer(z.Header.BlockMaxSize, z.data) + z.data = nil +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set. + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = blockSize4M + } + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + if !isValidBlockSize(z.Header.BlockMaxSize) { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) + } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + z.newBuffers() + z.idx = 0 + + // Size is optional. + buf := z.buf[:] + + // Set the fixed size data: magic number, block max size and flags. + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + flg |= 1 << 5 // No block dependency. + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + buf[4] = flg + buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 + + // Current buffer size: magic(4) + flags(1) + block max size (1). + n := 6 + // Optional items. + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) + z.checksum.Reset() + + // Header ready, write it out. + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *Writer) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// compressBlock compresses a block. 
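writeHeader below validates and freezes the Header at the first Write, so any tuning has to happen before that point. A hedged usage sketch against this vendored API:

```go
package main

import (
	"bytes"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	var out bytes.Buffer

	zw := lz4.NewWriter(&out)
	// Configure before the first Write; writeHeader checks and freezes this.
	zw.Header = lz4.Header{
		BlockMaxSize:     256 << 10, // must be one of 64KB/256KB/1MB/4MB
		BlockChecksum:    true,
		CompressionLevel: 0, // 0 selects the fast compressor
	}

	if _, err := zw.Write([]byte("payload to compress")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // flushes and writes the end mark
		log.Fatal(err)
	}
	log.Printf("frame is %d bytes", out.Len())
}
```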
+func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + + // get a buffer from the pool and copy the data over + block := getBuffer(z.Header.BlockMaxSize)[:len(data)] + copy(block, data) + + go writerCompressBlock(c, z.Header, block) + return nil + } + + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) + } + + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = zdata[:zn] + } else { + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data + } + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) + } + + // Write the block. + if err := z.writeUint32(bLen); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + + if !z.BlockChecksum { + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + return nil + } + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() + } + return z.writeUint32(checksum) +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *Writer) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])] + copy(data, z.data[:z.idx]) + + z.idx = 0 + if z.c == nil { + return z.compressBlock(data) + } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) + return nil +} + +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. + <-c + // At this point the main goroutine has shut down or is about to return. + z.c = nil + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + if err := z.close(); err != nil { + return err + } + z.freeBuffers() + + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { + return err + } + if z.NoChecksum { + return nil + } + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + return z.writeUint32(checksum) +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. 
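compressBlock, Flush and Close below complete the frame: Close flushes buffered data, writes the zero end-of-frame marker and, unless disabled, the frame checksum. A small round-trip sketch tying Writer and Reader together (io/ioutil is used deliberately, since this vendored code itself targets pre-Go-1.16 APIs):

```go
package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 round trip "), 1000)

	// Compress into an in-memory frame.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.Write(src); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back and verify the round trip.
	got, err := ioutil.ReadAll(lz4.NewReader(&frame))
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(src, got) {
		log.Fatal("round trip mismatch")
	}
	log.Printf("ok: %d bytes -> %d compressed", len(src), frame.Len())
}
```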
+func (z *Writer) Reset(w io.Writer) { + n := cap(z.c) + _ = z.close() + z.freeBuffers() + z.Header.Reset() + z.dst = w + z.checksum.Reset() + z.idx = 0 + z.err = nil + // reset hashtable to ensure deterministic output. + for i := range z.hashtable { + z.hashtable[i] = 0 + } + z.WithConcurrency(n) +} + +// writeUint32 writes a uint32 to the underlying writer. +func (z *Writer) writeUint32(x uint32) error { + buf := z.buf[:4] + binary.LittleEndian.PutUint32(buf, x) + _, err := z.dst.Write(buf) + return err +} + +// writerCompressBlock compresses data into a pooled buffer and writes its result +// out to the input channel. +func writerCompressBlock(c chan zResult, header Header, data []byte) { + zdata := getBuffer(header.BlockMaxSize) + // The compressed block size cannot exceed the input's. + var zn int + if level := header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + var hashTable [winSize]int + zn, _ = CompressBlock(data, zdata, hashTable[:]) + } + var res zResult + if zn > 0 && zn < len(data) { + res.size = uint32(zn) + res.data = zdata[:zn] + // release the uncompressed block since it is not used anymore + putBuffer(header.BlockMaxSize, data) + } else { + res.size = uint32(len(data)) | compressedBlockFlag + res.data = data + // release the compressed block since it was not used + putBuffer(header.BlockMaxSize, zdata) + } + if header.BlockChecksum { + res.checksum = xxh32.ChecksumZero(res.data) + } + c <- res +} diff --git a/vendor/github.com/pierrec/lz4/writer_legacy.go b/vendor/github.com/pierrec/lz4/writer_legacy.go new file mode 100644 index 00000000..ca8dc8c7 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer_legacy.go @@ -0,0 +1,182 @@ +package lz4 + +import ( + "encoding/binary" + "io" +) + +// WriterLegacy implements the LZ4Demo frame decoder. +type WriterLegacy struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + dst io.Writer // Destination. + data []byte // Data to be compressed + buffer for compressed data. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). +} + +// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriterLegacy(dst io.Writer) *WriterLegacy { + z := new(WriterLegacy) + z.Reset(dst) + return z +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *WriterLegacy) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. 
+ if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// writeHeader builds and writes the header to the underlying io.Writer. +func (z *WriterLegacy) writeHeader() error { + // Legacy has fixed 8MB blocksizes + // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame + bSize := 2 * blockSize4M + + buf := make([]byte, 2*bSize, 2*bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. + + z.idx = 0 + + // Header consists of one mageic number, write it out. + if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// compressBlock compresses a block. +func (z *WriterLegacy) compressBlock(data []byte) error { + bSize := 2 * blockSize4M + zdata := z.data[bSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) + } + + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + zdata = zdata[:zn] + + // Write the block. + if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + return nil +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *WriterLegacy) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + data := z.data[:z.idx] + z.idx = 0 + return z.compressBlock(data) +} + +// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *WriterLegacy) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + + if debugFlag { + debug("writing last empty block") + } + + return nil +} + +// Reset clears the state of the WriterLegacy z such that it is equivalent to its +// initial state from NewWriterLegacy, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *WriterLegacy) Reset(w io.Writer) { + z.Header.Reset() + z.dst = w + z.idx = 0 + // reset hashtable to ensure deterministic output. 
+ for i := range z.hashtable { + z.hashtable[i] = 0 + } +} diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 00000000..e3c2fc2f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,25 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 00000000..009b8487 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2021 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 00000000..0a2dc828 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,73 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +## Using the API + +The following example program shows how to use the API. 
+ +```go +package main + +import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" +) + +func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } +} +``` + +## Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it use the following command. + + $ gxz -d bigfile.xz + diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md new file mode 100644 index 00000000..5f7ec01b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -0,0 +1,10 @@ +# Security Policy + +## Supported Versions + +Currently the last minor version v0.5.x is supported. + +## Reporting a Vulnerability + +Report a vulnerability by creating a Github issue at +. Expect a response in a week. diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md new file mode 100644 index 00000000..594e0c7f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -0,0 +1,363 @@ +# TODO list + +## Release v0.5.x + +1. Support check flag in gxz command. + +## Release v0.6 + +1. Review encoder and check for lzma improvements under xz. +2. Fix binary tree matcher. +3. Compare compression ratio with xz tool using comparable parameters and optimize parameters +4. rename operation action and make it a simple type of size 8 +5. make maxMatches, wordSize parameters +6. stop searching after a certain length is found (parameter sweetLen) + +## Release v0.7 + +1. Optimize code +2. Do statistical analysis to get linear presets. +3. Test sync.Pool compatability for xz and lzma Writer and Reader +4. Fuzz optimized code. + +## Release v0.8 + +1. Support parallel go routines for writing and reading xz files. +2. Support a ReaderAt interface for xz files with small block sizes. +3. Improve compatibility between gxz and xz +4. Provide manual page for gxz + +## Release v0.9 + +1. Improve documentation +2. Fuzz again + +## Release v1.0 + +1. Full functioning gxz +2. Add godoc URL to README.md (godoc.org) +3. Resolve all issues. +4. Define release candidates. +5. Public announcement. + +## Package lzma + +### v0.6 + +* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including + * simple scan at the dictionary head for the same byte + * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) + +## Optimizations + +* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it. + +* Check whether batching encoding and decoding improves speed. + +### DAG optimizations + +* Use full buffer to create minimal bit-length above range encoder. 
+* Might be too slow (see v0.4) + +### Different match finders + +* hashes with 2, 3 characters additional to 4 characters +* binary trees with 2-7 characters (uint64 as key, use uint32 as + + pointers into a an array) + +* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + + into an array with bit-steeling for the colors) + +## Release Procedure + +* execute goch -l for all packages; probably with lower param like 0.5. +* check orthography with gospell +* Write release notes in doc/relnotes. +* Update README.md +* xb copyright . in xz directory to ensure all new files have Copyright header +* `VERSION= go generate github.com/ulikunitz/xz/...` to update version files +* Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +* Update TODO.md - write short log entry +* `git checkout master && git merge dev` +* `git tag -a ` +* `git push` + +## Log + +### 2021-02-02 + +Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The +function allocated a slice of records immediately after reading the value +without further checks. Since the number has been too large the make function +did panic. The fix is to check the number against the expected number of records +before allocating the records. + +### 2020-12-17 + +Release v0.5.9 fixes warnings, a typo and adds SECURITY.md. + +One fix is interesting. + +```go +const ( + a byte = 0x1 + b = 0x2 +) +``` + +The constants a and b don't have the same type. Correct is + +```go +const ( + a byte = 0x1 + b byte = 0x2 +) +``` + +### 2020-08-19 + +Release v0.5.8 fixes issue +[issue #35](https://github.com/ulikunitz/xz/issues/35). + +### 2020-02-24 + +Release v0.5.7 supports the check-ID None and fixes +[issue #27](https://github.com/ulikunitz/xz/issues/27). + +### 2019-02-20 + +Release v0.5.6 supports the go.mod file. + +### 2018-10-28 + +Release v0.5.5 fixes issues #19 observing ErrLimit outputs. + +### 2017-06-05 + +Release v0.5.4 fixes issues #15 of another problem with the padding size +check for the xz block header. I removed the check completely. + +### 2017-02-15 + +Release v0.5.3 fixes issue #12 regarding the decompression of an empty +XZ stream. Many thanks to Tomasz Kłak, who reported the issue. + +### 2016-12-02 + +Release v0.5.2 became necessary to allow the decoding of xz files with +4-byte padding in the block header. Many thanks to Greg, who reported +the issue. + +### 2016-07-23 + +Release v0.5.1 became necessary to fix problems with 32-bit platforms. +Many thanks to Bruno Brigas, who reported the issue. + +### 2016-07-04 + +Release v0.5 provides improvements to the compressor and provides support for +the decompression of xz files with multiple xz streams. + +### 2016-01-31 + +Another compression rate increase by checking the byte at length of the +best match first, before checking the whole prefix. This makes the +compressor even faster. We have now a large time budget to beat the +compression ratio of the xz tool. For enwik8 we have now over 40 seconds +to reduce the compressed file size for another 7 MiB. + +### 2016-01-30 + +I simplified the encoder. Speed and compression rate increased +dramatically. A high compression rate affects also the decompression +speed. The approach with the buffer and optimizing for operation +compression rate has not been successful. Going for the maximum length +appears to be the best approach. + +### 2016-01-28 + +The release v0.4 is ready. It provides a working xz implementation, +which is rather slow, but works and is interoperable with the xz tool. 
+It is an important milestone. + +### 2016-01-10 + +I have the first working implementation of an xz reader and writer. I'm +happy about reaching this milestone. + +### 2015-12-02 + +I'm now ready to implement xz because, I have a working LZMA2 +implementation. I decided today that v0.4 will use the slow encoder +using the operations buffer to be able to go back, if I intend to do so. + +### 2015-10-21 + +I have restarted the work on the library. While trying to implement +LZMA2, I discovered that I need to resimplify the encoder and decoder +functions. The option approach is too complicated. Using a limited byte +writer and not caring for written bytes at all and not to try to handle +uncompressed data simplifies the LZMA encoder and decoder much. +Processing uncompressed data and handling limits is a feature of the +LZMA2 format not of LZMA. + +I learned an interesting method from the LZO format. If the last copy is +too far away they are moving the head one 2 bytes and not 1 byte to +reduce processing times. + +### 2015-08-26 + +I have now reimplemented the lzma package. The code is reasonably fast, +but can still be optimized. The next step is to implement LZMA2 and then +xz. + +### 2015-07-05 + +Created release v0.3. The version is the foundation for a full xz +implementation that is the target of v0.4. + +### 2015-06-11 + +The gflag package has been developed because I couldn't use flag and +pflag for a fully compatible support of gzip's and lzma's options. It +seems to work now quite nicely. + +### 2015-06-05 + +The overflow issue was interesting to research, however Henry S. Warren +Jr. Hacker's Delight book was very helpful as usual and had the issue +explained perfectly. Fefe's information on his website was based on the +C FAQ and quite bad, because it didn't address the issue of -MININT == +MININT. + +### 2015-06-04 + +It has been a productive day. I improved the interface of lzma. Reader +and lzma. Writer and fixed the error handling. + +### 2015-06-01 + +By computing the bit length of the LZMA operations I was able to +improve the greedy algorithm implementation. By using an 8 MByte buffer +the compression rate was not as good as for xz but already better then +gzip default. + +Compression is currently slow, but this is something we will be able to +improve over time. + +### 2015-05-26 + +Checked the license of ogier/pflag. The binary lzmago binary should +include the license terms for the pflag library. + +I added the endorsement clause as used by Google for the Go sources the +LICENSE file. + +### 2015-05-22 + +The package lzb contains now the basic implementation for creating or +reading LZMA byte streams. It allows the support for the implementation +of the DAG-shortest-path algorithm for the compression function. + +### 2015-04-23 + +Completed yesterday the lzbase classes. I'm a little bit concerned that +using the components may require too much code, but on the other hand +there is a lot of flexibility. + +### 2015-04-22 + +Implemented Reader and Writer during the Bayern game against Porto. The +second half gave me enough time. + +### 2015-04-21 + +While showering today morning I discovered that the design for OpEncoder +and OpDecoder doesn't work, because encoding/decoding might depend on +the current status of the dictionary. This is not exactly the right way +to start the day. + +Therefore we need to keep the Reader and Writer design. This time around +we simplify it by ignoring size limits. These can be added by wrappers +around the Reader and Writer interfaces. 
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I have already
+implemented the type State, which replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago is using it now. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec was copied yesterday to lzma2. opCodec has a high number of
+dependencies on other files in lzma2. Therefore I had to copy almost
+all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" presents the idea that, given an embedded field E, all
+the methods of E will be defined on T. If E is an interface, T
+satisfies E.
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to provide
+baseReader and baseWriter types that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4G file
+   * compression worked correctly; tested decompression with lzma
+   * decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+* Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 00000000..e48450c2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,79 @@
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+ "errors"
+ "io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+ p[0] = byte(x)
+ p[1] = byte(x >> 8)
+ p[2] = byte(x >> 16)
+ p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+ p[0] = byte(x)
+ p[1] = byte(x >> 8)
+ p[2] = byte(x >> 16)
+ p[3] = byte(x >> 24)
+ p[4] = byte(x >> 32)
+ p[5] = byte(x >> 40)
+ p[6] = byte(x >> 48)
+ p[7] = byte(x >> 56)
+}
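+
+// A quick round-trip illustration of the helpers above and below (a
+// sketch; the stdlib's encoding/binary.LittleEndian functions are the
+// equivalent of these package-local helpers):
+//
+//	p := make([]byte, 4)
+//	putUint32LE(p, 0x01020304) // p == []byte{0x04, 0x03, 0x02, 0x01}
+//	x := uint32LE(p)           // x == 0x01020304
+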
+// uint32LE converts a little-endian representation to a uint32 value.
+func uint32LE(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+ uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+ i := 0
+ for x >= 0x80 {
+ p[i] = byte(x) | 0x80
+ x >>= 7
+ i++
+ }
+ p[i] = byte(x)
+ return i + 1
+}
+
+// errOverflowU64 indicates an overflow of a 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+ const maxUvarintLen = 10
+
+ var s uint
+ i := 0
+ for {
+ b, err := r.ReadByte()
+ if err != nil {
+ return x, i, err
+ }
+ i++
+ if i > maxUvarintLen {
+ return x, i, errOverflowU64
+ }
+ if b < 0x80 {
+ if i == maxUvarintLen && b > 1 {
+ return x, i, errOverflowU64
+ }
+ return x | uint64(b)<<s, i, nil
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+}
+
+// allZeros checks whether the given byte slice consists only of zeros.
+func allZeros(p []byte) bool {
+ for _, c := range p {
+ if c != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// padLen returns the length of the padding required to align n to a
+// multiple of four bytes.
+func padLen(n int64) int {
+ k := int(n % 4)
+ if k > 0 {
+ k = 4 - k
+ }
+ return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header.
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+ None byte = 0x0
+ CRC32 byte = 0x1
+ CRC64 byte = 0x4
+ SHA256 byte = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
+func verifyFlags(flags byte) error {
+ switch flags {
+ case None, CRC32, CRC64, SHA256:
+ return nil
+ default:
+ return errInvalidFlags
+ }
+}
+
+// flagstrings maps flag values to strings.
+var flagstrings = map[byte]string{
+ None: "None",
+ CRC32: "CRC-32",
+ CRC64: "CRC-64",
+ SHA256: "SHA-256",
+}
+
+// flagString returns the string representation for the given flags.
+func flagString(flags byte) string {
+ s, ok := flagstrings[flags]
+ if !ok {
+ return "invalid"
+ }
+ return s
+}
+
+// newHashFunc returns a function that creates hash instances for the
+// hash method encoded in flags.
+func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
+ switch flags {
+ case None:
+ newHash = newNoneHash
+ case CRC32:
+ newHash = newCRC32
+ case CRC64:
+ newHash = newCRC64
+ case SHA256:
+ newHash = sha256.New
+ default:
+ err = errInvalidFlags
+ }
+ return
+}
+
+// header provides the actual content of the xz file header: the flags.
+type header struct {
+ flags byte
+}
+
+// Errors returned by readHeader.
+var errHeaderMagic = errors.New("xz: invalid header magic bytes")
+
+// ValidHeader checks whether data is a correct xz file header. The
+// length of data must be HeaderLen.
+func ValidHeader(data []byte) bool {
+ var h header
+ err := h.UnmarshalBinary(data)
+ return err == nil
+}
+
+// String returns a string representation of the flags.
+func (h header) String() string {
+ return flagString(h.flags)
+}
+
+// UnmarshalBinary reads the header from the provided data slice.
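+// For reference, the layout that this method validates is:
+//
+//	data[0:6]  magic bytes 0xfd '7' 'z' 'X' 'Z' 0x00
+//	data[6:8]  stream flags (the first flag byte must be zero)
+//	data[8:12] little-endian CRC-32 of the stream flags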
+func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. 
+func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. 
See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. +func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. 
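+// The format allows between minFilters (1) and maxFilters (4) filters
+// per block, but this implementation accepts exactly one, which must
+// be the LZMA2 filter handled by readFilter above.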
+func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
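+// After the indicator byte the index consists of a uvarint record
+// count, the records themselves, zero padding up to a multiple of four
+// bytes, and a little-endian CRC-32 computed over everything read so
+// far, including the indicator.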
+func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + if recLen != expectedRecordLen { + return nil, n, fmt.Errorf( + "xz: index length is %d; want %d", + recLen, expectedRecordLen) + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz new file mode 100644 index 0000000000000000000000000000000000000000..46043f7dc89b610dc3badb9db3426620c4c97462 GIT binary patch literal 96 zcmexsUKJ6=z`*cd=%ynRgCe6CkX@qxbTK1?PDnLRM*R tL9s%9S!$6&2~avGv8qxbB|lw{3#g5Ofzej?!NQIFY(?{`7{LOOQ2>-O93KDx literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000000000000000000000000000000000000..4b820bd5a16e83fe5db4fb315639a4337f862483 GIT binary patch literal 104 zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy za|?dFO$zmVVdxt0+m!4eq- E0K@hlng9R* literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 00000000..f723cf25 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. 
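+//
+// A minimal usage sketch (values are illustrative):
+//
+//	r := NewCyclicPoly(4)
+//	var h uint64
+//	for _, c := range []byte("abcd") {
+//		h = r.RollByte(c)
+//	}
+//	// h is now the hash of the 4-byte window "abcd".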
+func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. +var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 
0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 00000000..cc60a6b5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 00000000..c6432913 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. 
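+// The hash of an n-byte window p is the polynomial
+//
+//	p[0]*A^n + p[1]*A^(n-1) + ... + p[n-1]*A  (mod 2^64)
+//
+// so aOldest = A^n in RabinKarp is the coefficient of the byte that
+// leaves the window on each roll; RollByte below subtracts that term
+// before mixing in the new byte.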
+const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 00000000..f1de88b4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 00000000..6c20c77b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,457 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. 
The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) 
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) 
+ s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) 
+} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 00000000..2a7bd19e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,522 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "unicode" +) + +// node represents a node in the binary tree. 
+type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. +const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. 
+ un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +/* +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} +*/ + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 00000000..d2c07e8c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,47 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
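+// The classic de Bruijn trick: x & -x isolates the lowest set bit,
+// multiplying by ntz32Const shifts a unique 5-bit pattern into the top
+// bits, and the table maps that pattern back to the number of trailing
+// zeros.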
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +/* +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} +*/ + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 00000000..939be884 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 00000000..2761de5f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. The function takes care of wrapping the index as +// well as potential overflow situations. 
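+// For example, with len(b.data) == 8, addIndex(6, 5) first computes
+// 6 + 5 - 8 = 3; the result is non-negative, so the wrapped index 3 is
+// returned.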
+func (b *buffer) addIndex(i int, n int) int {
+	// subtraction of len(b.data) prevents overflow
+	i += n - len(b.data)
+	if i < 0 {
+		i += len(b.data)
+	}
+	return i
+}
+
+// Read reads bytes from the buffer into p and returns the number of
+// bytes read. The function never returns an error but might return less
+// data than requested.
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n, err = b.Peek(p)
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// Peek reads bytes from the buffer into p without changing the buffer.
+// Peek will never return an error but might return less data than
+// requested.
+func (b *buffer) Peek(p []byte) (n int, err error) {
+	m := b.Buffered()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:n]
+	}
+	k := copy(p, b.data[b.rear:])
+	if k < n {
+		copy(p[k:], b.data)
+	}
+	return n, nil
+}
+
+// Discard skips the n next bytes to read from the buffer, returning the
+// bytes discarded.
+//
+// If Discard skips fewer than n bytes, it returns an error.
+func (b *buffer) Discard(n int) (discarded int, err error) {
+	if n < 0 {
+		return 0, errors.New("buffer.Discard: negative argument")
+	}
+	m := b.Buffered()
+	if m < n {
+		n = m
+		err = errors.New(
+			"buffer.Discard: discarded fewer bytes than requested")
+	}
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// ErrNoSpace indicates that there is insufficient space for the Write
+// operation.
+var ErrNoSpace = errors.New("insufficient space")
+
+// Write puts data into the buffer. If fewer bytes are written than
+// requested ErrNoSpace is returned.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	m := b.Available()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:m]
+		err = ErrNoSpace
+	}
+	k := copy(b.data[b.front:], p)
+	if k < n {
+		copy(b.data, p[k:])
+	}
+	b.front = b.addIndex(b.front, n)
+	return n, err
+}
+
+// WriteByte writes a single byte into the buffer. The error ErrNoSpace
+// is returned if no single byte is available in the buffer for writing.
+func (b *buffer) WriteByte(c byte) error {
+	if b.Available() < 1 {
+		return ErrNoSpace
+	}
+	b.data[b.front] = c
+	b.front = b.addIndex(b.front, 1)
+	return nil
+}
+
+// prefixLen returns the length of the common prefix of a and b.
+func prefixLen(a, b []byte) int {
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	for i, c := range a {
+		if b[i] != c {
+			return i
+		}
+	}
+	return len(a)
+}
+
+// matchLen returns the length of the common prefix for the given
+// distance from the rear and the byte slice p.
+func (b *buffer) matchLen(distance int, p []byte) int {
+	var n int
+	i := b.rear - distance
+	if i < 0 {
+		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
+			return n
+		}
+		p = p[n:]
+		i = 0
+	}
+	n += prefixLen(p, b.data[i:])
+	return n
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 00000000..040874c1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// ErrLimit indicates that the limit of the LimitedByteWriter has been
+// reached.
+var ErrLimit = errors.New("limit reached")
+
+// LimitedByteWriter provides a byte writer that can be written until a
+// limit is reached. The field N provides the number of remaining
+// bytes.
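+// For instance, a LimitedByteWriter wrapping some io.ByteWriter with
+// N = 10 accepts exactly ten WriteByte calls and returns ErrLimit for
+// every call after that.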
+type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 00000000..cbb943a0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
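+//
+// Roughly, the decision tree implemented below is:
+//
+//	isMatch == 0             -> literal
+//	isMatch == 1, isRep == 0 -> simple match with a newly coded distance
+//	isMatch == 1, isRep == 1 -> match reusing one of the four most
+//	                            recently used distances in rep[0..3]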
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + // break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 00000000..8cd616ef --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,128 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 00000000..20b256a9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,38 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. +func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. 
+func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 00000000..60ed9aef --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,140 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. +func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. 
+func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 00000000..5ed057a7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. +func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. 
+func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. 
+func (e *encoder) compress(flags compressFlags) error {
+	n := 0
+	if flags&all == 0 {
+		n = maxMatchLen - 1
+	}
+	d := e.dict
+	m := d.m
+	for d.Buffered() > n {
+		op := m.NextOp(e.state.rep)
+		if err := e.writeOp(op); err != nil {
+			return err
+		}
+		d.Discard(op.Len())
+	}
+	return nil
+}
+
+// eosMatch is a pseudo operation that indicates the end of the stream.
+var eosMatch = match{distance: maxDistance, n: minMatchLen}
+
+// Close terminates the LZMA stream. If requested the end-of-stream
+// marker will be written. If the byte writer limit has been or will be
+// reached during compression of the remaining data in the buffer the
+// LZMA stream will be closed and data will remain in the buffer.
+func (e *encoder) Close() error {
+	err := e.compress(all)
+	if err != nil && err != ErrLimit {
+		return err
+	}
+	if e.marker {
+		if err := e.writeMatch(eosMatch); err != nil {
+			return err
+		}
+	}
+	err = e.re.Close()
+	return err
+}
+
+// Compressed returns the number of bytes of the input data that have
+// been compressed.
+func (e *encoder) Compressed() int64 {
+	return e.dict.Pos() - e.start
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 00000000..056f8975
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// matcher is an interface that supports the identification of the next
+// operation.
+type matcher interface {
+	io.Writer
+	SetDict(d *encoderDict)
+	NextOp(rep [4]uint32) operation
+}
+
+// encoderDict provides the dictionary of the encoder. It includes an
+// additional buffer atop of the actual dictionary.
+type encoderDict struct {
+	buf      buffer
+	m        matcher
+	head     int64
+	capacity int
+	// preallocated array
+	data [maxMatchLen]byte
+}
+
+// newEncoderDict creates the encoder dictionary. The argument bufSize
+// defines the size of the additional buffer.
+func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
+	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
+		return nil, errors.New(
+			"lzma: dictionary capacity out of range")
+	}
+	if bufSize < 1 {
+		return nil, errors.New(
+			"lzma: buffer size must be larger than zero")
+	}
+	d = &encoderDict{
+		buf:      *newBuffer(dictCap + bufSize),
+		capacity: dictCap,
+		m:        m,
+	}
+	m.SetDict(d)
+	return d, nil
+}
+
+// Discard discards n bytes. Note that n must not be larger than
+// maxMatchLen.
+func (d *encoderDict) Discard(n int) {
+	p := d.data[:n]
+	k, _ := d.buf.Read(p)
+	if k < n {
+		panic(fmt.Errorf("lzma: can't discard %d bytes", n))
+	}
+	d.head += int64(n)
+	d.m.Write(p)
+}
+
+// Len returns the data available in the encoder dictionary.
+func (d *encoderDict) Len() int {
+	n := d.buf.Available()
+	if int64(n) > d.head {
+		return int(d.head)
+	}
+	return n
+}
+
+// DictLen returns the actual length of data in the dictionary.
+func (d *encoderDict) DictLen() int {
+	if d.head < int64(d.capacity) {
+		return int(d.head)
+	}
+	return d.capacity
+}
+
+// Available returns the number of bytes that can be written by a
+// following Write call.
+func (d *encoderDict) Available() int {
+	return d.buf.Available() - d.DictLen()
+}
+
+// Write writes data into the dictionary buffer. Note that the position
+// of the dictionary head will not be moved.
If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000000000000000000000000000000000..5edad633266eb5173a7c39761dc8b9e71efbfe80 GIT binary patch literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 00000000..0fb7910b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. 
+type hashTable struct {
+	dict *encoderDict
+	// actual hash table
+	t []int64
+	// circular list data with the offset to the next word
+	data  []uint32
+	front int
+	// mask for computing the index for the hash table
+	mask uint64
+	// hash offset; initial value is -int64(wordLen)
+	hoff int64
+	// length of the hashed word
+	wordLen int
+	// hash roller for computing the hash values for the Write
+	// method
+	wr hash.Roller
+	// hash roller for computing arbitrary hashes
+	hr hash.Roller
+	// preallocated slices
+	p         [maxMatches]int64
+	distances [maxMatches + shortDists]int
+}
+
+// hashTableExponent derives the hash table exponent from the dictionary
+// capacity.
+func hashTableExponent(n uint32) int {
+	e := 30 - nlz32(n)
+	switch {
+	case e < minTableExponent:
+		e = minTableExponent
+	case e > maxTableExponent:
+		e = maxTableExponent
+	}
+	return e
+}
+
+// newHashTable creates a new hash table for words of length wordLen
+func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
+	if !(0 < capacity) {
+		return nil, errors.New(
+			"newHashTable: capacity must be positive")
+	}
+	exp := hashTableExponent(uint32(capacity))
+	if !(1 <= wordLen && wordLen <= 4) {
+		return nil, errors.New("newHashTable: " +
+			"argument wordLen out of range")
+	}
+	n := 1 << uint(exp)
+	if n <= 0 {
+		panic("newHashTable: exponent is too large")
+	}
+	t = &hashTable{
+		t:       make([]int64, n),
+		data:    make([]uint32, capacity),
+		mask:    (uint64(1) << uint(exp)) - 1,
+		hoff:    -int64(wordLen),
+		wordLen: wordLen,
+		wr:      newRoller(wordLen),
+		hr:      newRoller(wordLen),
+	}
+	return t, nil
+}
+
+func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
+
+// buffered returns the number of bytes that are currently hashed.
+func (t *hashTable) buffered() int {
+	n := t.hoff + 1
+	switch {
+	case n <= 0:
+		return 0
+	case n >= int64(len(t.data)):
+		return len(t.data)
+	}
+	return int(n)
+}
+
+// addIndex adds n to an index ensuring that it stays inside the
+// circular buffer for the hash chain.
+func (t *hashTable) addIndex(i, n int) int {
+	i += n - len(t.data)
+	if i < 0 {
+		i += len(t.data)
+	}
+	return i
+}
+
+// putDelta puts the delta instance at the current front of the circular
+// chain buffer.
+func (t *hashTable) putDelta(delta uint32) {
+	t.data[t.front] = delta
+	t.front = t.addIndex(t.front, 1)
+}
+
+// putEntry puts a new entry into the hash table. If there is already a
+// value stored it is moved into the circular chain buffer.
+func (t *hashTable) putEntry(h uint64, pos int64) {
+	if pos < 0 {
+		return
+	}
+	i := h & t.mask
+	old := t.t[i] - 1
+	t.t[i] = pos + 1
+	var delta int64
+	if old >= 0 {
+		delta = pos - old
+		if delta > 1<<32-1 || delta > int64(t.buffered()) {
+			delta = 0
+		}
+	}
+	t.putDelta(uint32(delta))
+}
+
+// WriteByte converts a single byte into a hash and puts it into the hash
+// table.
+func (t *hashTable) WriteByte(b byte) error {
+	h := t.wr.RollByte(b)
+	t.hoff++
+	t.putEntry(h, t.hoff)
+	return nil
+}
+
+// Write converts the bytes provided into hashes and stores the
+// abbreviated offsets into the hash table. The method will never return an
+// error.
+func (t *hashTable) Write(p []byte) (n int, err error) {
+	for _, b := range p {
+		// WriteByte doesn't generate an error.
+		t.WriteByte(b)
+	}
+	return len(p), nil
+}
+
+// getMatches collects the positions in the dictionary that match the
+// given hash. The function returns the number of positions found.
+//
+// TODO: Make a getDistances because that is what we are actually
+// interested in.
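+//
+// The hash chain is walked backwards through the circular buffer: the
+// stored delta values are subtracted from the current position until a
+// zero delta (end of chain) or a position before the buffered window
+// is reached.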
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
+	if t.hoff < 0 || len(positions) == 0 {
+		return 0
+	}
+	buffered := t.buffered()
+	tailPos := t.hoff + 1 - int64(buffered)
+	rear := t.front - buffered
+	if rear >= 0 {
+		rear -= len(t.data)
+	}
+	// get the slot for the hash
+	pos := t.t[h&t.mask] - 1
+	delta := pos - tailPos
+	for {
+		if delta < 0 {
+			return n
+		}
+		positions[n] = tailPos + delta
+		n++
+		if n >= len(positions) {
+			return n
+		}
+		i := rear + int(delta)
+		if i < 0 {
+			i += len(t.data)
+		}
+		u := t.data[i]
+		if u == 0 {
+			return n
+		}
+		delta -= int64(u)
+	}
+}
+
+// hash computes the rolling hash for the word stored in p. For correct
+// results its length must be equal to t.wordLen.
+func (t *hashTable) hash(p []byte) uint64 {
+	var h uint64
+	for _, b := range p {
+		h = t.hr.RollByte(b)
+	}
+	return h
+}
+
+// Matches fills the positions slice with potential matches. The
+// function returns the number of positions filled into positions. The
+// byte slice p must have the word length of the hash table.
+func (t *hashTable) Matches(p []byte, positions []int64) int {
+	if len(p) != t.wordLen {
+		panic(fmt.Errorf(
+			"byte slice must have length %d", t.wordLen))
+	}
+	h := t.hash(p)
+	return t.getMatches(h, positions)
+}
+
+// NextOp identifies the next operation using the hash table.
+//
+// TODO: Use all repetitions to find matches.
+func (t *hashTable) NextOp(rep [4]uint32) operation {
+	// get positions
+	data := t.dict.data[:maxMatchLen]
+	n, _ := t.dict.buf.Peek(data)
+	data = data[:n]
+	var p []int64
+	if n < t.wordLen {
+		p = t.p[:0]
+	} else {
+		p = t.p[:maxMatches]
+		n = t.Matches(data[:t.wordLen], p)
+		p = p[:n]
+	}
+
+	// convert positions into potential distances
+	head := t.dict.head
+	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
+	for _, pos := range p {
+		dis := int(head - pos)
+		if dis > shortDists {
+			dists = append(dists, dis)
+		}
+	}
+
+	// check distances
+	var m match
+	dictLen := t.dict.DictLen()
+	for _, dist := range dists {
+		if dist > dictLen {
+			continue
+		}
+
+		// Here comes a trick. We are only interested in matches
+		// that are longer than the matches we have found
+		// before. So before we test the whole byte sequence at
+		// the given distance, we test the first byte that would
+		// make the match longer. If it doesn't match the byte
+		// to match, we don't need to care any longer.
+		i := t.dict.buf.rear - dist + m.n
+		if i < 0 {
+			i += len(t.dict.buf.data)
+		}
+		if t.dict.buf.data[i] != data[m.n] {
+			// We can't get a longer match. Jump to the next
+			// distance.
+			continue
+		}
+
+		n := t.dict.buf.matchLen(dist, data)
+		switch n {
+		case 0:
+			continue
+		case 1:
+			if uint32(dist-minDistance) != rep[0] {
+				continue
+			}
+		}
+		if n > m.n {
+			m = match{int64(dist), n}
+			if n == len(data) {
+				// No better match will be found.
+				break
+			}
+		}
+	}
+
+	if m.n == 0 {
+		return lit{data[0]}
+	}
+	return m
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go
new file mode 100644
index 00000000..04276c81
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header.go
@@ -0,0 +1,167 @@
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+)
+
+// uint32LE reads an uint32 integer from a byte slice
+func uint32LE(b []byte) uint32 {
+	x := uint32(b[3]) << 24
+	x |= uint32(b[2]) << 16
+	x |= uint32(b[1]) << 8
+	x |= uint32(b[0])
+	return x
+}
+
+// uint64LE converts the uint64 value stored as little endian to an uint64
+// value.
+func uint64LE(b []byte) uint64 {
+	x := uint64(b[7]) << 56
+	x |= uint64(b[6]) << 48
+	x |= uint64(b[5]) << 40
+	x |= uint64(b[4]) << 32
+	x |= uint64(b[3]) << 24
+	x |= uint64(b[2]) << 16
+	x |= uint64(b[1]) << 8
+	x |= uint64(b[0])
+	return x
+}
+
+// putUint32LE puts an uint32 integer into a byte slice that must have at least
+// a length of 4 bytes.
+func putUint32LE(b []byte, x uint32) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the uint64 value into the byte slice as little endian
+// value. The byte slice b must have at least place for 8 bytes.
+func putUint64LE(b []byte, x uint64) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+	b[4] = byte(x >> 32)
+	b[5] = byte(x >> 40)
+	b[6] = byte(x >> 48)
+	b[7] = byte(x >> 56)
+}
+
+// noHeaderSize defines the value of the length field in the LZMA header.
+const noHeaderSize uint64 = 1<<64 - 1
+
+// HeaderLen provides the length of the LZMA file header.
+const HeaderLen = 13
+
+// header represents the header of an LZMA file.
+type header struct {
+	properties Properties
+	dictCap    int
+	// uncompressed size; negative value if no size is given
+	size int64
+}
+
+// marshalBinary marshals the header.
+func (h *header) marshalBinary() (data []byte, err error) {
+	if err = h.properties.verify(); err != nil {
+		return nil, err
+	}
+	if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
+		return nil, fmt.Errorf("lzma: DictCap %d out of range",
+			h.dictCap)
+	}
+
+	data = make([]byte, 13)
+
+	// property byte
+	data[0] = h.properties.Code()
+
+	// dictionary capacity
+	putUint32LE(data[1:5], uint32(h.dictCap))
+
+	// uncompressed size
+	var s uint64
+	if h.size > 0 {
+		s = uint64(h.size)
+	} else {
+		s = noHeaderSize
+	}
+	putUint64LE(data[5:], s)
+
+	return data, nil
+}
+
+// unmarshalBinary unmarshals the header.
+func (h *header) unmarshalBinary(data []byte) error {
+	if len(data) != HeaderLen {
+		return errors.New("lzma.unmarshalBinary: data has wrong length")
+	}
+
+	// properties
+	var err error
+	if h.properties, err = PropertiesForCode(data[0]); err != nil {
+		return err
+	}
+
+	// dictionary capacity
+	h.dictCap = int(uint32LE(data[1:]))
+	if h.dictCap < 0 {
+		return errors.New(
+			"LZMA header: dictionary capacity exceeds maximum " +
+				"integer")
+	}
+
+	// uncompressed size
+	s := uint64LE(data[5:])
+	if s == noHeaderSize {
+		h.size = -1
+	} else {
+		h.size = int64(s)
+		if h.size < 0 {
+			return errors.New(
+				"LZMA header: uncompressed size " +
+					"out of int64 range")
+		}
+	}
+
+	return nil
+}
+
+// validDictCap checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
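+//
+// For reference, the 13 header bytes are laid out as follows (little
+// endian, mirroring marshalBinary above):
+//
+//	offset 0:     properties code, (pb*5+lp)*9+lc
+//	offset 1-4:   dictionary capacity as uint32
+//	offset 5-12:  uncompressed size as uint64, or 0xffffffffffffffff
+//	              (noHeaderSize) if the size is unknown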
+func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 00000000..be54dd85 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. +const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. 
+func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
+func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop chunkState = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 00000000..6e0edfc8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,116 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. 
The value is used to select the tree codec +// for length encoding and decoding. +const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. +// +// l = length - minMatchLen +// +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 00000000..0bfc763c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,125 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. +func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. 
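+// The codec allocates 0x300 probability values for every literal
+// state, i.e. 0x300 << (lc+lp) entries in total.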
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte s using a range encoder as well as the current LZMA
+// encoder state, a match byte, and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(s)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 00000000..96ebda0f
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+	if _, ok := maStrings[a]; !ok {
+		return errUnsupportedMatchAlgorithm
+	}
+	return nil
+}
+
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+	switch a {
+	case HashTable4:
+		return newHashTable(dictCap, 4)
+	case BinaryTree:
+		return newBinTree(dictCap)
+	}
+	return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 00000000..026ce48a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,55 @@
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 00000000..9a2648e0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 00000000..f229fc9f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. 
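+// As a plausibility check for the encoding below: with maxPB = 4,
+// maxLP = 4 and maxLC = 8 there are (4+1)*(4+1)*(8+1) = 225 valid
+// combinations, so property codes range from 0 to 224. For example,
+// the common default {LC: 3, LP: 0, PB: 2} encodes to
+// (2*5+0)*9 + 3 = 93 (0x5d).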
+const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 00000000..57f1ab90 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,222 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. 
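+//
+// For illustration, a caller could emit the three least-significant
+// bits of a value v (most-significant of the three first) with:
+//
+//	for i := 2; i >= 0; i-- {
+//		if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
+//			return err
+//		}
+//	}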
+func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. +func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. 
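+//
+// The adaptation mirrors the encoder: starting from probInit = 0x400
+// (probability 1/2), decoding a 0 bit increases p by
+// ((1<<probbits) - 0x400) >> movebits = 32, while decoding a 1 bit
+// decreases p by 0x400 >> movebits = 32.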
+func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 00000000..2ed13c88 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + return nil, errors.New("lzma: dictionary capacity too small") + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. 
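+//
+// An illustrative decompression loop (file name hypothetical):
+//
+//	f, err := os.Open("data.lzma")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	r, err := lzma.NewReader(bufio.NewReader(f))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := io.Copy(os.Stdout, r); err != nil {
+//		log.Fatal(err)
+//	}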
+func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 00000000..de3da37e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,231 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. +func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. 
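+// As a rough sketch of the chunk encoding: a first header byte of 0x00
+// terminates the sequence, 0x01 and 0x02 introduce uncompressed chunks
+// (with and without a dictionary reset), and values >= 0x80 introduce
+// compressed chunks whose low five bits carry the high bits of the
+// uncompressed size; the chunk types cU, cUD, cLR, cLRN and cLRND used
+// below stand for these cases.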
+func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. +func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 00000000..09d62f7d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,145 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. +func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 00000000..6e927e93 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// treeCodec encodes or decodes values with a fixed bit size. It uses a
+// tree of probability values. The root of the tree is the most-significant bit.
+type treeCodec struct {
+	probTree
+}
+
+// makeTreeCodec makes a tree codec. The bits value must be inside the range
+// [1,32].
+func makeTreeCodec(bits int) treeCodec {
+	return treeCodec{makeProbTree(bits)}
+}
+
+// deepcopy initializes tc as a deep copy of the source.
+func (tc *treeCodec) deepcopy(src *treeCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value.
+func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
+	m := uint32(1)
+	for i := int(tc.bits) - 1; i >= 0; i-- {
+		b := (v >> uint(i)) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
+// be caused by the range decoder.
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := 0; j < int(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+	}
+	return m - (1 << uint(tc.bits)), nil
+}
+
+// treeReverseCodec is another tree codec, where the least-significant bit is
+// the start of the probability tree.
+type treeReverseCodec struct {
+	probTree
+}
+
+// deepcopy initializes the treeReverseCodec as a deep copy of the
+// source.
+func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument
+// must be in the range [1,32].
+func makeTreeReverseCodec(bits int) treeReverseCodec {
+	return treeReverseCodec{makeProbTree(bits)}
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value. The range
+// encoder may cause errors.
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
+	m := uint32(1)
+	for i := uint(0); i < uint(tc.bits); i++ {
+		b := (v >> i) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors
+// returned by the range decoder will be returned.
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the Encode and
+// Decode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	for i := range t.probs {
+		t.probs[i] = probInit
+	}
+	return t
+}
+
+// Bits provides the number of bits for the values to de/encode.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// WriterConfig defines the configuration parameters for a Writer.
type WriterConfig struct {
+	// Properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain the size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests whether the EOSMarker needs to be written.
+	// If no size is given it will be set.
+	EOSMarker bool
+}
+
+// fill replaces zero values with default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors.
Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. +func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. +func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. 
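+//
+// For illustration, a complete compression sequence (file name
+// hypothetical) could look like:
+//
+//	out, err := os.Create("data.lzma")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer out.Close()
+//	w, err := lzma.NewWriter(out)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err = w.Write([]byte("hello, world")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err = w.Close(); err != nil {
+//		log.Fatal(err)
+//	}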
+func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 00000000..dfaaec95 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. +type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. 
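+//
+// For illustration, a caller that only needs a larger dictionary
+// (capacity chosen arbitrarily here, out an io.Writer) might write:
+//
+//	w, err := lzma.Writer2Config{
+//		DictCap: 32 * 1024 * 1024,
+//	}.NewWriter2(out)
+//	if err != nil {
+//		log.Fatal(err)
+//	}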
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. +func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. 
The encoder will be reset +// to support the next chunk. +func (w *Writer2) flushChunk() error { + if w.written() == 0 { + return nil + } + var err error + if err = w.encoder.Close(); err != nil { + return err + } + if err = w.writeChunk(); err != nil { + return err + } + w.buf.Reset() + w.lbw.N = maxCompressed + if err = w.encoder.Reopen(&w.lbw); err != nil { + return err + } + if err = w.cstate.next(w.ctype); err != nil { + return err + } + w.ctype = w.cstate.defaultChunkType() + w.start = cloneState(w.encoder.state) + return nil +} + +// Flush writes all buffered data out to the underlying stream. This +// could result in multiple chunks to be created. +func (w *Writer2) Flush() error { + if w.cstate == stop { + return errClosed + } + for w.written() > 0 { + if err := w.flushChunk(); err != nil { + return err + } + } + return nil +} + +// Close terminates the LZMA2 stream with an EOS chunk. +func (w *Writer2) Close() error { + if w.cstate == stop { + return errClosed + } + if err := w.Flush(); err != nil { + return nil + } + // write zero byte EOS chunk + _, err := w.w.Write([]byte{0}) + if err != nil { + return err + } + w.cstate = stop + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go new file mode 100644 index 00000000..4f1bb339 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -0,0 +1,117 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// LZMA filter constants. +const ( + lzmaFilterID = 0x21 + lzmaFilterLen = 3 +) + +// lzmaFilter declares the LZMA2 filter information stored in an xz +// block header. +type lzmaFilter struct { + dictCap int64 +} + +// String returns a representation of the LZMA filter. +func (f lzmaFilter) String() string { + return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) +} + +// id returns the ID for the LZMA2 filter. +func (f lzmaFilter) id() uint64 { return lzmaFilterID } + +// MarshalBinary converts the lzmaFilter in its encoded representation. +func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. 
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100644 index 00000000..a8c612ce --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go new file mode 100644 index 00000000..95240135 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -0,0 +1,23 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import "hash" + +type noneHash struct{} + +func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } + +func (h noneHash) Sum(b []byte) []byte { return b } + +func (h noneHash) Reset() {} + +func (h noneHash) Size() int { return 0 } + +func (h noneHash) BlockSize() int { return 0 } + +func newNoneHash() hash.Hash { + return &noneHash{} +} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 00000000..7f974ffc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,359 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. 
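+//
+// An illustrative use, assuming f holds an xz stream:
+//
+//	r, err := xz.NewReader(f)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := io.Copy(os.Stdout, r); err != nil {
+//		log.Fatal(err)
+//	}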
+type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// readTail reads the index body and the xz footer. 
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz, len(r.index)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 00000000..6b3a6662 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,399 @@ +// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) + CheckSum byte + // Forces NoChecksum (default: false) + NoCheckSum bool + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } + if c.NoCheckSum { + c.CheckSum = None + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. 
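+//
+// For illustration, with out an io.Writer and data a []byte, a full
+// write sequence would be:
+//
+//	w, err := xz.NewWriter(out)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err = w.Write(data); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err = w.Close(); err != nil {
+//		log.Fatal(err)
+//	}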
+func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. +type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter is writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. + mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + if bw.hash.Size() != 0 { + bw.mw = io.MultiWriter(bw.w, bw.hash) + } else { + bw.mw = bw.w + } + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the commpressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressed size returns the amount of data written to the underlying +// stream. +func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of data written to the +// blockWriter +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. +func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer. 
+
+// Close closes the writer.
+func (bw *blockWriter) Close() error {
+    if bw.closed {
+        return errClosed
+    }
+    bw.closed = true
+    if err := bw.w.Close(); err != nil {
+        return err
+    }
+    s := bw.hash.Size()
+    k := padLen(bw.cxz.n)
+    p := make([]byte, k+s)
+    bw.hash.Sum(p[k:k])
+    if _, err := bw.cxz.w.Write(p); err != nil {
+        return err
+    }
+    return nil
+}
diff --git a/vendor/github.com/xi2/xz/AUTHORS b/vendor/github.com/xi2/xz/AUTHORS
new file mode 100644
index 00000000..657330e1
--- /dev/null
+++ b/vendor/github.com/xi2/xz/AUTHORS
@@ -0,0 +1,8 @@
+# Package xz authors
+
+Michael Cross
+
+# XZ Embedded authors
+
+Lasse Collin
+Igor Pavlov
diff --git a/vendor/github.com/xi2/xz/LICENSE b/vendor/github.com/xi2/xz/LICENSE
new file mode 100644
index 00000000..b56f2e6a
--- /dev/null
+++ b/vendor/github.com/xi2/xz/LICENSE
@@ -0,0 +1,18 @@
+Licensing of github.com/xi2/xz
+==============================
+
+    This Go package is a modified version of
+
+        XZ Embedded
+
+    The contents of the testdata directory are modified versions of
+    the test files from
+
+        XZ Utils
+
+    All the files in this package have been written by Michael Cross,
+    Lasse Collin and/or Igor Pavlov. All these files have been put
+    into the public domain. You can do whatever you want with these
+    files.
+
+    This software is provided "as is", without any warranty.
diff --git a/vendor/github.com/xi2/xz/README.md b/vendor/github.com/xi2/xz/README.md
new file mode 100644
index 00000000..2190af55
--- /dev/null
+++ b/vendor/github.com/xi2/xz/README.md
@@ -0,0 +1,10 @@
+# Xz
+
+Package xz implements XZ decompression natively in Go.
+
+Documentation at <https://godoc.org/github.com/xi2/xz>.
+
+Download and install with `go get github.com/xi2/xz`.
+
+If you need compression as well as decompression, you might want to
+look at <https://github.com/ulikunitz/xz>.
diff --git a/vendor/github.com/xi2/xz/dec_bcj.go b/vendor/github.com/xi2/xz/dec_bcj.go
new file mode 100644
index 00000000..a8a3df92
--- /dev/null
+++ b/vendor/github.com/xi2/xz/dec_bcj.go
@@ -0,0 +1,461 @@
+/*
+ * Branch/Call/Jump (BCJ) filter decoders
+ *
+ * Authors: Lasse Collin
+ *          Igor Pavlov
+ *
+ * Translation to Go: Michael Cross
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+package xz
+
+/* from linux/lib/xz/xz_dec_bcj.c *************************************/
+
+type xzDecBCJ struct {
+    /* Type of the BCJ filter being used */
+    typ xzFilterID
+    /*
+     * Return value of the next filter in the chain. We need to preserve
+     * this information across calls, because we must not call the next
+     * filter anymore once it has returned xzStreamEnd
+     */
+    ret xzRet
+    /*
+     * Absolute position relative to the beginning of the uncompressed
+     * data (in a single .xz Block).
+     */
+    pos int
+    /* x86 filter state */
+    x86PrevMask uint32
+    /* Temporary space to hold the variables from xzBuf */
+    out    []byte
+    outPos int
+    temp   struct {
+        /* Amount of already filtered data in the beginning of buf */
+        filtered int
+        /*
+         * Buffer to hold a mix of filtered and unfiltered data. This
+         * needs to be big enough to hold Alignment + 2 * Look-ahead:
+         *
+         * Type       Alignment  Look-ahead
+         * x86            1          4
+         * PowerPC        4          0
+         * IA-64         16          0
+         * ARM            4          0
+         * ARM-Thumb      2          2
+         * SPARC          4          0
+         */
+        buf      []byte // slice buf will be backed by bufArray
+        bufArray [16]byte
+    }
+}
+
+/*
+ * This is used to test the most significant byte of a memory address
+ * in an x86 instruction.
+ */ +func bcjX86TestMSByte(b byte) bool { + return b == 0x00 || b == 0xff +} + +func bcjX86Filter(s *xzDecBCJ, buf []byte) int { + var maskToAllowedStatus = []bool{ + true, true, true, false, true, false, false, false, + } + var maskToBitNum = []byte{0, 1, 2, 2, 3, 3, 3, 3} + var i int + var prevPos int = -1 + var prevMask uint32 = s.x86PrevMask + var src uint32 + var dest uint32 + var j uint32 + var b byte + if len(buf) <= 4 { + return 0 + } + for i = 0; i < len(buf)-4; i++ { + if buf[i]&0xfe != 0xe8 { + continue + } + prevPos = i - prevPos + if prevPos > 3 { + prevMask = 0 + } else { + prevMask = (prevMask << (uint(prevPos) - 1)) & 7 + if prevMask != 0 { + b = buf[i+4-int(maskToBitNum[prevMask])] + if !maskToAllowedStatus[prevMask] || bcjX86TestMSByte(b) { + prevPos = i + prevMask = prevMask<<1 | 1 + continue + } + } + } + prevPos = i + if bcjX86TestMSByte(buf[i+4]) { + src = getLE32(buf[i+1:]) + for { + dest = src - uint32(s.pos+i+5) + if prevMask == 0 { + break + } + j = uint32(maskToBitNum[prevMask]) * 8 + b = byte(dest >> (24 - j)) + if !bcjX86TestMSByte(b) { + break + } + src = dest ^ (1<<(32-j) - 1) + } + dest &= 0x01FFFFFF + dest |= 0 - dest&0x01000000 + putLE32(dest, buf[i+1:]) + i += 4 + } else { + prevMask = prevMask<<1 | 1 + } + } + prevPos = i - prevPos + if prevPos > 3 { + s.x86PrevMask = 0 + } else { + s.x86PrevMask = prevMask << (uint(prevPos) - 1) + } + return i +} + +func bcjPowerPCFilter(s *xzDecBCJ, buf []byte) int { + var i int + var instr uint32 + for i = 0; i+4 <= len(buf); i += 4 { + instr = getBE32(buf[i:]) + if instr&0xFC000003 == 0x48000001 { + instr &= 0x03FFFFFC + instr -= uint32(s.pos + i) + instr &= 0x03FFFFFC + instr |= 0x48000001 + putBE32(instr, buf[i:]) + } + } + return i +} + +var bcjIA64BranchTable = [...]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 4, 4, 6, 6, 0, 0, 7, 7, + 4, 4, 0, 0, 4, 4, 0, 0, +} + +func bcjIA64Filter(s *xzDecBCJ, buf []byte) int { + var branchTable = bcjIA64BranchTable[:] + /* + * The local variables take a little bit stack space, but it's less + * than what LZMA2 decoder takes, so it doesn't make sense to reduce + * stack usage here without doing that for the LZMA2 decoder too. 
+ */
+    /* Loop counters */
+    var i int
+    var j int
+    /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
+    var slot uint32
+    /* Bitwise offset of the instruction indicated by slot */
+    var bitPos uint32
+    /* bit_pos split into byte and bit parts */
+    var bytePos uint32
+    var bitRes uint32
+    /* Address part of an instruction */
+    var addr uint32
+    /* Mask used to detect which instructions to convert */
+    var mask uint32
+    /* 41-bit instruction stored somewhere in the lowest 48 bits */
+    var instr uint64
+    /* Instruction normalized with bit_res for easier manipulation */
+    var norm uint64
+    for i = 0; i+16 <= len(buf); i += 16 {
+        mask = uint32(branchTable[buf[i]&0x1f])
+        for slot, bitPos = 0, 5; slot < 3; slot, bitPos = slot+1, bitPos+41 {
+            if (mask>>slot)&1 == 0 {
+                continue
+            }
+            bytePos = bitPos >> 3
+            bitRes = bitPos & 7
+            instr = 0
+            for j = 0; j < 6; j++ {
+                instr |= uint64(buf[i+j+int(bytePos)]) << (8 * uint(j))
+            }
+            norm = instr >> bitRes
+            if (norm>>37)&0x0f == 0x05 && (norm>>9)&0x07 == 0 {
+                addr = uint32((norm >> 13) & 0x0fffff)
+                addr |= (uint32(norm>>36) & 1) << 20
+                addr <<= 4
+                addr -= uint32(s.pos + i)
+                addr >>= 4
+                norm &= ^(uint64(0x8fffff) << 13)
+                norm |= uint64(addr&0x0fffff) << 13
+                norm |= uint64(addr&0x100000) << (36 - 20)
+                instr &= 1<<bitRes - 1
+                instr |= norm << bitRes
+                for j = 0; j < 6; j++ {
+                    buf[i+j+int(bytePos)] = byte(instr >> (8 * uint(j)))
+                }
+            }
+        }
+    }
+    return i
+}
+
+func bcjARMFilter(s *xzDecBCJ, buf []byte) int {
+    var i int
+    var addr uint32
+    for i = 0; i+4 <= len(buf); i += 4 {
+        if buf[i+3] == 0xeb {
+            addr = uint32(buf[i]) | uint32(buf[i+1])<<8 |
+                uint32(buf[i+2])<<16
+            addr <<= 2
+            addr -= uint32(s.pos + i + 8)
+            addr >>= 2
+            buf[i] = byte(addr)
+            buf[i+1] = byte(addr >> 8)
+            buf[i+2] = byte(addr >> 16)
+        }
+    }
+    return i
+}
+
+func bcjARMThumbFilter(s *xzDecBCJ, buf []byte) int {
+    var i int
+    var addr uint32
+    for i = 0; i+4 <= len(buf); i += 2 {
+        if buf[i+1]&0xf8 == 0xf0 && buf[i+3]&0xf8 == 0xf8 {
+            addr = uint32(buf[i+1]&0x07)<<19 |
+                uint32(buf[i])<<11 |
+                uint32(buf[i+3]&0x07)<<8 |
+                uint32(buf[i+2])
+            addr <<= 1
+            addr -= uint32(s.pos + i + 4)
+            addr >>= 1
+            buf[i+1] = byte(0xf0 | (addr>>19)&0x07)
+            buf[i] = byte(addr >> 11)
+            buf[i+3] = byte(0xf8 | (addr>>8)&0x07)
+            buf[i+2] = byte(addr)
+            i += 2
+        }
+    }
+    return i
+}
+
+func bcjSPARCFilter(s *xzDecBCJ, buf []byte) int {
+    var i int
+    var instr uint32
+    for i = 0; i+4 <= len(buf); i += 4 {
+        instr = getBE32(buf[i:])
+        if instr>>22 == 0x100 || instr>>22 == 0x1ff {
+            instr <<= 2
+            instr -= uint32(s.pos + i)
+            instr >>= 2
+            instr = (0x40000000 - instr&0x400000) |
+                0x40000000 | (instr & 0x3FFFFF)
+            putBE32(instr, buf[i:])
+        }
+    }
+    return i
+}
+
+/*
+ * Apply the selected BCJ filter. Update *pos and s.pos to match the amount
+ * of data that got filtered.
+ */
+func bcjApply(s *xzDecBCJ, buf []byte, pos *int) {
+    var filtered int
+    buf = buf[*pos:]
+    switch s.typ {
+    case idBCJX86:
+        filtered = bcjX86Filter(s, buf)
+    case idBCJPowerPC:
+        filtered = bcjPowerPCFilter(s, buf)
+    case idBCJIA64:
+        filtered = bcjIA64Filter(s, buf)
+    case idBCJARM:
+        filtered = bcjARMFilter(s, buf)
+    case idBCJARMThumb:
+        filtered = bcjARMThumbFilter(s, buf)
+    case idBCJSPARC:
+        filtered = bcjSPARCFilter(s, buf)
+    default:
+        /* Never reached */
+    }
+    *pos += filtered
+    s.pos += filtered
+}
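All six filters above do the same thing for different instruction encodings: they rewrite relative branch targets as absolute addresses, so that repeated branches to one location produce identical byte sequences for the LZMA stage to match. A toy illustration of the idea (toyBCJ and its one-opcode instruction set are invented for this sketch; the real filters also track masks and alignment):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// toyBCJ converts relative branch targets to absolute addresses for a
// made-up instruction set: opcode 0xE8 followed by a little-endian
// 32-bit offset (loosely modelled on the x86 filter above).
func toyBCJ(buf []byte, pos uint32) {
	for i := 0; i+5 <= len(buf); i++ {
		if buf[i] != 0xE8 {
			continue
		}
		off := binary.LittleEndian.Uint32(buf[i+1:])
		abs := off + pos + uint32(i) + 5 // absolute target address
		binary.LittleEndian.PutUint32(buf[i+1:], abs)
		i += 4
	}
}

func main() {
	// Two calls to the same target from different sites have
	// different relative offsets...
	code := []byte{
		0xE8, 0x0B, 0x00, 0x00, 0x00, // call +11 -> target 16
		0x90,                         // nop
		0xE8, 0x05, 0x00, 0x00, 0x00, // call +5  -> target 16
	}
	toyBCJ(code, 0)
	// ...but identical absolute targets after filtering, which is
	// what makes the filtered stream compress better.
	fmt.Printf("%x\n", code)
}
```

+
+/*
+ * Flush pending filtered data from temp to the output buffer.
+ * Move the remaining mixture of possibly filtered and unfiltered
+ * data to the beginning of temp.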
+ */ +func bcjFlush(s *xzDecBCJ, b *xzBuf) { + var copySize int + copySize = len(b.out) - b.outPos + if copySize > s.temp.filtered { + copySize = s.temp.filtered + } + copy(b.out[b.outPos:], s.temp.buf[:copySize]) + b.outPos += copySize + s.temp.filtered -= copySize + copy(s.temp.buf, s.temp.buf[copySize:]) + s.temp.buf = s.temp.buf[:len(s.temp.buf)-copySize] +} + +/* + * Decode raw stream which has a BCJ filter as the first filter. + * + * The BCJ filter functions are primitive in sense that they process the + * data in chunks of 1-16 bytes. To hide this issue, this function does + * some buffering. + */ +func xzDecBCJRun(s *xzDecBCJ, b *xzBuf, chain func(*xzBuf) xzRet) xzRet { + var outStart int + /* + * Flush pending already filtered data to the output buffer. Return + * immediately if we couldn't flush everything, or if the next + * filter in the chain had already returned xzStreamEnd. + */ + if s.temp.filtered > 0 { + bcjFlush(s, b) + if s.temp.filtered > 0 { + return xzOK + } + if s.ret == xzStreamEnd { + return xzStreamEnd + } + } + /* + * If we have more output space than what is currently pending in + * temp, copy the unfiltered data from temp to the output buffer + * and try to fill the output buffer by decoding more data from the + * next filter in the chain. Apply the BCJ filter on the new data + * in the output buffer. If everything cannot be filtered, copy it + * to temp and rewind the output buffer position accordingly. + * + * This needs to be always run when len(temp.buf) == 0 to handle a special + * case where the output buffer is full and the next filter has no + * more output coming but hasn't returned xzStreamEnd yet. + */ + if len(s.temp.buf) < len(b.out)-b.outPos || len(s.temp.buf) == 0 { + outStart = b.outPos + copy(b.out[b.outPos:], s.temp.buf) + b.outPos += len(s.temp.buf) + s.ret = chain(b) + if s.ret != xzStreamEnd && s.ret != xzOK { + return s.ret + } + bcjApply(s, b.out[:b.outPos], &outStart) + /* + * As an exception, if the next filter returned xzStreamEnd, + * we can do that too, since the last few bytes that remain + * unfiltered are meant to remain unfiltered. + */ + if s.ret == xzStreamEnd { + return xzStreamEnd + } + s.temp.buf = s.temp.bufArray[:b.outPos-outStart] + b.outPos -= len(s.temp.buf) + copy(s.temp.buf, b.out[b.outPos:]) + /* + * If there wasn't enough input to the next filter to fill + * the output buffer with unfiltered data, there's no point + * to try decoding more data to temp. + */ + if b.outPos+len(s.temp.buf) < len(b.out) { + return xzOK + } + } + /* + * We have unfiltered data in temp. If the output buffer isn't full + * yet, try to fill the temp buffer by decoding more data from the + * next filter. Apply the BCJ filter on temp. Then we hopefully can + * fill the actual output buffer by copying filtered data from temp. + * A mix of filtered and unfiltered data may be left in temp; it will + * be taken care on the next call to this function. + */ + if b.outPos < len(b.out) { + /* Make b.out temporarily point to s.temp. */ + s.out = b.out + s.outPos = b.outPos + b.out = s.temp.bufArray[:] + b.outPos = len(s.temp.buf) + s.ret = chain(b) + s.temp.buf = s.temp.bufArray[:b.outPos] + b.out = s.out + b.outPos = s.outPos + if s.ret != xzOK && s.ret != xzStreamEnd { + return s.ret + } + bcjApply(s, s.temp.buf, &s.temp.filtered) + /* + * If the next filter returned xzStreamEnd, we mark that + * everything is filtered, since the last unfiltered bytes + * of the stream are meant to be left as is. 
+ */ + if s.ret == xzStreamEnd { + s.temp.filtered = len(s.temp.buf) + } + bcjFlush(s, b) + if s.temp.filtered > 0 { + return xzOK + } + } + return s.ret +} + +/* + * Allocate memory for BCJ decoders. xzDecBCJReset must be used before + * calling xzDecBCJRun. + */ +func xzDecBCJCreate() *xzDecBCJ { + return new(xzDecBCJ) +} + +/* + * Decode the Filter ID of a BCJ filter and check the start offset is + * valid. Returns xzOK if the given Filter ID and offset is + * supported. Otherwise xzOptionsError is returned. + */ +func xzDecBCJReset(s *xzDecBCJ, id xzFilterID, offset int) xzRet { + switch id { + case idBCJX86: + case idBCJPowerPC: + case idBCJIA64: + case idBCJARM: + case idBCJARMThumb: + case idBCJSPARC: + default: + /* Unsupported Filter ID */ + return xzOptionsError + } + // check offset is a multiple of alignment + switch id { + case idBCJPowerPC, idBCJARM, idBCJSPARC: + if offset%4 != 0 { + return xzOptionsError + } + case idBCJIA64: + if offset%16 != 0 { + return xzOptionsError + } + case idBCJARMThumb: + if offset%2 != 0 { + return xzOptionsError + } + } + s.typ = id + s.ret = xzOK + s.pos = offset + s.x86PrevMask = 0 + s.temp.filtered = 0 + s.temp.buf = nil + return xzOK +} diff --git a/vendor/github.com/xi2/xz/dec_delta.go b/vendor/github.com/xi2/xz/dec_delta.go new file mode 100644 index 00000000..19df5908 --- /dev/null +++ b/vendor/github.com/xi2/xz/dec_delta.go @@ -0,0 +1,55 @@ +/* + * Delta decoder + * + * Author: Lasse Collin + * + * Translation to Go: Michael Cross + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +package xz + +type xzDecDelta struct { + delta [256]byte + pos byte + distance int // in range [1, 256] +} + +/* + * Decode raw stream which has a delta filter as the first filter. + */ +func xzDecDeltaRun(s *xzDecDelta, b *xzBuf, chain func(*xzBuf) xzRet) xzRet { + outStart := b.outPos + ret := chain(b) + for i := outStart; i < b.outPos; i++ { + tmp := b.out[i] + s.delta[byte(s.distance+int(s.pos))] + s.delta[s.pos] = tmp + b.out[i] = tmp + s.pos-- + } + return ret +} + +/* + * Allocate memory for a delta decoder. xzDecDeltaReset must be used + * before calling xzDecDeltaRun. + */ +func xzDecDeltaCreate() *xzDecDelta { + return new(xzDecDelta) +} + +/* + * Returns xzOK if the given distance is valid. Otherwise + * xzOptionsError is returned. + */ +func xzDecDeltaReset(s *xzDecDelta, distance int) xzRet { + if distance < 1 || distance > 256 { + return xzOptionsError + } + s.delta = [256]byte{} + s.pos = 0 + s.distance = distance + return xzOK +} diff --git a/vendor/github.com/xi2/xz/dec_lzma2.go b/vendor/github.com/xi2/xz/dec_lzma2.go new file mode 100644 index 00000000..fa42e471 --- /dev/null +++ b/vendor/github.com/xi2/xz/dec_lzma2.go @@ -0,0 +1,1235 @@ +/* + * LZMA2 decoder + * + * Authors: Lasse Collin + * Igor Pavlov + * + * Translation to Go: Michael Cross + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +package xz + +/* from linux/lib/xz/xz_lzma2.h ***************************************/ + +/* Range coder constants */ +const ( + rcShiftBits = 8 + rcTopBits = 24 + rcTopValue = 1 << rcTopBits + rcBitModelTotalBits = 11 + rcBitModelTotal = 1 << rcBitModelTotalBits + rcMoveBits = 5 +) + +/* + * Maximum number of position states. A position state is the lowest pb + * number of bits of the current uncompressed offset. In some places there + * are different sets of probabilities for different position states. 
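+ */

xzDecDeltaRun above undoes a byte-wise difference transform: each output byte is the stored difference plus the decoded byte `distance` positions back. A minimal sketch of the same arithmetic without the ring-buffer state (deltaDecode is illustrative, not part of the package):

```go
package main

import "fmt"

// deltaDecode reverses a distance-d delta filter in place: each byte
// becomes the stored difference plus the byte d positions earlier
// (mirrors xzDecDeltaRun above, minus the ring-buffer bookkeeping).
func deltaDecode(data []byte, d int) {
	for i := d; i < len(data); i++ {
		data[i] += data[i-d]
	}
}

func main() {
	// A slowly rising ramp is stored as a run of identical bytes
	// under a distance-1 delta filter, which LZMA compresses well.
	encoded := []byte{10, 2, 2, 2, 2, 2}
	deltaDecode(encoded, 1)
	fmt.Println(encoded) // [10 12 14 16 18 20]
}
```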
+const posStatesMax = 1 << 4
+
+/*
+ * lzmaState is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ *  - Literal: One 8-bit byte
+ *  - Match: Repeat a chunk of data at some distance
+ *  - Long repeat: Multi-byte match at a recently seen distance
+ *  - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are in the form STATE-oldest-older-previous. REP means
+ * either short or long repeated match, and NONLIT means any non-literal.
+ */
+type lzmaState int
+
+const (
+    stateLitLit lzmaState = iota
+    stateMatchLitLit
+    stateRepLitLit
+    stateShortrepLitLit
+    stateMatchLit
+    stateRepLit
+    stateShortrepLit
+    stateLitMatch
+    stateLitLongrep
+    stateLitShortrep
+    stateNonlitMatch
+    stateNonlitRep
+)
+
+/* Total number of states */
+const states = 12
+
+/* The lowest 7 states indicate that the previous state was a literal. */
+const litStates = 7
+
+/* Indicate that the latest symbol was a literal. */
+func lzmaStateLiteral(state *lzmaState) {
+    switch {
+    case *state <= stateShortrepLitLit:
+        *state = stateLitLit
+    case *state <= stateLitShortrep:
+        *state -= 3
+    default:
+        *state -= 6
+    }
+}
+
+/* Indicate that the latest symbol was a match. */
+func lzmaStateMatch(state *lzmaState) {
+    if *state < litStates {
+        *state = stateLitMatch
+    } else {
+        *state = stateNonlitMatch
+    }
+}
+
+/* Indicate that the latest state was a long repeated match. */
+func lzmaStateLongRep(state *lzmaState) {
+    if *state < litStates {
+        *state = stateLitLongrep
+    } else {
+        *state = stateNonlitRep
+    }
+}
+
+/* Indicate that the latest symbol was a short match. */
+func lzmaStateShortRep(state *lzmaState) {
+    if *state < litStates {
+        *state = stateLitShortrep
+    } else {
+        *state = stateNonlitRep
+    }
+}
+
+/* Test if the previous symbol was a literal. */
+func lzmaStateIsLiteral(state lzmaState) bool {
+    return state < litStates
+}
+
+/* Each literal coder is divided into three sections:
+ *  - 0x001-0x0FF: Without match byte
+ *  - 0x101-0x1FF: With match byte; match bit is 0
+ *  - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * Match byte is used when the previous LZMA symbol was something other
+ * than a literal (that is, it was some kind of match).
+ */
+const literalCoderSize = 0x300
+
+/* Maximum number of literal coders */
+const literalCodersMax = 1 << 4
+
+/* Minimum length of a match is two bytes. */
+const matchLenMin = 2
+
+/* Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length   Bits
+ *  2-9      4 = Choice=0 + 3 bits
+ * 10-17     5 = Choice=1 + Choice2=0 + 3 bits
+ * 18-273   10 = Choice=1 + Choice2=1 + 8 bits
+ */
+const (
+    lenLowBits     = 3
+    lenLowSymbols  = 1 << lenLowBits
+    lenMidBits     = 3
+    lenMidSymbols  = 1 << lenMidBits
+    lenHighBits    = 8
+    lenHighSymbols = 1 << lenHighBits
+)
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+const distStates = 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+func lzmaGetDistState(len uint32) uint32 {
+    if len < distStates+matchLenMin {
+        return len - matchLenMin
+    } else {
+        return distStates - 1
+    }
+}
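The length-coder table above fixes how many bits each match length costs: short matches get the cheapest encoding. A back-of-the-envelope helper that just tabulates the three cases (not part of the decoder):

```go
package main

import "fmt"

// lenBits returns how many bits the LZMA length coder spends on a
// match length, following the Choice/Choice2 scheme described above.
func lenBits(length int) int {
	switch {
	case length <= 9:
		return 4 // Choice=0 + 3 bits
	case length <= 17:
		return 5 // Choice=1 + Choice2=0 + 3 bits
	default:
		return 10 // Choice=1 + Choice2=1 + 8 bits
	}
}

func main() {
	for _, l := range []int{2, 9, 10, 273} {
		fmt.Printf("match length %3d -> %2d bits\n", l, lenBits(l))
	}
}
```

+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot.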
This way encoding a 32-bit + * value takes 6-36 bits, larger values taking more bits. + */ +const ( + distSlotBits = 6 + distSlots = 1 << distSlotBits +) + +/* Match distances up to 127 are fully encoded using probabilities. Since + * the highest two bits (distance slot) are always encoded using six bits, + * the distances 0-3 don't need any additional bits to encode, since the + * distance slot itself is the same as the actual distance. distModelStart + * indicates the first distance slot where at least one additional bit is + * needed. + */ +const distModelStart = 4 + +/* + * Match distances greater than 127 are encoded in three pieces: + * - distance slot: the highest two bits + * - direct bits: 2-26 bits below the highest two bits + * - alignment bits: four lowest bits + * + * Direct bits don't use any probabilities. + * + * The distance slot value of 14 is for distances 128-191. + */ +const distModelEnd = 14 + +/* Distance slots that indicate a distance <= 127. */ +const ( + fullDistancesBits = distModelEnd / 2 + fullDistances = 1 << fullDistancesBits +) + +/* + * For match distances greater than 127, only the highest two bits and the + * lowest four bits (alignment) is encoded using probabilities. + */ +const ( + alignBits = 4 + alignSize = 1 << alignBits +) + +/* from linux/lib/xz/xz_dec_lzma2.c ***********************************/ + +/* + * Range decoder initialization eats the first five bytes of each LZMA chunk. + */ +const rcInitBytes = 5 + +/* + * Minimum number of usable input buffer to safely decode one LZMA symbol. + * The worst case is that we decode 22 bits using probabilities and 26 + * direct bits. This may decode at maximum of 20 bytes of input. However, + * lzmaMain does an extra normalization before returning, thus we + * need to put 21 here. + */ +const lzmaInRequired = 21 + +/* + * Dictionary (history buffer) + * + * These are always true: + * start <= pos <= full <= end + * pos <= limit <= end + * end == size + * size <= sizeMax + * len(buf) <= size + */ +type dictionary struct { + /* The history buffer */ + buf []byte + /* Old position in buf (before decoding more data) */ + start uint32 + /* Position in buf */ + pos uint32 + /* + * How full dictionary is. This is used to detect corrupt input that + * would read beyond the beginning of the uncompressed stream. + */ + full uint32 + /* Write limit; we don't write to buf[limit] or later bytes. */ + limit uint32 + /* + * End of the dictionary buffer. This is the same as the + * dictionary size. + */ + end uint32 + /* + * Size of the dictionary as specified in Block Header. This is used + * together with "full" to detect corrupt input that would make us + * read beyond the beginning of the uncompressed stream. + */ + size uint32 + /* Maximum allowed dictionary size. */ + sizeMax uint32 +} + +/* Range decoder */ +type rcDec struct { + rnge uint32 + code uint32 + /* + * Number of initializing bytes remaining to be read + * by rcReadInit. + */ + initBytesLeft uint32 + /* + * Buffer from which we read our input. It can be either + * temp.buf or the caller-provided input buffer. + */ + in []byte + inPos int + inLimit int +} + +/* Probabilities for a length decoder. 
*/ +type lzmaLenDec struct { + /* Probability of match length being at least 10 */ + choice uint16 + /* Probability of match length being at least 18 */ + choice2 uint16 + /* Probabilities for match lengths 2-9 */ + low [posStatesMax][lenLowSymbols]uint16 + /* Probabilities for match lengths 10-17 */ + mid [posStatesMax][lenMidSymbols]uint16 + /* Probabilities for match lengths 18-273 */ + high [lenHighSymbols]uint16 +} + +type lzmaDec struct { + /* Distances of latest four matches */ + rep0 uint32 + rep1 uint32 + rep2 uint32 + rep3 uint32 + /* Types of the most recently seen LZMA symbols */ + state lzmaState + /* + * Length of a match. This is updated so that dictRepeat can + * be called again to finish repeating the whole match. + */ + len uint32 + /* + * LZMA properties or related bit masks (number of literal + * context bits, a mask derived from the number of literal + * position bits, and a mask derived from the number + * position bits) + */ + lc uint32 + literalPosMask uint32 + posMask uint32 + /* If 1, it's a match. Otherwise it's a single 8-bit literal. */ + isMatch [states][posStatesMax]uint16 + /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ + isRep [states]uint16 + /* + * If 0, distance of a repeated match is rep0. + * Otherwise check is_rep1. + */ + isRep0 [states]uint16 + /* + * If 0, distance of a repeated match is rep1. + * Otherwise check is_rep2. + */ + isRep1 [states]uint16 + /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ + isRep2 [states]uint16 + /* + * If 1, the repeated match has length of one byte. Otherwise + * the length is decoded from rep_len_decoder. + */ + isRep0Long [states][posStatesMax]uint16 + /* + * Probability tree for the highest two bits of the match + * distance. There is a separate probability tree for match + * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. + */ + distSlot [distStates][distSlots]uint16 + /* + * Probility trees for additional bits for match distance + * when the distance is in the range [4, 127]. + */ + distSpecial [fullDistances - distModelEnd]uint16 + /* + * Probability tree for the lowest four bits of a match + * distance that is equal to or greater than 128. + */ + distAlign [alignSize]uint16 + /* Length of a normal match */ + matchLenDec lzmaLenDec + /* Length of a repeated match */ + repLenDec lzmaLenDec + /* Probabilities of literals */ + literal [literalCodersMax][literalCoderSize]uint16 +} + +// type of lzma2Dec.sequence +type lzma2Seq int + +const ( + seqControl lzma2Seq = iota + seqUncompressed1 + seqUncompressed2 + seqCompressed0 + seqCompressed1 + seqProperties + seqLZMAPrepare + seqLZMARun + seqCopy +) + +type lzma2Dec struct { + /* Position in xzDecLZMA2Run. */ + sequence lzma2Seq + /* Next position after decoding the compressed size of the chunk. */ + nextSequence lzma2Seq + /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ + uncompressed int + /* + * Compressed size of LZMA chunk or compressed/uncompressed + * size of uncompressed chunk (64 KiB at maximum) + */ + compressed int + /* + * True if dictionary reset is needed. This is false before + * the first chunk (LZMA or uncompressed). + */ + needDictReset bool + /* + * True if new LZMA properties are needed. This is false + * before the first LZMA chunk. + */ + needProps bool +} + +type xzDecLZMA2 struct { + /* + * The order below is important on x86 to reduce code size and + * it shouldn't hurt on other platforms. 
Everything up to and + * including lzma.pos_mask are in the first 128 bytes on x86-32, + * which allows using smaller instructions to access those + * variables. On x86-64, fewer variables fit into the first 128 + * bytes, but this is still the best order without sacrificing + * the readability by splitting the structures. + */ + rc rcDec + dict dictionary + lzma2 lzma2Dec + lzma lzmaDec + /* + * Temporary buffer which holds small number of input bytes between + * decoder calls. See lzma2LZMA for details. + */ + temp struct { + buf []byte // slice buf will be backed by bufArray + bufArray [3 * lzmaInRequired]byte + } +} + +/************** + * Dictionary * + **************/ + +/* + * Reset the dictionary state. When in single-call mode, set up the beginning + * of the dictionary to point to the actual output buffer. + */ +func dictReset(dict *dictionary, b *xzBuf) { + dict.start = 0 + dict.pos = 0 + dict.limit = 0 + dict.full = 0 +} + +/* Set dictionary write limit */ +func dictLimit(dict *dictionary, outMax int) { + if dict.end-dict.pos <= uint32(outMax) { + dict.limit = dict.end + } else { + dict.limit = dict.pos + uint32(outMax) + } +} + +/* Return true if at least one byte can be written into the dictionary. */ +func dictHasSpace(dict *dictionary) bool { + return dict.pos < dict.limit +} + +/* + * Get a byte from the dictionary at the given distance. The distance is + * assumed to valid, or as a special case, zero when the dictionary is + * still empty. This special case is needed for single-call decoding to + * avoid writing a '\x00' to the end of the destination buffer. + */ +func dictGet(dict *dictionary, dist uint32) uint32 { + var offset uint32 = dict.pos - dist - 1 + if dist >= dict.pos { + offset += dict.end + } + if dict.full > 0 { + return uint32(dict.buf[offset]) + } + return 0 +} + +/* + * Put one byte into the dictionary. It is assumed that there is space for it. + */ +func dictPut(dict *dictionary, byte byte) { + dict.buf[dict.pos] = byte + dict.pos++ + if dict.full < dict.pos { + dict.full = dict.pos + } +} + +/* + * Repeat given number of bytes from the given distance. If the distance is + * invalid, false is returned. On success, true is returned and *len is + * updated to indicate how many bytes were left to be repeated. + */ +func dictRepeat(dict *dictionary, len *uint32, dist uint32) bool { + var back uint32 + var left uint32 + if dist >= dict.full || dist >= dict.size { + return false + } + left = dict.limit - dict.pos + if left > *len { + left = *len + } + *len -= left + back = dict.pos - dist - 1 + if dist >= dict.pos { + back += dict.end + } + for { + dict.buf[dict.pos] = dict.buf[back] + dict.pos++ + back++ + if back == dict.end { + back = 0 + } + left-- + if !(left > 0) { + break + } + } + if dict.full < dict.pos { + dict.full = dict.pos + } + return true +} + +/* Copy uncompressed data as is from input to dictionary and output buffers. 
*/ +func dictUncompressed(dict *dictionary, b *xzBuf, left *int) { + var copySize int + for *left > 0 && b.inPos < len(b.in) && b.outPos < len(b.out) { + copySize = len(b.in) - b.inPos + if copySize > len(b.out)-b.outPos { + copySize = len(b.out) - b.outPos + } + if copySize > int(dict.end-dict.pos) { + copySize = int(dict.end - dict.pos) + } + if copySize > *left { + copySize = *left + } + *left -= copySize + copy(dict.buf[dict.pos:], b.in[b.inPos:b.inPos+copySize]) + dict.pos += uint32(copySize) + if dict.full < dict.pos { + dict.full = dict.pos + } + if dict.pos == dict.end { + dict.pos = 0 + } + copy(b.out[b.outPos:], b.in[b.inPos:b.inPos+copySize]) + dict.start = dict.pos + b.outPos += copySize + b.inPos += copySize + } +} + +/* + * Flush pending data from dictionary to b.out. It is assumed that there is + * enough space in b.out. This is guaranteed because caller uses dictLimit + * before decoding data into the dictionary. + */ +func dictFlush(dict *dictionary, b *xzBuf) int { + var copySize int = int(dict.pos - dict.start) + if dict.pos == dict.end { + dict.pos = 0 + } + copy(b.out[b.outPos:], dict.buf[dict.start:dict.start+uint32(copySize)]) + dict.start = dict.pos + b.outPos += copySize + return copySize +} + +/***************** + * Range decoder * + *****************/ + +/* Reset the range decoder. */ +func rcReset(rc *rcDec) { + rc.rnge = ^uint32(0) + rc.code = 0 + rc.initBytesLeft = rcInitBytes +} + +/* + * Read the first five initial bytes into rc->code if they haven't been + * read already. (Yes, the first byte gets completely ignored.) + */ +func rcReadInit(rc *rcDec, b *xzBuf) bool { + for rc.initBytesLeft > 0 { + if b.inPos == len(b.in) { + return false + } + rc.code = rc.code<<8 + uint32(b.in[b.inPos]) + b.inPos++ + rc.initBytesLeft-- + } + return true +} + +/* Return true if there may not be enough input for the next decoding loop. */ +func rcLimitExceeded(rc *rcDec) bool { + return rc.inPos > rc.inLimit +} + +/* + * Return true if it is possible (from point of view of range decoder) that + * we have reached the end of the LZMA chunk. + */ +func rcIsFinished(rc *rcDec) bool { + return rc.code == 0 +} + +/* Read the next input byte if needed. */ +func rcNormalize(rc *rcDec) { + if rc.rnge < rcTopValue { + rc.rnge <<= rcShiftBits + rc.code = rc.code<> rcBitModelTotalBits) * uint32(*prob) + if rc.code < bound { + rc.rnge = bound + *prob += (rcBitModelTotal - *prob) >> rcMoveBits + bit = false + } else { + rc.rnge -= bound + rc.code -= bound + *prob -= *prob >> rcMoveBits + bit = true + } + return bit +} + +/* Decode a bittree starting from the most significant bit. */ +func rcBittree(rc *rcDec, probs []uint16, limit uint32) uint32 { + var symbol uint32 = 1 + for { + if rcBit(rc, &probs[symbol-1]) { + symbol = symbol<<1 + 1 + } else { + symbol <<= 1 + } + if !(symbol < limit) { + break + } + } + return symbol +} + +/* Decode a bittree starting from the least significant bit. 
*/ +func rcBittreeReverse(rc *rcDec, probs []uint16, dest *uint32, limit uint32) { + var symbol uint32 = 1 + var i uint32 = 0 + for { + if rcBit(rc, &probs[symbol-1]) { + symbol = symbol<<1 + 1 + *dest += 1 << i + } else { + symbol <<= 1 + } + i++ + if !(i < limit) { + break + } + } +} + +/* Decode direct bits (fixed fifty-fifty probability) */ +func rcDirect(rc *rcDec, dest *uint32, limit uint32) { + var mask uint32 + for { + rcNormalize(rc) + rc.rnge >>= 1 + rc.code -= rc.rnge + mask = 0 - rc.code>>31 + rc.code += rc.rnge & mask + *dest = *dest<<1 + mask + 1 + limit-- + if !(limit > 0) { + break + } + } +} + +/******** + * LZMA * + ********/ + +/* Get pointer to literal coder probability array. */ +func lzmaLiteralProbs(s *xzDecLZMA2) []uint16 { + var prevByte uint32 = dictGet(&s.dict, 0) + var low uint32 = prevByte >> (8 - s.lzma.lc) + var high uint32 = (s.dict.pos & s.lzma.literalPosMask) << s.lzma.lc + return s.lzma.literal[low+high][:] +} + +/* Decode a literal (one 8-bit byte) */ +func lzmaLiteral(s *xzDecLZMA2) { + var probs []uint16 + var symbol uint32 + var matchByte uint32 + var matchBit uint32 + var offset uint32 + var i uint32 + probs = lzmaLiteralProbs(s) + if lzmaStateIsLiteral(s.lzma.state) { + symbol = rcBittree(&s.rc, probs[1:], 0x100) + } else { + symbol = 1 + matchByte = dictGet(&s.dict, s.lzma.rep0) << 1 + offset = 0x100 + for { + matchBit = matchByte & offset + matchByte <<= 1 + i = offset + matchBit + symbol + if rcBit(&s.rc, &probs[i]) { + symbol = symbol<<1 + 1 + offset &= matchBit + } else { + symbol <<= 1 + offset &= ^matchBit + } + if !(symbol < 0x100) { + break + } + } + } + dictPut(&s.dict, byte(symbol)) + lzmaStateLiteral(&s.lzma.state) +} + +/* Decode the length of the match into s.lzma.len. */ +func lzmaLen(s *xzDecLZMA2, l *lzmaLenDec, posState uint32) { + var probs []uint16 + var limit uint32 + switch { + case !rcBit(&s.rc, &l.choice): + probs = l.low[posState][:] + limit = lenLowSymbols + s.lzma.len = matchLenMin + case !rcBit(&s.rc, &l.choice2): + probs = l.mid[posState][:] + limit = lenMidSymbols + s.lzma.len = matchLenMin + lenLowSymbols + default: + probs = l.high[:] + limit = lenHighSymbols + s.lzma.len = matchLenMin + lenLowSymbols + lenMidSymbols + } + s.lzma.len += rcBittree(&s.rc, probs[1:], limit) - limit +} + +/* Decode a match. The distance will be stored in s.lzma.rep0. */ +func lzmaMatch(s *xzDecLZMA2, posState uint32) { + var probs []uint16 + var distSlot uint32 + var limit uint32 + lzmaStateMatch(&s.lzma.state) + s.lzma.rep3 = s.lzma.rep2 + s.lzma.rep2 = s.lzma.rep1 + s.lzma.rep1 = s.lzma.rep0 + lzmaLen(s, &s.lzma.matchLenDec, posState) + probs = s.lzma.distSlot[lzmaGetDistState(s.lzma.len)][:] + distSlot = rcBittree(&s.rc, probs[1:], distSlots) - distSlots + if distSlot < distModelStart { + s.lzma.rep0 = distSlot + } else { + limit = distSlot>>1 - 1 + s.lzma.rep0 = 2 + distSlot&1 + if distSlot < distModelEnd { + s.lzma.rep0 <<= limit + probs = s.lzma.distSpecial[s.lzma.rep0-distSlot:] + rcBittreeReverse(&s.rc, probs, &s.lzma.rep0, limit) + } else { + rcDirect(&s.rc, &s.lzma.rep0, limit-alignBits) + s.lzma.rep0 <<= alignBits + rcBittreeReverse( + &s.rc, s.lzma.distAlign[1:], &s.lzma.rep0, alignBits) + } + } +} + +/* + * Decode a repeated match. The distance is one of the four most recently + * seen matches. The distance will be stored in s.lzma.rep0. 
+ */ +func lzmaRepMatch(s *xzDecLZMA2, posState uint32) { + var tmp uint32 + if !rcBit(&s.rc, &s.lzma.isRep0[s.lzma.state]) { + if !rcBit(&s.rc, &s.lzma.isRep0Long[s.lzma.state][posState]) { + lzmaStateShortRep(&s.lzma.state) + s.lzma.len = 1 + return + } + } else { + if !rcBit(&s.rc, &s.lzma.isRep1[s.lzma.state]) { + tmp = s.lzma.rep1 + } else { + if !rcBit(&s.rc, &s.lzma.isRep2[s.lzma.state]) { + tmp = s.lzma.rep2 + } else { + tmp = s.lzma.rep3 + s.lzma.rep3 = s.lzma.rep2 + } + s.lzma.rep2 = s.lzma.rep1 + } + s.lzma.rep1 = s.lzma.rep0 + s.lzma.rep0 = tmp + } + lzmaStateLongRep(&s.lzma.state) + lzmaLen(s, &s.lzma.repLenDec, posState) +} + +/* LZMA decoder core */ +func lzmaMain(s *xzDecLZMA2) bool { + var posState uint32 + /* + * If the dictionary was reached during the previous call, try to + * finish the possibly pending repeat in the dictionary. + */ + if dictHasSpace(&s.dict) && s.lzma.len > 0 { + dictRepeat(&s.dict, &s.lzma.len, s.lzma.rep0) + } + /* + * Decode more LZMA symbols. One iteration may consume up to + * lzmaInRequired - 1 bytes. + */ + for dictHasSpace(&s.dict) && !rcLimitExceeded(&s.rc) { + posState = s.dict.pos & s.lzma.posMask + if !rcBit(&s.rc, &s.lzma.isMatch[s.lzma.state][posState]) { + lzmaLiteral(s) + } else { + if rcBit(&s.rc, &s.lzma.isRep[s.lzma.state]) { + lzmaRepMatch(s, posState) + } else { + lzmaMatch(s, posState) + } + if !dictRepeat(&s.dict, &s.lzma.len, s.lzma.rep0) { + return false + } + } + } + /* + * Having the range decoder always normalized when we are outside + * this function makes it easier to correctly handle end of the chunk. + */ + rcNormalize(&s.rc) + return true +} + +/* + * Reset the LZMA decoder and range decoder state. Dictionary is not reset + * here, because LZMA state may be reset without resetting the dictionary. + */ +func lzmaReset(s *xzDecLZMA2) { + s.lzma.state = stateLitLit + s.lzma.rep0 = 0 + s.lzma.rep1 = 0 + s.lzma.rep2 = 0 + s.lzma.rep3 = 0 + /* All probabilities are initialized to the same value, v */ + v := uint16(rcBitModelTotal / 2) + s.lzma.matchLenDec.choice = v + s.lzma.matchLenDec.choice2 = v + s.lzma.repLenDec.choice = v + s.lzma.repLenDec.choice2 = v + for _, m := range [][]uint16{ + s.lzma.isRep[:], s.lzma.isRep0[:], s.lzma.isRep1[:], + s.lzma.isRep2[:], s.lzma.distSpecial[:], s.lzma.distAlign[:], + s.lzma.matchLenDec.high[:], s.lzma.repLenDec.high[:], + } { + for j := range m { + m[j] = v + } + } + for i := range s.lzma.isMatch { + for j := range s.lzma.isMatch[i] { + s.lzma.isMatch[i][j] = v + } + } + for i := range s.lzma.isRep0Long { + for j := range s.lzma.isRep0Long[i] { + s.lzma.isRep0Long[i][j] = v + } + } + for i := range s.lzma.distSlot { + for j := range s.lzma.distSlot[i] { + s.lzma.distSlot[i][j] = v + } + } + for i := range s.lzma.literal { + for j := range s.lzma.literal[i] { + s.lzma.literal[i][j] = v + } + } + for i := range s.lzma.matchLenDec.low { + for j := range s.lzma.matchLenDec.low[i] { + s.lzma.matchLenDec.low[i][j] = v + } + } + for i := range s.lzma.matchLenDec.mid { + for j := range s.lzma.matchLenDec.mid[i] { + s.lzma.matchLenDec.mid[i][j] = v + } + } + for i := range s.lzma.repLenDec.low { + for j := range s.lzma.repLenDec.low[i] { + s.lzma.repLenDec.low[i][j] = v + } + } + for i := range s.lzma.repLenDec.mid { + for j := range s.lzma.repLenDec.mid[i] { + s.lzma.repLenDec.mid[i][j] = v + } + } + rcReset(&s.rc) +} + +/* + * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks + * from the decoded lp and pb values. 
On success, the LZMA decoder state is
+ * reset and true is returned.
+ */
+func lzmaProps(s *xzDecLZMA2, props byte) bool {
+    if props > (4*5+4)*9+8 {
+        return false
+    }
+    s.lzma.posMask = 0
+    for props >= 9*5 {
+        props -= 9 * 5
+        s.lzma.posMask++
+    }
+    s.lzma.posMask = 1<<s.lzma.posMask - 1
+    s.lzma.literalPosMask = 0
+    for props >= 9 {
+        props -= 9
+        s.lzma.literalPosMask++
+    }
+    s.lzma.lc = uint32(props)
+    if s.lzma.lc+s.lzma.literalPosMask > 4 {
+        return false
+    }
+    s.lzma.literalPosMask = 1<<s.lzma.literalPosMask - 1
+    lzmaReset(s)
+    return true
+}
+
+/*
+ * Wrapper function to the LZMA decoder: buffer the input so that lzmaMain
+ * always has at least lzmaInRequired usable bytes, using temp.buf to carry
+ * leftover bytes across calls when needed.
+ */
+func lzma2LZMA(s *xzDecLZMA2, b *xzBuf) bool {
+    var inAvail int
+    var tmp int
+    inAvail = len(b.in) - b.inPos
+    if len(s.temp.buf) > 0 || s.lzma2.compressed == 0 {
+        tmp = 2*lzmaInRequired - len(s.temp.buf)
+        if tmp > s.lzma2.compressed-len(s.temp.buf) {
+            tmp = s.lzma2.compressed - len(s.temp.buf)
+        }
+        if tmp > inAvail {
+            tmp = inAvail
+        }
+        copy(s.temp.bufArray[len(s.temp.buf):], b.in[b.inPos:b.inPos+tmp])
+        switch {
+        case len(s.temp.buf)+tmp == s.lzma2.compressed:
+            for i := len(s.temp.buf) + tmp; i < len(s.temp.bufArray); i++ {
+                s.temp.bufArray[i] = 0
+            }
+            s.rc.inLimit = len(s.temp.buf) + tmp
+        case len(s.temp.buf)+tmp < lzmaInRequired:
+            s.temp.buf = s.temp.bufArray[:len(s.temp.buf)+tmp]
+            b.inPos += tmp
+            return true
+        default:
+            s.rc.inLimit = len(s.temp.buf) + tmp - lzmaInRequired
+        }
+        s.rc.in = s.temp.bufArray[:]
+        s.rc.inPos = 0
+        if !lzmaMain(s) || s.rc.inPos > len(s.temp.buf)+tmp {
+            return false
+        }
+        s.lzma2.compressed -= s.rc.inPos
+        if s.rc.inPos < len(s.temp.buf) {
+            copy(s.temp.buf, s.temp.buf[s.rc.inPos:])
+            s.temp.buf = s.temp.buf[:len(s.temp.buf)-s.rc.inPos]
+            return true
+        }
+        b.inPos += s.rc.inPos - len(s.temp.buf)
+        s.temp.buf = nil
+    }
+    inAvail = len(b.in) - b.inPos
+    if inAvail >= lzmaInRequired {
+        s.rc.in = b.in
+        s.rc.inPos = b.inPos
+        if inAvail >= s.lzma2.compressed+lzmaInRequired {
+            s.rc.inLimit = b.inPos + s.lzma2.compressed
+        } else {
+            s.rc.inLimit = len(b.in) - lzmaInRequired
+        }
+        if !lzmaMain(s) {
+            return false
+        }
+        inAvail = s.rc.inPos - b.inPos
+        if inAvail > s.lzma2.compressed {
+            return false
+        }
+        s.lzma2.compressed -= inAvail
+        b.inPos = s.rc.inPos
+    }
+    inAvail = len(b.in) - b.inPos
+    if inAvail < lzmaInRequired {
+        if inAvail > s.lzma2.compressed {
+            inAvail = s.lzma2.compressed
+        }
+        s.temp.buf = s.temp.bufArray[:inAvail]
+        copy(s.temp.buf, b.in[b.inPos:])
+        b.inPos += inAvail
+    }
+    return true
+}
+
+/*
+ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
+ * decoding or copying of uncompressed chunks to other functions.
+ */
+func xzDecLZMA2Run(s *xzDecLZMA2, b *xzBuf) xzRet {
+    var tmp int
+    for b.inPos < len(b.in) || s.lzma2.sequence == seqLZMARun {
+        switch s.lzma2.sequence {
+        case seqControl:
+            /*
+             * LZMA2 control byte
+             *
+             * Exact values:
+             *   0x00  End marker
+             *   0x01  Dictionary reset followed by
+             *         an uncompressed chunk
+             *   0x02  Uncompressed chunk (no dictionary reset)
+             *
+             * Highest three bits (s.control & 0xE0):
+             *   0xE0  Dictionary reset, new properties and state
+             *         reset, followed by LZMA compressed chunk
+             *   0xC0  New properties and state reset, followed
+             *         by LZMA compressed chunk (no dictionary
+             *         reset)
+             *   0xA0  State reset using old properties,
+             *         followed by LZMA compressed chunk (no
+             *         dictionary reset)
+             *   0x80  LZMA chunk (no dictionary or state reset)
+             *
+             * For LZMA compressed chunks, the lowest five bits
+             * (s.control & 0x1F) are the highest bits of the
+             * uncompressed size (bits 16-20).
+             *
+             * A new LZMA2 stream must begin with a dictionary
+             * reset. The first LZMA chunk must set new
+             * properties and reset the LZMA state.
+             *
+             * Values that don't match anything described above
+             * are invalid and we return xzDataError.
+ */ + tmp = int(b.in[b.inPos]) + b.inPos++ + if tmp == 0x00 { + return xzStreamEnd + } + switch { + case tmp >= 0xe0 || tmp == 0x01: + s.lzma2.needProps = true + s.lzma2.needDictReset = false + dictReset(&s.dict, b) + case s.lzma2.needDictReset: + return xzDataError + } + if tmp >= 0x80 { + s.lzma2.uncompressed = (tmp & 0x1f) << 16 + s.lzma2.sequence = seqUncompressed1 + switch { + case tmp >= 0xc0: + /* + * When there are new properties, + * state reset is done at + * seqProperties. + */ + s.lzma2.needProps = false + s.lzma2.nextSequence = seqProperties + case s.lzma2.needProps: + return xzDataError + default: + s.lzma2.nextSequence = seqLZMAPrepare + if tmp >= 0xa0 { + lzmaReset(s) + } + } + } else { + if tmp > 0x02 { + return xzDataError + } + s.lzma2.sequence = seqCompressed0 + s.lzma2.nextSequence = seqCopy + } + case seqUncompressed1: + s.lzma2.uncompressed += int(b.in[b.inPos]) << 8 + b.inPos++ + s.lzma2.sequence = seqUncompressed2 + case seqUncompressed2: + s.lzma2.uncompressed += int(b.in[b.inPos]) + 1 + b.inPos++ + s.lzma2.sequence = seqCompressed0 + case seqCompressed0: + s.lzma2.compressed += int(b.in[b.inPos]) << 8 + b.inPos++ + s.lzma2.sequence = seqCompressed1 + case seqCompressed1: + s.lzma2.compressed += int(b.in[b.inPos]) + 1 + b.inPos++ + s.lzma2.sequence = s.lzma2.nextSequence + case seqProperties: + if !lzmaProps(s, b.in[b.inPos]) { + return xzDataError + } + b.inPos++ + s.lzma2.sequence = seqLZMAPrepare + fallthrough + case seqLZMAPrepare: + if s.lzma2.compressed < rcInitBytes { + return xzDataError + } + if !rcReadInit(&s.rc, b) { + return xzOK + } + s.lzma2.compressed -= rcInitBytes + s.lzma2.sequence = seqLZMARun + fallthrough + case seqLZMARun: + /* + * Set dictionary limit to indicate how much we want + * to be encoded at maximum. Decode new data into the + * dictionary. Flush the new data from dictionary to + * b.out. Check if we finished decoding this chunk. + * In case the dictionary got full but we didn't fill + * the output buffer yet, we may run this loop + * multiple times without changing s.lzma2.sequence. + */ + outMax := len(b.out) - b.outPos + if outMax > s.lzma2.uncompressed { + outMax = s.lzma2.uncompressed + } + dictLimit(&s.dict, outMax) + if !lzma2LZMA(s, b) { + return xzDataError + } + s.lzma2.uncompressed -= dictFlush(&s.dict, b) + switch { + case s.lzma2.uncompressed == 0: + if s.lzma2.compressed > 0 || s.lzma.len > 0 || + !rcIsFinished(&s.rc) { + return xzDataError + } + rcReset(&s.rc) + s.lzma2.sequence = seqControl + case b.outPos == len(b.out) || + b.inPos == len(b.in) && + len(s.temp.buf) < s.lzma2.compressed: + return xzOK + } + case seqCopy: + dictUncompressed(&s.dict, b, &s.lzma2.compressed) + if s.lzma2.compressed > 0 { + return xzOK + } + s.lzma2.sequence = seqControl + } + } + return xzOK +} + +/* + * Allocate memory for LZMA2 decoder. xzDecLZMA2Reset must be used + * before calling xzDecLZMA2Run. + */ +func xzDecLZMA2Create(dictMax uint32) *xzDecLZMA2 { + s := new(xzDecLZMA2) + s.dict.sizeMax = dictMax + return s +} + +/* + * Decode the LZMA2 properties (one byte) and reset the decoder. Return + * xzOK on success, xzMemlimitError if the preallocated dictionary is not + * big enough, and xzOptionsError if props indicates something that this + * decoder doesn't support. 
+ */ +func xzDecLZMA2Reset(s *xzDecLZMA2, props byte) xzRet { + if props > 40 { + return xzOptionsError // Bigger than 4 GiB + } + if props == 40 { + s.dict.size = ^uint32(0) + } else { + s.dict.size = uint32(2 + props&1) + s.dict.size <<= props>>1 + 11 + } + if s.dict.size > s.dict.sizeMax { + return xzMemlimitError + } + s.dict.end = s.dict.size + if len(s.dict.buf) < int(s.dict.size) { + s.dict.buf = make([]byte, s.dict.size) + } + s.lzma.len = 0 + s.lzma2.sequence = seqControl + s.lzma2.compressed = 0 + s.lzma2.uncompressed = 0 + s.lzma2.needDictReset = true + s.temp.buf = nil + return xzOK +} diff --git a/vendor/github.com/xi2/xz/dec_stream.go b/vendor/github.com/xi2/xz/dec_stream.go new file mode 100644 index 00000000..9381a3c8 --- /dev/null +++ b/vendor/github.com/xi2/xz/dec_stream.go @@ -0,0 +1,932 @@ +/* + * .xz Stream decoder + * + * Author: Lasse Collin + * + * Translation to Go: Michael Cross + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +package xz + +import ( + "bytes" + "crypto/sha256" + "hash" + "hash/crc32" + "hash/crc64" +) + +/* from linux/lib/xz/xz_stream.h **************************************/ + +/* + * See the .xz file format specification at + * http://tukaani.org/xz/xz-file-format.txt + * to understand the container format. + */ +const ( + streamHeaderSize = 12 + headerMagic = "\xfd7zXZ\x00" + footerMagic = "YZ" +) + +/* + * Variable-length integer can hold a 63-bit unsigned integer or a special + * value indicating that the value is unknown. + */ +type vliType uint64 + +const ( + vliUnknown vliType = ^vliType(0) + /* Maximum encoded size of a VLI */ + vliBytesMax = 8 * 8 / 7 // (Sizeof(vliType) * 8 / 7) +) + +/* from linux/lib/xz/xz_dec_stream.c **********************************/ + +/* Hash used to validate the Index field */ +type xzDecHash struct { + unpadded vliType + uncompressed vliType + sha256 hash.Hash +} + +// type of xzDec.sequence +type xzDecSeq int + +const ( + seqStreamHeader xzDecSeq = iota + seqBlockStart + seqBlockHeader + seqBlockUncompress + seqBlockPadding + seqBlockCheck + seqIndex + seqIndexPadding + seqIndexCRC32 + seqStreamFooter +) + +// type of xzDec.index.sequence +type xzDecIndexSeq int + +const ( + seqIndexCount xzDecIndexSeq = iota + seqIndexUnpadded + seqIndexUncompressed +) + +/** + * xzDec - Opaque type to hold the XZ decoder state + */ +type xzDec struct { + /* Position in decMain */ + sequence xzDecSeq + /* Position in variable-length integers and Check fields */ + pos int + /* Variable-length integer decoded by decVLI */ + vli vliType + /* Saved inPos and outPos */ + inStart int + outStart int + /* CRC32 checksum hash used in Index */ + crc32 hash.Hash + /* Hashes used in Blocks */ + checkCRC32 hash.Hash + checkCRC64 hash.Hash + checkSHA256 hash.Hash + /* for checkTypes CRC32/CRC64/SHA256, check is one of the above 3 hashes */ + check hash.Hash + /* Embedded stream header struct containing CheckType */ + *Header + /* + * True if the next call to xzDecRun is allowed to return + * xzBufError. + */ + allowBufError bool + /* Information stored in Block Header */ + blockHeader struct { + /* + * Value stored in the Compressed Size field, or + * vliUnknown if Compressed Size is not present. + */ + compressed vliType + /* + * Value stored in the Uncompressed Size field, or + * vliUnknown if Uncompressed Size is not present. 
+ */ + uncompressed vliType + /* Size of the Block Header field */ + size int + } + /* Information collected when decoding Blocks */ + block struct { + /* Observed compressed size of the current Block */ + compressed vliType + /* Observed uncompressed size of the current Block */ + uncompressed vliType + /* Number of Blocks decoded so far */ + count vliType + /* + * Hash calculated from the Block sizes. This is used to + * validate the Index field. + */ + hash xzDecHash + } + /* Variables needed when verifying the Index field */ + index struct { + /* Position in decIndex */ + sequence xzDecIndexSeq + /* Size of the Index in bytes */ + size vliType + /* Number of Records (matches block.count in valid files) */ + count vliType + /* + * Hash calculated from the Records (matches block.hash in + * valid files). + */ + hash xzDecHash + } + /* + * Temporary buffer needed to hold Stream Header, Block Header, + * and Stream Footer. The Block Header is the biggest (1 KiB) + * so we reserve space according to that. bufArray has to be aligned + * to a multiple of four bytes; the variables before it + * should guarantee this. + */ + temp struct { + pos int + buf []byte // slice buf will be backed by bufArray + bufArray [1024]byte + } + // chain is the function (or to be more precise, closure) which + // does the decompression and will call into the lzma2 and other + // filter code as needed. It is constructed by decBlockHeader + chain func(b *xzBuf) xzRet + // lzma2 holds the state of the last filter (which must be LZMA2) + lzma2 *xzDecLZMA2 + // pointers to allocated BCJ/Delta filters + bcjs []*xzDecBCJ + deltas []*xzDecDelta + // number of currently in use BCJ/Delta filters from the above + bcjsUsed int + deltasUsed int +} + +/* Sizes of the Check field with different Check IDs */ +var checkSizes = [...]byte{ + 0, + 4, 4, 4, + 8, 8, 8, + 16, 16, 16, + 32, 32, 32, + 64, 64, 64, +} + +/* + * Fill s.temp by copying data starting from b.in[b.inPos]. Caller + * must have set s.temp.pos to indicate how much data we are supposed + * to copy into s.temp.buf. Return true once s.temp.pos has reached + * len(s.temp.buf). + */ +func fillTemp(s *xzDec, b *xzBuf) bool { + copySize := len(b.in) - b.inPos + tempRemaining := len(s.temp.buf) - s.temp.pos + if copySize > tempRemaining { + copySize = tempRemaining + } + copy(s.temp.buf[s.temp.pos:], b.in[b.inPos:]) + b.inPos += copySize + s.temp.pos += copySize + if s.temp.pos == len(s.temp.buf) { + s.temp.pos = 0 + return true + } + return false +} + +/* Decode a variable-length integer (little-endian base-128 encoding) */ +func decVLI(s *xzDec, in []byte, inPos *int) xzRet { + var byte byte + if s.pos == 0 { + s.vli = 0 + } + for *inPos < len(in) { + byte = in[*inPos] + *inPos++ + s.vli |= vliType(byte&0x7f) << uint(s.pos) + if byte&0x80 == 0 { + /* Don't allow non-minimal encodings. */ + if byte == 0 && s.pos != 0 { + return xzDataError + } + s.pos = 0 + return xzStreamEnd + } + s.pos += 7 + if s.pos == 7*vliBytesMax { + return xzDataError + } + } + return xzOK +} + +/* + * Decode the Compressed Data field from a Block. Update and validate + * the observed compressed and uncompressed sizes of the Block so that + * they don't exceed the values possibly stored in the Block Header + * (validation assumes that no integer overflow occurs, since vliType + * is uint64). Update s.check if presence of the CRC32/CRC64/SHA256 + * field was indicated in Stream Header. 
+ * + * Once the decoding is finished, validate that the observed sizes match + * the sizes possibly stored in the Block Header. Update the hash and + * Block count, which are later used to validate the Index field. + */ +func decBlock(s *xzDec, b *xzBuf) xzRet { + var ret xzRet + s.inStart = b.inPos + s.outStart = b.outPos + ret = s.chain(b) + s.block.compressed += vliType(b.inPos - s.inStart) + s.block.uncompressed += vliType(b.outPos - s.outStart) + /* + * There is no need to separately check for vliUnknown since + * the observed sizes are always smaller than vliUnknown. + */ + if s.block.compressed > s.blockHeader.compressed || + s.block.uncompressed > s.blockHeader.uncompressed { + return xzDataError + } + switch s.CheckType { + case CheckCRC32, CheckCRC64, CheckSHA256: + _, _ = s.check.Write(b.out[s.outStart:b.outPos]) + } + if ret == xzStreamEnd { + if s.blockHeader.compressed != vliUnknown && + s.blockHeader.compressed != s.block.compressed { + return xzDataError + } + if s.blockHeader.uncompressed != vliUnknown && + s.blockHeader.uncompressed != s.block.uncompressed { + return xzDataError + } + s.block.hash.unpadded += + vliType(s.blockHeader.size) + s.block.compressed + s.block.hash.unpadded += vliType(checkSizes[s.CheckType]) + s.block.hash.uncompressed += s.block.uncompressed + var buf [2 * 8]byte // 2*Sizeof(vliType) + putLE64(uint64(s.block.hash.unpadded), buf[:]) + putLE64(uint64(s.block.hash.uncompressed), buf[8:]) + _, _ = s.block.hash.sha256.Write(buf[:]) + s.block.count++ + } + return ret +} + +/* Update the Index size and the CRC32 hash. */ +func indexUpdate(s *xzDec, b *xzBuf) { + inUsed := b.inPos - s.inStart + s.index.size += vliType(inUsed) + _, _ = s.crc32.Write(b.in[s.inStart : s.inStart+inUsed]) +} + +/* + * Decode the Number of Records, Unpadded Size, and Uncompressed Size + * fields from the Index field. That is, Index Padding and CRC32 are not + * decoded by this function. + * + * This can return xzOK (more input needed), xzStreamEnd (everything + * successfully decoded), or xzDataError (input is corrupt). + */ +func decIndex(s *xzDec, b *xzBuf) xzRet { + var ret xzRet + for { + ret = decVLI(s, b.in, &b.inPos) + if ret != xzStreamEnd { + indexUpdate(s, b) + return ret + } + switch s.index.sequence { + case seqIndexCount: + s.index.count = s.vli + /* + * Validate that the Number of Records field + * indicates the same number of Records as + * there were Blocks in the Stream. + */ + if s.index.count != s.block.count { + return xzDataError + } + s.index.sequence = seqIndexUnpadded + case seqIndexUnpadded: + s.index.hash.unpadded += s.vli + s.index.sequence = seqIndexUncompressed + case seqIndexUncompressed: + s.index.hash.uncompressed += s.vli + var buf [2 * 8]byte // 2*Sizeof(vliType) + putLE64(uint64(s.index.hash.unpadded), buf[:]) + putLE64(uint64(s.index.hash.uncompressed), buf[8:]) + _, _ = s.index.hash.sha256.Write(buf[:]) + s.index.count-- + s.index.sequence = seqIndexUnpadded + } + if !(s.index.count > 0) { + break + } + } + return xzStreamEnd +} + +/* + * Validate that the next 4 bytes match s.crc32.Sum(nil). s.pos must + * be zero when starting to validate the first byte. 
+ */ +func crcValidate(s *xzDec, b *xzBuf) xzRet { + sum := s.crc32.Sum(nil) + // CRC32 - reverse slice + sum[0], sum[1], sum[2], sum[3] = sum[3], sum[2], sum[1], sum[0] + for { + if b.inPos == len(b.in) { + return xzOK + } + if sum[s.pos] != b.in[b.inPos] { + return xzDataError + } + b.inPos++ + s.pos++ + if !(s.pos < 4) { + break + } + } + s.crc32.Reset() + s.pos = 0 + return xzStreamEnd +} + +/* + * Validate that the next 4/8/32 bytes match s.check.Sum(nil). s.pos + * must be zero when starting to validate the first byte. + */ +func checkValidate(s *xzDec, b *xzBuf) xzRet { + sum := s.check.Sum(nil) + if s.CheckType == CheckCRC32 || s.CheckType == CheckCRC64 { + // CRC32/64 - reverse slice + for i, j := 0, len(sum)-1; i < j; i, j = i+1, j-1 { + sum[i], sum[j] = sum[j], sum[i] + } + } + for { + if b.inPos == len(b.in) { + return xzOK + } + if sum[s.pos] != b.in[b.inPos] { + return xzDataError + } + b.inPos++ + s.pos++ + if !(s.pos < len(sum)) { + break + } + } + s.check.Reset() + s.pos = 0 + return xzStreamEnd +} + +/* + * Skip over the Check field when the Check ID is not supported. + * Returns true once the whole Check field has been skipped over. + */ +func checkSkip(s *xzDec, b *xzBuf) bool { + for s.pos < int(checkSizes[s.CheckType]) { + if b.inPos == len(b.in) { + return false + } + b.inPos++ + s.pos++ + } + s.pos = 0 + return true +} + +/* polynomial table used in decStreamHeader below */ +var xzCRC64Table = crc64.MakeTable(crc64.ECMA) + +/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ +func decStreamHeader(s *xzDec) xzRet { + if string(s.temp.buf[:len(headerMagic)]) != headerMagic { + return xzFormatError + } + if crc32.ChecksumIEEE(s.temp.buf[len(headerMagic):len(headerMagic)+2]) != + getLE32(s.temp.buf[len(headerMagic)+2:]) { + return xzDataError + } + if s.temp.buf[len(headerMagic)] != 0 { + return xzOptionsError + } + /* + * Of integrity checks, we support none (Check ID = 0), + * CRC32 (Check ID = 1), CRC64 (Check ID = 4) and SHA256 (Check ID = 10) + * However, we will accept other check types too, but then the check + * won't be verified and a warning (xzUnsupportedCheck) will be given. + */ + s.CheckType = CheckID(s.temp.buf[len(headerMagic)+1]) + if s.CheckType > checkMax { + return xzOptionsError + } + switch s.CheckType { + case CheckNone: + // CheckNone: no action needed + case CheckCRC32: + if s.checkCRC32 == nil { + s.checkCRC32 = crc32.NewIEEE() + } else { + s.checkCRC32.Reset() + } + s.check = s.checkCRC32 + case CheckCRC64: + if s.checkCRC64 == nil { + s.checkCRC64 = crc64.New(xzCRC64Table) + } else { + s.checkCRC64.Reset() + } + s.check = s.checkCRC64 + case CheckSHA256: + if s.checkSHA256 == nil { + s.checkSHA256 = sha256.New() + } else { + s.checkSHA256.Reset() + } + s.check = s.checkSHA256 + default: + return xzUnsupportedCheck + } + return xzOK +} + +/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ +func decStreamFooter(s *xzDec) xzRet { + if string(s.temp.buf[10:10+len(footerMagic)]) != footerMagic { + return xzDataError + } + if crc32.ChecksumIEEE(s.temp.buf[4:10]) != getLE32(s.temp.buf) { + return xzDataError + } + /* + * Validate Backward Size. Note that we never added the size of the + * Index CRC32 field to s->index.size, thus we use s->index.size / 4 + * instead of s->index.size / 4 - 1. 
+ */ + if s.index.size>>2 != vliType(getLE32(s.temp.buf[4:])) { + return xzDataError + } + if s.temp.buf[8] != 0 || CheckID(s.temp.buf[9]) != s.CheckType { + return xzDataError + } + /* + * Use xzStreamEnd instead of xzOK to be more convenient + * for the caller. + */ + return xzStreamEnd +} + +/* Decode the Block Header and initialize the filter chain. */ +func decBlockHeader(s *xzDec) xzRet { + var ret xzRet + /* + * Validate the CRC32. We know that the temp buffer is at least + * eight bytes so this is safe. + */ + crc := getLE32(s.temp.buf[len(s.temp.buf)-4:]) + s.temp.buf = s.temp.buf[:len(s.temp.buf)-4] + if crc32.ChecksumIEEE(s.temp.buf) != crc { + return xzDataError + } + s.temp.pos = 2 + /* + * Catch unsupported Block Flags. + */ + if s.temp.buf[1]&0x3C != 0 { + return xzOptionsError + } + /* Compressed Size */ + if s.temp.buf[1]&0x40 != 0 { + if decVLI(s, s.temp.buf, &s.temp.pos) != xzStreamEnd { + return xzDataError + } + if s.vli >= 1<<63-8 { + // the whole block must stay smaller than 2^63 bytes + // the block header cannot be smaller than 8 bytes + return xzDataError + } + if s.vli == 0 { + // compressed size must be non-zero + return xzDataError + } + s.blockHeader.compressed = s.vli + } else { + s.blockHeader.compressed = vliUnknown + } + /* Uncompressed Size */ + if s.temp.buf[1]&0x80 != 0 { + if decVLI(s, s.temp.buf, &s.temp.pos) != xzStreamEnd { + return xzDataError + } + s.blockHeader.uncompressed = s.vli + } else { + s.blockHeader.uncompressed = vliUnknown + } + // get total number of filters (1-4) + filterTotal := int(s.temp.buf[1]&0x03) + 1 + // slice to hold decoded filters + filterList := make([]struct { + id xzFilterID + props uint32 + }, filterTotal) + // decode the non-last filters which cannot be LZMA2 + for i := 0; i < filterTotal-1; i++ { + /* Valid Filter Flags always take at least two bytes. */ + if len(s.temp.buf)-s.temp.pos < 2 { + return xzDataError + } + s.temp.pos += 2 + switch id := xzFilterID(s.temp.buf[s.temp.pos-2]); id { + case idDelta: + // delta filter + if s.temp.buf[s.temp.pos-1] != 0x01 { + return xzOptionsError + } + /* Filter Properties contains distance - 1 */ + if len(s.temp.buf)-s.temp.pos < 1 { + return xzDataError + } + props := uint32(s.temp.buf[s.temp.pos]) + s.temp.pos++ + filterList[i] = struct { + id xzFilterID + props uint32 + }{id: id, props: props} + case idBCJX86, idBCJPowerPC, idBCJIA64, + idBCJARM, idBCJARMThumb, idBCJSPARC: + // bcj filter + var props uint32 + switch s.temp.buf[s.temp.pos-1] { + case 0x00: + props = 0 + case 0x04: + if len(s.temp.buf)-s.temp.pos < 4 { + return xzDataError + } + props = getLE32(s.temp.buf[s.temp.pos:]) + s.temp.pos += 4 + default: + return xzOptionsError + } + filterList[i] = struct { + id xzFilterID + props uint32 + }{id: id, props: props} + default: + return xzOptionsError + } + } + /* + * decode the last filter which must be LZMA2 + */ + if len(s.temp.buf)-s.temp.pos < 2 { + return xzDataError + } + /* Filter ID = LZMA2 */ + if xzFilterID(s.temp.buf[s.temp.pos]) != idLZMA2 { + return xzOptionsError + } + s.temp.pos++ + /* Size of Properties = 1-byte Filter Properties */ + if s.temp.buf[s.temp.pos] != 0x01 { + return xzOptionsError + } + s.temp.pos++ + /* Filter Properties contains LZMA2 dictionary size. 
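+ * (a single property byte, decoded by xzDecLZMA2Reset once the
+ * filter chain is set up)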
*/ + if len(s.temp.buf)-s.temp.pos < 1 { + return xzDataError + } + props := uint32(s.temp.buf[s.temp.pos]) + s.temp.pos++ + filterList[filterTotal-1] = struct { + id xzFilterID + props uint32 + }{id: idLZMA2, props: props} + /* + * Process the filter list and create s.chain, going from last + * filter (LZMA2) to first filter + * + * First, LZMA2. + */ + ret = xzDecLZMA2Reset(s.lzma2, byte(filterList[filterTotal-1].props)) + if ret != xzOK { + return ret + } + s.chain = func(b *xzBuf) xzRet { + return xzDecLZMA2Run(s.lzma2, b) + } + /* + * Now the non-last filters + */ + for i := filterTotal - 2; i >= 0; i-- { + switch id := filterList[i].id; id { + case idDelta: + // delta filter + var delta *xzDecDelta + if s.deltasUsed < len(s.deltas) { + delta = s.deltas[s.deltasUsed] + } else { + delta = xzDecDeltaCreate() + s.deltas = append(s.deltas, delta) + } + s.deltasUsed++ + ret = xzDecDeltaReset(delta, int(filterList[i].props)+1) + if ret != xzOK { + return ret + } + chain := s.chain + s.chain = func(b *xzBuf) xzRet { + return xzDecDeltaRun(delta, b, chain) + } + case idBCJX86, idBCJPowerPC, idBCJIA64, + idBCJARM, idBCJARMThumb, idBCJSPARC: + // bcj filter + var bcj *xzDecBCJ + if s.bcjsUsed < len(s.bcjs) { + bcj = s.bcjs[s.bcjsUsed] + } else { + bcj = xzDecBCJCreate() + s.bcjs = append(s.bcjs, bcj) + } + s.bcjsUsed++ + ret = xzDecBCJReset(bcj, id, int(filterList[i].props)) + if ret != xzOK { + return ret + } + chain := s.chain + s.chain = func(b *xzBuf) xzRet { + return xzDecBCJRun(bcj, b, chain) + } + } + } + /* The rest must be Header Padding. */ + for s.temp.pos < len(s.temp.buf) { + if s.temp.buf[s.temp.pos] != 0x00 { + return xzOptionsError + } + s.temp.pos++ + } + s.temp.pos = 0 + s.block.compressed = 0 + s.block.uncompressed = 0 + return xzOK +} + +func decMain(s *xzDec, b *xzBuf) xzRet { + var ret xzRet + /* + * Store the start position for the case when we are in the middle + * of the Index field. + */ + s.inStart = b.inPos + for { + switch s.sequence { + case seqStreamHeader: + /* + * Stream Header is copied to s.temp, and then + * decoded from there. This way if the caller + * gives us only little input at a time, we can + * still keep the Stream Header decoding code + * simple. Similar approach is used in many places + * in this file. + */ + if !fillTemp(s, b) { + return xzOK + } + /* + * If decStreamHeader returns + * xzUnsupportedCheck, it is still possible + * to continue decoding. Thus, update s.sequence + * before calling decStreamHeader. + */ + s.sequence = seqBlockStart + ret = decStreamHeader(s) + if ret != xzOK { + return ret + } + fallthrough + case seqBlockStart: + /* We need one byte of input to continue. */ + if b.inPos == len(b.in) { + return xzOK + } + /* See if this is the beginning of the Index field. */ + if b.in[b.inPos] == 0 { + s.inStart = b.inPos + b.inPos++ + s.sequence = seqIndex + break + } + /* + * Calculate the size of the Block Header and + * prepare to decode it. + */ + s.blockHeader.size = (int(b.in[b.inPos]) + 1) * 4 + s.temp.buf = s.temp.bufArray[:s.blockHeader.size] + s.temp.pos = 0 + s.sequence = seqBlockHeader + fallthrough + case seqBlockHeader: + if !fillTemp(s, b) { + return xzOK + } + ret = decBlockHeader(s) + if ret != xzOK { + return ret + } + s.sequence = seqBlockUncompress + fallthrough + case seqBlockUncompress: + ret = decBlock(s, b) + if ret != xzStreamEnd { + return ret + } + s.sequence = seqBlockPadding + fallthrough + case seqBlockPadding: + /* + * Size of Compressed Data + Block Padding + * must be a multiple of four. 
We don't need + * s->block.compressed for anything else + * anymore, so we use it here to test the size + * of the Block Padding field. + */ + for s.block.compressed&3 != 0 { + if b.inPos == len(b.in) { + return xzOK + } + if b.in[b.inPos] != 0 { + return xzDataError + } + b.inPos++ + s.block.compressed++ + } + s.sequence = seqBlockCheck + fallthrough + case seqBlockCheck: + switch s.CheckType { + case CheckCRC32, CheckCRC64, CheckSHA256: + ret = checkValidate(s, b) + if ret != xzStreamEnd { + return ret + } + default: + if !checkSkip(s, b) { + return xzOK + } + } + s.sequence = seqBlockStart + case seqIndex: + ret = decIndex(s, b) + if ret != xzStreamEnd { + return ret + } + s.sequence = seqIndexPadding + fallthrough + case seqIndexPadding: + for (s.index.size+vliType(b.inPos-s.inStart))&3 != 0 { + if b.inPos == len(b.in) { + indexUpdate(s, b) + return xzOK + } + if b.in[b.inPos] != 0 { + return xzDataError + } + b.inPos++ + } + /* Finish the CRC32 value and Index size. */ + indexUpdate(s, b) + /* Compare the hashes to validate the Index field. */ + if !bytes.Equal( + s.block.hash.sha256.Sum(nil), s.index.hash.sha256.Sum(nil)) { + return xzDataError + } + s.sequence = seqIndexCRC32 + fallthrough + case seqIndexCRC32: + ret = crcValidate(s, b) + if ret != xzStreamEnd { + return ret + } + s.temp.buf = s.temp.bufArray[:streamHeaderSize] + s.sequence = seqStreamFooter + fallthrough + case seqStreamFooter: + if !fillTemp(s, b) { + return xzOK + } + return decStreamFooter(s) + } + } + /* Never reached */ +} + +/** + * xzDecRun - Run the XZ decoder + * @s: Decoder state allocated using xzDecInit + * @b: Input and output buffers + * + * See xzRet for details of return values. + * + * xzDecRun is a wrapper for decMain to handle some special cases. + * + * We must return xzBufError when it seems clear that we are not + * going to make any progress anymore. This is to prevent the caller + * from calling us infinitely when the input file is truncated or + * otherwise corrupt. Since zlib-style API allows that the caller + * fills the input buffer only when the decoder doesn't produce any + * new output, we have to be careful to avoid returning xzBufError + * too easily: xzBufError is returned only after the second + * consecutive call to xzDecRun that makes no progress. + */ +func xzDecRun(s *xzDec, b *xzBuf) xzRet { + inStart := b.inPos + outStart := b.outPos + ret := decMain(s, b) + if ret == xzOK && inStart == b.inPos && outStart == b.outPos { + if s.allowBufError { + ret = xzBufError + } + s.allowBufError = true + } else { + s.allowBufError = false + } + return ret +} + +/** + * xzDecInit - Allocate and initialize a XZ decoder state + * @dictMax: Maximum size of the LZMA2 dictionary (history buffer) for + * decoding. LZMA2 dictionary is always 2^n bytes + * or 2^n + 2^(n-1) bytes (the latter sizes are less common + * in practice), so other values for dictMax don't make sense. + * + * dictMax specifies the maximum allowed dictionary size that xzDecRun + * may allocate once it has parsed the dictionary size from the stream + * headers. This way excessive allocations can be avoided while still + * limiting the maximum memory usage to a sane value to prevent running the + * system out of memory when decompressing streams from untrusted sources. + * + * xzDecInit returns a pointer to an xzDec, which is ready to be used with + * xzDecRun. 
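+ *
+ * (For reference, the Reader API in this package maps a dictMax of
+ * zero to DefaultDictMax, 64 MiB, which per reader.go is enough for
+ * archives produced with XZ Utils "xz -9".)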
+ */ +func xzDecInit(dictMax uint32, header *Header) *xzDec { + s := new(xzDec) + s.crc32 = crc32.NewIEEE() + s.Header = header + s.block.hash.sha256 = sha256.New() + s.index.hash.sha256 = sha256.New() + s.lzma2 = xzDecLZMA2Create(dictMax) + xzDecReset(s) + return s +} + +/** + * xzDecReset - Reset an already allocated decoder state + * @s: Decoder state allocated using xzDecInit + * + * This function can be used to reset the decoder state without + * reallocating memory with xzDecInit. + */ +func xzDecReset(s *xzDec) { + s.sequence = seqStreamHeader + s.allowBufError = false + s.pos = 0 + s.crc32.Reset() + s.check = nil + s.CheckType = checkUnset + s.block.compressed = 0 + s.block.uncompressed = 0 + s.block.count = 0 + s.block.hash.unpadded = 0 + s.block.hash.uncompressed = 0 + s.block.hash.sha256.Reset() + s.index.sequence = seqIndexCount + s.index.size = 0 + s.index.count = 0 + s.index.hash.unpadded = 0 + s.index.hash.uncompressed = 0 + s.index.hash.sha256.Reset() + s.temp.pos = 0 + s.temp.buf = s.temp.bufArray[:streamHeaderSize] + s.chain = nil + s.bcjsUsed = 0 + s.deltasUsed = 0 +} diff --git a/vendor/github.com/xi2/xz/dec_util.go b/vendor/github.com/xi2/xz/dec_util.go new file mode 100644 index 00000000..c4227522 --- /dev/null +++ b/vendor/github.com/xi2/xz/dec_util.go @@ -0,0 +1,52 @@ +/* + * XZ decompressor utility functions + * + * Author: Michael Cross + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +package xz + +func getLE32(buf []byte) uint32 { + return uint32(buf[0]) | + uint32(buf[1])<<8 | + uint32(buf[2])<<16 | + uint32(buf[3])<<24 +} + +func getBE32(buf []byte) uint32 { + return uint32(buf[0])<<24 | + uint32(buf[1])<<16 | + uint32(buf[2])<<8 | + uint32(buf[3]) +} + +func putLE32(val uint32, buf []byte) { + buf[0] = byte(val) + buf[1] = byte(val >> 8) + buf[2] = byte(val >> 16) + buf[3] = byte(val >> 24) + return +} + +func putBE32(val uint32, buf []byte) { + buf[0] = byte(val >> 24) + buf[1] = byte(val >> 16) + buf[2] = byte(val >> 8) + buf[3] = byte(val) + return +} + +func putLE64(val uint64, buf []byte) { + buf[0] = byte(val) + buf[1] = byte(val >> 8) + buf[2] = byte(val >> 16) + buf[3] = byte(val >> 24) + buf[4] = byte(val >> 32) + buf[5] = byte(val >> 40) + buf[6] = byte(val >> 48) + buf[7] = byte(val >> 56) + return +} diff --git a/vendor/github.com/xi2/xz/dec_xz.go b/vendor/github.com/xi2/xz/dec_xz.go new file mode 100644 index 00000000..1b18a838 --- /dev/null +++ b/vendor/github.com/xi2/xz/dec_xz.go @@ -0,0 +1,124 @@ +/* + * XZ decompressor + * + * Authors: Lasse Collin + * Igor Pavlov + * + * Translation to Go: Michael Cross + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +package xz + +/* from linux/include/linux/xz.h **************************************/ + +/** + * xzRet - Return codes + * @xzOK: Everything is OK so far. More input or more + * output space is required to continue. + * @xzStreamEnd: Operation finished successfully. + * @xzUnSupportedCheck: Integrity check type is not supported. Decoding + * is still possible by simply calling xzDecRun + * again. + * @xzMemlimitError: A bigger LZMA2 dictionary would be needed than + * allowed by the dictMax argument given to + * xzDecInit. + * @xzFormatError: File format was not recognized (wrong magic + * bytes). + * @xzOptionsError: This implementation doesn't support the requested + * compression options. 
In the decoder this means + * that the header CRC32 matches, but the header + * itself specifies something that we don't support. + * @xzDataError: Compressed data is corrupt. + * @xzBufError: Cannot make any progress. + * + * xzBufError is returned when two consecutive calls to XZ code cannot + * consume any input and cannot produce any new output. This happens + * when there is no new input available, or the output buffer is full + * while at least one output byte is still pending. Assuming your code + * is not buggy, you can get this error only when decoding a + * compressed stream that is truncated or otherwise corrupt. + */ +type xzRet int + +const ( + xzOK xzRet = iota + xzStreamEnd + xzUnsupportedCheck + xzMemlimitError + xzFormatError + xzOptionsError + xzDataError + xzBufError +) + +/** + * xzBuf - Passing input and output buffers to XZ code + * @in: Input buffer. + * @inPos: Current position in the input buffer. This must not exceed + * input buffer size. + * @out: Output buffer. + * @outPos: Current position in the output buffer. This must not exceed + * output buffer size. + * + * Only the contents of the output buffer from out[outPos] onward, and + * the variables inPos and outPos are modified by the XZ code. + */ +type xzBuf struct { + in []byte + inPos int + out []byte + outPos int +} + +/* All XZ filter IDs */ +type xzFilterID int64 + +const ( + idDelta xzFilterID = 0x03 + idBCJX86 xzFilterID = 0x04 + idBCJPowerPC xzFilterID = 0x05 + idBCJIA64 xzFilterID = 0x06 + idBCJARM xzFilterID = 0x07 + idBCJARMThumb xzFilterID = 0x08 + idBCJSPARC xzFilterID = 0x09 + idLZMA2 xzFilterID = 0x21 +) + +// CheckID is the type of the data integrity check in an XZ stream +// calculated from the uncompressed data. +type CheckID int + +func (id CheckID) String() string { + switch id { + case CheckNone: + return "None" + case CheckCRC32: + return "CRC32" + case CheckCRC64: + return "CRC64" + case CheckSHA256: + return "SHA256" + default: + return "Unknown" + } +} + +const ( + CheckNone CheckID = 0x00 + CheckCRC32 CheckID = 0x01 + CheckCRC64 CheckID = 0x04 + CheckSHA256 CheckID = 0x0A + checkMax CheckID = 0x0F + checkUnset CheckID = -1 +) + +// An XZ stream contains a stream header which holds information about +// the stream. That information is exposed as fields of the +// Reader. Currently it contains only the stream's data integrity +// check type. +type Header struct { + CheckType CheckID // type of the stream's data integrity check +} diff --git a/vendor/github.com/xi2/xz/doc.go b/vendor/github.com/xi2/xz/doc.go new file mode 100644 index 00000000..f8c62e62 --- /dev/null +++ b/vendor/github.com/xi2/xz/doc.go @@ -0,0 +1,35 @@ +// Package xz implements XZ decompression natively in Go. +// +// Usage +// +// For ease of use, this package is designed to have a similar API to +// compress/gzip. See the examples for further details. +// +// Implementation +// +// This package is a translation from C to Go of XZ Embedded +// (http://tukaani.org/xz/embedded.html) with enhancements made so as +// to implement all mandatory and optional parts of the XZ file format +// specification v1.0.4. It supports all filters and block check +// types, supports multiple streams, and performs index verification +// using SHA-256 as recommended by the specification. +// +// Speed +// +// On the author's Intel Ivybridge i5, decompression speed is about +// half that of the standard XZ Utils (tested with a recent linux +// kernel tarball). 
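+//
+// Example
+//
+// A minimal usage sketch (hypothetical file name; error handling
+// mostly elided):
+//
+//   f, _ := os.Open("data.xz")
+//   defer f.Close()
+//   r, err := xz.NewReader(f, 0)
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   io.Copy(os.Stdout, r)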
+// +// Thanks +// +// Thanks are due to Lasse Collin and Igor Pavlov, the authors of XZ +// Embedded, on whose code package xz is based. It would not exist +// without their decision to allow others to modify and reuse their +// code. +// +// Bug reports +// +// For bug reports relating to this package please contact the author +// through https://github.com/xi2/xz/issues, and not the authors of XZ +// Embedded. +package xz diff --git a/vendor/github.com/xi2/xz/reader.go b/vendor/github.com/xi2/xz/reader.go new file mode 100644 index 00000000..e321d755 --- /dev/null +++ b/vendor/github.com/xi2/xz/reader.go @@ -0,0 +1,256 @@ +/* + * Package xz Go Reader API + * + * Author: Michael Cross + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +package xz + +import ( + "errors" + "io" +) + +// Package specific errors. +var ( + ErrUnsupportedCheck = errors.New("xz: integrity check type not supported") + ErrMemlimit = errors.New("xz: LZMA2 dictionary size exceeds max") + ErrFormat = errors.New("xz: file format not recognized") + ErrOptions = errors.New("xz: compression options not supported") + ErrData = errors.New("xz: data is corrupt") + ErrBuf = errors.New("xz: data is truncated or corrupt") +) + +// DefaultDictMax is the default maximum dictionary size in bytes used +// by the decoder. This value is sufficient to decompress files +// created with XZ Utils "xz -9". +const DefaultDictMax = 1 << 26 // 64 MiB + +// inBufSize is the input buffer size used by the decoder. +const inBufSize = 1 << 13 // 8 KiB + +// A Reader is an io.Reader that can be used to retrieve uncompressed +// data from an XZ file. +// +// In general, an XZ file can be a concatenation of other XZ +// files. Reads from the Reader return the concatenation of the +// uncompressed data of each. +type Reader struct { + Header + r io.Reader // the wrapped io.Reader + multistream bool // true if reader is in multistream mode + rEOF bool // true after io.EOF received on r + dEOF bool // true after decoder has completed + padding int // bytes of stream padding read (or -1) + in [inBufSize]byte // backing array for buf.in + buf *xzBuf // decoder input/output buffers + dec *xzDec // decoder state + err error // the result of the last decoder call +} + +// NewReader creates a new Reader reading from r. The decompressor +// will use an LZMA2 dictionary size up to dictMax bytes in +// size. Passing a value of zero sets dictMax to DefaultDictMax. If +// an individual XZ stream requires a dictionary size greater than +// dictMax in order to decompress, Read will return ErrMemlimit. +// +// If NewReader is passed a value of nil for r then a Reader is +// created such that all read attempts will return io.EOF. This is +// useful if you just want to allocate memory for a Reader which will +// later be initialized with Reset. +// +// Due to internal buffering, the Reader may read more data than +// necessary from r. +func NewReader(r io.Reader, dictMax uint32) (*Reader, error) { + if dictMax == 0 { + dictMax = DefaultDictMax + } + z := &Reader{ + r: r, + multistream: true, + padding: -1, + buf: &xzBuf{}, + } + if r == nil { + z.rEOF, z.dEOF = true, true + } + z.dec = xzDecInit(dictMax, &z.Header) + var err error + if r != nil { + _, err = z.Read(nil) // read stream header + } + return z, err +} + +// decode is a wrapper around xzDecRun that additionally handles +// stream padding. It treats the padding as a kind of stream that +// decodes to nothing. 
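+//
+// (Stream Padding in the .xz format consists of null bytes and must
+// total a multiple of four bytes; decode enforces both rules below.)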
+// +// When decoding padding, z.padding >= 0 +// When decoding a real stream, z.padding == -1 +func (z *Reader) decode() (ret xzRet) { + if z.padding >= 0 { + // read all padding in input buffer + for z.buf.inPos < len(z.buf.in) && + z.buf.in[z.buf.inPos] == 0 { + z.buf.inPos++ + z.padding++ + } + switch { + case z.buf.inPos == len(z.buf.in) && z.rEOF: + // case: out of padding. no more input data available + if z.padding%4 != 0 { + ret = xzDataError + } else { + ret = xzStreamEnd + } + case z.buf.inPos == len(z.buf.in): + // case: read more padding next loop iteration + ret = xzOK + default: + // case: out of padding. more input data available + if z.padding%4 != 0 { + ret = xzDataError + } else { + xzDecReset(z.dec) + ret = xzStreamEnd + } + } + } else { + ret = xzDecRun(z.dec, z.buf) + } + return +} + +func (z *Reader) Read(p []byte) (n int, err error) { + // restore err + err = z.err + // set decoder output buffer to p + z.buf.out = p + z.buf.outPos = 0 + for { + // update n + n = z.buf.outPos + // if last call to decoder ended with an error, return that error + if err != nil { + break + } + // if decoder has finished, return with err == io.EOF + if z.dEOF { + err = io.EOF + break + } + // if p full, return with err == nil, unless we have not yet + // read the stream header with Read(nil) + if n == len(p) && z.CheckType != checkUnset { + break + } + // if needed, read more data from z.r + if z.buf.inPos == len(z.buf.in) && !z.rEOF { + rn, e := z.r.Read(z.in[:]) + if e != nil && e != io.EOF { + // read error + err = e + break + } + if e == io.EOF { + z.rEOF = true + } + // set new input buffer in z.buf + z.buf.in = z.in[:rn] + z.buf.inPos = 0 + } + // decode more data + ret := z.decode() + switch ret { + case xzOK: + // no action needed + case xzStreamEnd: + if z.padding >= 0 { + z.padding = -1 + if !z.multistream || z.rEOF { + z.dEOF = true + } + } else { + z.padding = 0 + } + case xzUnsupportedCheck: + err = ErrUnsupportedCheck + case xzMemlimitError: + err = ErrMemlimit + case xzFormatError: + err = ErrFormat + case xzOptionsError: + err = ErrOptions + case xzDataError: + err = ErrData + case xzBufError: + err = ErrBuf + } + // save err + z.err = err + } + return +} + +// Multistream controls whether the reader is operating in multistream +// mode. +// +// If enabled (the default), the Reader expects the input to be a +// sequence of XZ streams, possibly interspersed with stream padding, +// which it reads one after another. The effect is that the +// concatenation of a sequence of XZ streams or XZ files is +// treated as equivalent to the compressed result of the concatenation +// of the sequence. This is standard behaviour for XZ readers. +// +// Calling Multistream(false) disables this behaviour; disabling the +// behaviour can be useful when reading file formats that distinguish +// individual XZ streams. In this mode, when the Reader reaches the +// end of the stream, Read returns io.EOF. To start the next stream, +// call z.Reset(nil) followed by z.Multistream(false). If there is no +// next stream, z.Reset(nil) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// Reset, for non-nil values of io.Reader r, discards the Reader z's +// state and makes it equivalent to the result of its original state +// from NewReader, but reading from r instead. This permits reusing a +// Reader rather than allocating a new one. +// +// If you wish to leave r unchanged use z.Reset(nil). This keeps r +// unchanged and ensures internal buffering is preserved. 
If the +// Reader was at the end of a stream it is then ready to read any +// follow on streams. If there are no follow on streams z.Reset(nil) +// returns io.EOF. If the Reader was not at the end of a stream then +// z.Reset(nil) does nothing. +func (z *Reader) Reset(r io.Reader) error { + switch { + case r == nil: + z.multistream = true + if !z.dEOF { + return nil + } + if z.rEOF { + return io.EOF + } + z.dEOF = false + _, err := z.Read(nil) // read stream header + return err + default: + z.r = r + z.multistream = true + z.rEOF = false + z.dEOF = false + z.padding = -1 + z.buf.in = nil + z.buf.inPos = 0 + xzDecReset(z.dec) + z.err = nil + _, err := z.Read(nil) // read stream header + return err + } +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go new file mode 100644 index 00000000..425e8eec --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -0,0 +1,536 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. +// +// CAST5 is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package cast5 // import "golang.org/x/crypto/cast5" + +import ( + "errors" + "math/bits" +) + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + 
l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. 
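+//
+// For example, the first keyScheduleA row below, {4, 0, 0xd, 0xf,
+// 0xc, 0xe, 0x8}, is exactly the z0z1z2z3 line quoted above: write
+// word 4 (z0..z3), read word 0 (x0..x3), then index bytes xD, xF,
+// xC, xE and x8 into the S-boxes.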
+ +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
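+//
+// Each one combines the data half d with the masking key m (addition,
+// XOR or subtraction), rotates the result left by r bits, and then
+// mixes the four S-box lookups with a different ordering of the same
+// three operations.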
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := bits.RotateLeft32(t, int(r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := bits.RotateLeft32(t, int(r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := bits.RotateLeft32(t, int(r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 
0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 
0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 
0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 
0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 
0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 
0x4eeb8476, 0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 
0xb48a2465, 0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go new file mode 100644 index 00000000..8907183e --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -0,0 +1,232 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. +package armor // import "golang.org/x/crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. 
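+// For reference (clarifying note, not upstream text): the checksum line that
+// this reader watches for has the form "=XXXX", where XXXX is the base64
+// encoding of the 3-byte CRC24 value; Read recognizes it below by its 5-byte
+// length and leading '='.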
+type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask { + return 0, ArmorCorrupt + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
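+//
+// Illustrative usage sketch (not part of the upstream file; assumes Go 1.16+
+// for io.ReadAll and that the caller imports "io" and "strings"):
+//
+//	block, err := armor.Decode(strings.NewReader(armoredText))
+//	if err != nil {
+//		return err // no armored block was found
+//	}
+//	body, err := io.ReadAll(block.Body) // the CRC24 is verified at EOF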
+func Decode(in io.Reader) (p *Block, err error) {
+	r := bufio.NewReaderSize(in, 100)
+	var line []byte
+	ignoreNext := false
+
+TryNextBlock:
+	p = nil
+
+	// Skip leading garbage
+	for {
+		ignoreThis := ignoreNext
+		line, ignoreNext, err = r.ReadLine()
+		if err != nil {
+			return
+		}
+		if ignoreNext || ignoreThis {
+			continue
+		}
+		line = bytes.TrimSpace(line)
+		if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
+			break
+		}
+	}
+
+	p = new(Block)
+	p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
+	p.Header = make(map[string]string)
+	nextIsContinuation := false
+	var lastKey string
+
+	// Read headers
+	for {
+		isContinuation := nextIsContinuation
+		line, nextIsContinuation, err = r.ReadLine()
+		if err != nil {
+			p = nil
+			return
+		}
+		if isContinuation {
+			p.Header[lastKey] += string(line)
+			continue
+		}
+		line = bytes.TrimSpace(line)
+		if len(line) == 0 {
+			break
+		}
+
+		i := bytes.Index(line, []byte(": "))
+		if i == -1 {
+			goto TryNextBlock
+		}
+		lastKey = string(line[:i])
+		p.Header[lastKey] = string(line[i+2:])
+	}
+
+	p.lReader.in = r
+	p.oReader.currentCRC = crc24Init
+	p.oReader.lReader = &p.lReader
+	p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
+	p.Body = &p.oReader
+
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go
new file mode 100644
index 00000000..5b6e16c1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go
@@ -0,0 +1,161 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package armor
+
+import (
+	"encoding/base64"
+	"io"
+)
+
+var armorHeaderSep = []byte(": ")
+var blockEnd = []byte("\n=")
+var newline = []byte("\n")
+var armorEndOfLineOut = []byte("-----\n")
+
+// writeSlices writes its arguments to the given Writer.
+func writeSlices(out io.Writer, slices ...[]byte) (err error) {
+	for _, s := range slices {
+		_, err = out.Write(s)
+		if err != nil {
+			return err
+		}
+	}
+	return
+}
+
+// lineBreaker breaks data across several lines, all of the same byte length
+// (except possibly the last). Lines are broken with a single '\n'.
+type lineBreaker struct {
+	lineLength  int
+	line        []byte
+	used        int
+	out         io.Writer
+	haveWritten bool
+}
+
+func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
+	return &lineBreaker{
+		lineLength: lineLength,
+		line:       make([]byte, lineLength),
+		used:       0,
+		out:        out,
+	}
+}
+
+func (l *lineBreaker) Write(b []byte) (n int, err error) {
+	n = len(b)
+
+	if n == 0 {
+		return
+	}
+
+	if l.used == 0 && l.haveWritten {
+		_, err = l.out.Write([]byte{'\n'})
+		if err != nil {
+			return
+		}
+	}
+
+	if l.used+len(b) < l.lineLength {
+		l.used += copy(l.line[l.used:], b)
+		return
+	}
+
+	l.haveWritten = true
+	_, err = l.out.Write(l.line[0:l.used])
+	if err != nil {
+		return
+	}
+	excess := l.lineLength - l.used
+	l.used = 0
+
+	_, err = l.out.Write(b[0:excess])
+	if err != nil {
+		return
+	}
+
+	_, err = l.Write(b[excess:])
+	return
+}
+
+func (l *lineBreaker) Close() (err error) {
+	if l.used > 0 {
+		_, err = l.out.Write(l.line[0:l.used])
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+// encoding keeps track of a running CRC24 over the data which has been written
+// to it and outputs an OpenPGP checksum when closed, followed by an armor
+// trailer.
+// +// It's built into a stack of io.Writers: +// +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go new file mode 100644 index 00000000..e601e389 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go new file mode 100644 index 00000000..644b2e07 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go @@ -0,0 +1,424 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clearsign generates and processes OpenPGP, clear-signed data. See +// RFC 4880, section 7. 
+//
+// Clearsigned messages are cryptographically signed, but the contents of the
+// message are kept in plaintext so that they can be read without special tools.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
+package clearsign // import "golang.org/x/crypto/openpgp/clearsign"
+
+import (
+	"bufio"
+	"bytes"
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+	"net/textproto"
+	"strconv"
+	"strings"
+
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/packet"
+)
+
+// A Block represents a clearsigned message. A signature on a Block can
+// be checked by passing Bytes into openpgp.CheckDetachedSignature.
+type Block struct {
+	Headers          textproto.MIMEHeader // Optional unverified Hash headers
+	Plaintext        []byte               // The original message text
+	Bytes            []byte               // The signed message
+	ArmoredSignature *armor.Block         // The signature block
+}
+
+// start is the marker which denotes the beginning of a clearsigned message.
+var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----")
+
+// dashEscape is prefixed to any lines that begin with a hyphen so that they
+// can't be confused with endText.
+var dashEscape = []byte("- ")
+
+// endText is a marker which denotes the end of the message and the start of
+// an armored signature.
+var endText = []byte("-----BEGIN PGP SIGNATURE-----")
+
+// end is a marker which denotes the end of the armored signature.
+var end = []byte("\n-----END PGP SIGNATURE-----")
+
+var crlf = []byte("\r\n")
+var lf = byte('\n')
+
+// getLine returns the first \r\n or \n delimited line from the given byte
+// array. The line does not include the \r\n or \n. The remainder of the byte
+// array (also not including the new line bytes) is also returned and this will
+// always be smaller than the original argument.
+func getLine(data []byte) (line, rest []byte) {
+	i := bytes.Index(data, []byte{'\n'})
+	var j int
+	if i < 0 {
+		i = len(data)
+		j = i
+	} else {
+		j = i + 1
+		if i > 0 && data[i-1] == '\r' {
+			i--
+		}
+	}
+	return data[0:i], data[j:]
+}
+
+// Decode finds the first clearsigned message in data and returns it, as well as
+// the suffix of data which remains after the message. Any prefix data is
+// discarded.
+//
+// If no message is found, or if the message is invalid, Decode returns nil and
+// the whole data slice. The only allowed header type is Hash, and it is not
+// verified against the signature hash.
+func Decode(data []byte) (b *Block, rest []byte) {
+	// start begins with a newline. However, at the very beginning of
+	// the byte array, we'll accept the start string without it.
+	rest = data
+	if bytes.HasPrefix(data, start[1:]) {
+		rest = rest[len(start)-1:]
+	} else if i := bytes.Index(data, start); i >= 0 {
+		rest = rest[i+len(start):]
+	} else {
+		return nil, data
+	}
+
+	// Consume the start line and check it does not have a suffix.
+	suffix, rest := getLine(rest)
+	if len(suffix) != 0 {
+		return nil, data
+	}
+
+	var line []byte
+	b = &Block{
+		Headers: make(textproto.MIMEHeader),
+	}
+
+	// Next come a series of header lines.
+	for {
+		// This loop terminates because getLine's second result is
+		// always smaller than its argument.
+ if len(rest) == 0 { + return nil, data + } + // An empty line marks the end of the headers. + if line, rest = getLine(rest); len(line) == 0 { + break + } + + // Reject headers with control or Unicode characters. + if i := bytes.IndexFunc(line, func(r rune) bool { + return r < 0x20 || r > 0x7e + }); i != -1 { + return nil, data + } + + i := bytes.Index(line, []byte{':'}) + if i == -1 { + return nil, data + } + + key, val := string(line[0:i]), string(line[i+1:]) + key = strings.TrimSpace(key) + if key != "Hash" { + return nil, data + } + val = strings.TrimSpace(val) + b.Headers.Add(key, val) + } + + firstLine := true + for { + start := rest + + line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } + if bytes.Equal(line, endText) { + // Back up to the start of the line because armor expects to see the + // header line. + rest = start + break + } + + // The final CRLF isn't included in the hash so we don't write it until + // we've seen the next line. + if firstLine { + firstLine = false + } else { + b.Bytes = append(b.Bytes, crlf...) + } + + if bytes.HasPrefix(line, dashEscape) { + line = line[2:] + } + line = bytes.TrimRight(line, " \t") + b.Bytes = append(b.Bytes, line...) + + b.Plaintext = append(b.Plaintext, line...) + b.Plaintext = append(b.Plaintext, lf) + } + + // We want to find the extent of the armored data (including any newlines at + // the end). + i := bytes.Index(rest, end) + if i == -1 { + return nil, data + } + i += len(end) + for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { + i++ + } + armored := rest[:i] + rest = rest[i:] + + var err error + b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) + if err != nil { + return nil, data + } + + return b, rest +} + +// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed +// message. The clear-signed message is written to buffered and a hash, suitable +// for signing, is maintained in h. +// +// When closed, an armored signature is created and written to complete the +// message. +type dashEscaper struct { + buffered *bufio.Writer + hashers []hash.Hash // one per key in privateKeys + hashType crypto.Hash + toHash io.Writer // writes to all the hashes in hashers + + atBeginningOfLine bool + isFirstLine bool + + whitespace []byte + byteBuf []byte // a one byte buffer to save allocations + + privateKeys []*packet.PrivateKey + config *packet.Config +} + +func (d *dashEscaper) Write(data []byte) (n int, err error) { + for _, b := range data { + d.byteBuf[0] = b + + if d.atBeginningOfLine { + // The final CRLF isn't included in the hash so we have to wait + // until this point (the start of the next line) before writing it. + if !d.isFirstLine { + d.toHash.Write(crlf) + } + d.isFirstLine = false + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { + // At the beginning of a line, hyphens have to be escaped. + if b == '-' { + // The signature isn't calculated over the dash-escaped text so + // the escape is only written to buffered. + if _, err = d.buffered.Write(dashEscape); err != nil { + return + } + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } else if b == '\n' { + // Nothing to do because we delay writing CRLF to the hash. 
+ } else { + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } + if err = d.buffered.WriteByte(b); err != nil { + return + } + } else { + if b == '\n' { + // We got a raw \n. Drop any trailing whitespace and write a + // CRLF. + d.whitespace = d.whitespace[:0] + // We delay writing CRLF to the hash until the start of the + // next line. + if err = d.buffered.WriteByte(b); err != nil { + return + } + d.atBeginningOfLine = true + } else { + // Any buffered whitespace wasn't at the end of the line so + // we need to write it out. + if len(d.whitespace) > 0 { + d.toHash.Write(d.whitespace) + if _, err = d.buffered.Write(d.whitespace); err != nil { + return + } + d.whitespace = d.whitespace[:0] + } + d.toHash.Write(d.byteBuf) + if err = d.buffered.WriteByte(b); err != nil { + return + } + } + } + } + + n = len(data) + return +} + +func (d *dashEscaper) Close() (err error) { + if !d.atBeginningOfLine { + if err = d.buffered.WriteByte(lf); err != nil { + return + } + } + + out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) + if err != nil { + return + } + + t := d.config.Now() + for i, k := range d.privateKeys { + sig := new(packet.Signature) + sig.SigType = packet.SigTypeText + sig.PubKeyAlgo = k.PubKeyAlgo + sig.Hash = d.hashType + sig.CreationTime = t + sig.IssuerKeyId = &k.KeyId + + if err = sig.Sign(d.hashers[i], k, d.config); err != nil { + return + } + if err = sig.Serialize(out); err != nil { + return + } + } + + if err = out.Close(); err != nil { + return + } + if err = d.buffered.Flush(); err != nil { + return + } + return +} + +// Encode returns a WriteCloser which will clear-sign a message with privateKey +// and write it to w. If config is nil, sensible defaults are used. +func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config) +} + +// EncodeMulti returns a WriteCloser which will clear-sign a message with all the +// private keys indicated and write it to w. If config is nil, sensible defaults +// are used. +func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + for _, k := range privateKeys { + if k.Encrypted { + return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString())) + } + } + + hashType := config.Hash() + name := nameOfHash(hashType) + if len(name) == 0 { + return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) + } + + if !hashType.Available() { + return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) + } + var hashers []hash.Hash + var ws []io.Writer + for range privateKeys { + h := hashType.New() + hashers = append(hashers, h) + ws = append(ws, h) + } + toHash := io.MultiWriter(ws...) + + buffered := bufio.NewWriter(w) + // start has a \n at the beginning that we don't want here. 
+ if _, err = buffered.Write(start[1:]); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if _, err = buffered.WriteString("Hash: "); err != nil { + return + } + if _, err = buffered.WriteString(name); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + + plaintext = &dashEscaper{ + buffered: buffered, + hashers: hashers, + hashType: hashType, + toHash: toHash, + + atBeginningOfLine: true, + isFirstLine: true, + + byteBuf: make([]byte, 1), + + privateKeys: privateKeys, + config: config, + } + + return +} + +// nameOfHash returns the OpenPGP name for the given hash, or the empty string +// if the name isn't known. See RFC 4880, section 9.4. +func nameOfHash(h crypto.Hash) string { + switch h { + case crypto.MD5: + return "MD5" + case crypto.SHA1: + return "SHA1" + case crypto.RIPEMD160: + return "RIPEMD160" + case crypto.SHA224: + return "SHA224" + case crypto.SHA256: + return "SHA256" + case crypto.SHA384: + return "SHA384" + case crypto.SHA512: + return "SHA512" + } + return "" +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 00000000..743b35a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,130 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +// +// Deprecated: this package was only provided to support ElGamal encryption in +// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see +// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has +// compatibility and security issues (see https://eprint.iacr.org/2021/923). +// Moreover, this package doesn't protect against side-channel attacks. +package elgamal // import "golang.org/x/crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. 
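+//
+// For reference (clarifying note, not upstream text): the message is padded
+// PKCS#1 v1.5 style before exponentiation, EM = 0x02 || PS || 0x00 || M with
+// PS a run of non-zero random bytes, so msg may be at most
+// (byte length of P) - 11 bytes long.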
+func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
+	pLen := (pub.P.BitLen() + 7) / 8
+	if len(msg) > pLen-11 {
+		err = errors.New("elgamal: message too long")
+		return
+	}
+
+	// EM = 0x02 || PS || 0x00 || M
+	em := make([]byte, pLen-1)
+	em[0] = 2
+	ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
+	err = nonZeroRandomBytes(ps, random)
+	if err != nil {
+		return
+	}
+	em[len(em)-len(msg)-1] = 0
+	copy(mm, msg)
+
+	m := new(big.Int).SetBytes(em)
+
+	k, err := rand.Int(random, pub.P)
+	if err != nil {
+		return
+	}
+
+	c1 = new(big.Int).Exp(pub.G, k, pub.P)
+	s := new(big.Int).Exp(pub.Y, k, pub.P)
+	c2 = s.Mul(s, m)
+	c2.Mod(c2, pub.P)
+
+	return
+}
+
+// Decrypt takes two integers, resulting from an ElGamal encryption, and
+// returns the plaintext of the message. An error can result only if the
+// ciphertext is invalid. Users should keep in mind that this is a padding
+// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
+// be used to break the cryptosystem. See “Chosen Ciphertext Attacks
+// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
+// Bleichenbacher, Advances in Cryptology (Crypto '98).
+func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
+	s := new(big.Int).Exp(c1, priv.X, priv.P)
+	if s.ModInverse(s, priv.P) == nil {
+		return nil, errors.New("elgamal: invalid private key")
+	}
+	s.Mul(s, c2)
+	s.Mod(s, priv.P)
+	em := s.Bytes()
+
+	firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
+
+	// The remainder of the plaintext must be a string of non-zero random
+	// octets, followed by a 0, followed by the message.
+	// lookingForIndex: 1 iff we are still looking for the zero.
+	// index: the offset of the first zero byte.
+	var lookingForIndex, index int
+	lookingForIndex = 1
+
+	for i := 1; i < len(em); i++ {
+		equals0 := subtle.ConstantTimeByteEq(em[i], 0)
+		index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
+		lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
+	}
+
+	if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
+		return nil, errors.New("elgamal: decryption error")
+	}
+	return em[index+1:], nil
+}
+
+// nonZeroRandomBytes fills the given slice with non-zero random octets.
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
+	_, err = io.ReadFull(rand, s)
+	if err != nil {
+		return
+	}
+
+	for i := 0; i < len(s); i++ {
+		for s[i] == 0 {
+			_, err = io.ReadFull(rand, s[i:i+1])
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
new file mode 100644
index 00000000..1d7a0ea0
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors contains common error types for the OpenPGP packages.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
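+//
+// Illustrative sketch (not upstream text, written from an importing package's
+// perspective): callers usually distinguish the error kinds below with type
+// assertions, e.g.:
+//
+//	if _, ok := err.(errors.UnsupportedError); ok {
+//		// skip unsupported key material and keep reading
+//	}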
+package errors // import "golang.org/x/crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go new file mode 100644 index 00000000..d62f787e --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -0,0 +1,693 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/rsa" + "io" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. 
This is either the
+// Entity's primary key or a subkey.
+type Key struct {
+	Entity        *Entity
+	PublicKey     *packet.PublicKey
+	PrivateKey    *packet.PrivateKey
+	SelfSignature *packet.Signature
+}
+
+// A KeyRing provides access to public and private keys.
+type KeyRing interface {
+	// KeysById returns the set of keys that have the given key id.
+	KeysById(id uint64) []Key
+	// KeysByIdUsage returns the set of keys with the given id
+	// that also meet the key usage given by requiredUsage.
+	// The requiredUsage is expressed as the bitwise-OR of
+	// packet.KeyFlag* values.
+	KeysByIdUsage(id uint64, requiredUsage byte) []Key
+	// DecryptionKeys returns all private keys that are valid for
+	// decryption.
+	DecryptionKeys() []Key
+}
+
+// primaryIdentity returns the Identity marked as primary or the first identity
+// if none are so marked.
+func (e *Entity) primaryIdentity() *Identity {
+	var firstIdentity *Identity
+	for _, ident := range e.Identities {
+		if firstIdentity == nil {
+			firstIdentity = ident
+		}
+		if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+			return ident
+		}
+	}
+	return firstIdentity
+}
+
+// encryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
+	candidateSubkey := -1
+
+	// Iterate the keys to find the newest key
+	var maxTime time.Time
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagEncryptCommunications &&
+			subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+			!subkey.Sig.KeyExpired(now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+			candidateSubkey = i
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we don't have any candidate subkeys for encryption and
+	// the primary key doesn't have any usage metadata then we
+	// assume that the primary key is ok. Or, if the primary key is
+	// marked as ok to encrypt to, then we can obviously use it.
+	i := e.primaryIdentity()
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
+		e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
+		!i.SelfSignature.KeyExpired(now) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	// This Entity appears to be signing only.
+	return Key{}, false
+}
+
+// signingKey returns the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) signingKey(now time.Time) (Key, bool) {
+	candidateSubkey := -1
+
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagSign &&
+			subkey.PublicKey.PubKeyAlgo.CanSign() &&
+			!subkey.Sig.KeyExpired(now) {
+			candidateSubkey = i
+			break
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we have no candidate subkey then we assume that it's ok to sign
+	// with the primary key.
+	i := e.primaryIdentity()
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
+		!i.SelfSignature.KeyExpired(now) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	return Key{}, false
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
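+//
+// Illustrative sketch (not upstream text): given a signature's issuer key id,
+// candidate verification keys can be looked up as:
+//
+//	for _, k := range el.KeysById(issuerKeyId) {
+//		_ = k.PublicKey // the matching primary key or subkey
+//	}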
+func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// KeysByIdUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if len(key.Entity.Revocations) > 0 { + continue + } + + if key.SelfSignature.RevocationReason != nil { + continue + } + + if key.SelfSignature.FlagsValid && requiredUsage != 0 { + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. 
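+//
+// Illustrative usage sketch (not upstream text; "keyring.asc" is a
+// hypothetical armored keyring file and "os" is assumed to be imported):
+//
+//	f, err := os.Open("keyring.asc")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	entities, err := openpgp.ReadArmoredKeyRing(f) // delegates to ReadKeyRing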
+func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
+ return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + identity.SelfSignature = sig + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Sig = sig + case packet.SigTypeSubkeyBinding: + + if shouldReplaceSubkeySig(subKey.Sig, sig) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { + if potentialNewSig == nil { + return false + } + + if existingSig == nil { + return true + } + + if existingSig.SigType == packet.SigTypeSubkeyRevocation { + return false // never override a revocation signature + } + + return potentialNewSig.CreationTime.After(existingSig.CreationTime) +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. 
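+//
+// Illustrative usage sketch (not upstream text; the identity values are made
+// up for the example):
+//
+//	e, err := openpgp.NewEntity("Alice Example", "test key", "alice@example.com", nil)
+//	if err != nil {
+//		return err
+//	}
+//	// e now holds an RSA signing primary key and an RSA encryption subkey.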
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
+	creationTime := config.Now()
+
+	bits := defaultRSAKeyBits
+	if config != nil && config.RSABits != 0 {
+		bits = config.RSABits
+	}
+
+	uid := packet.NewUserId(name, comment, email)
+	if uid == nil {
+		return nil, errors.InvalidArgumentError("user id field contained invalid characters")
+	}
+	signingPriv, err := rsa.GenerateKey(config.Random(), bits)
+	if err != nil {
+		return nil, err
+	}
+	encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
+	if err != nil {
+		return nil, err
+	}
+
+	e := &Entity{
+		PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
+		PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
+		Identities: make(map[string]*Identity),
+	}
+	isPrimaryId := true
+	e.Identities[uid.Id] = &Identity{
+		Name:   uid.Id,
+		UserId: uid,
+		SelfSignature: &packet.Signature{
+			CreationTime: creationTime,
+			SigType:      packet.SigTypePositiveCert,
+			PubKeyAlgo:   packet.PubKeyAlgoRSA,
+			Hash:         config.Hash(),
+			IsPrimaryId:  &isPrimaryId,
+			FlagsValid:   true,
+			FlagSign:     true,
+			FlagCertify:  true,
+			IssuerKeyId:  &e.PrimaryKey.KeyId,
+		},
+	}
+	err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the user passes in a DefaultHash via packet.Config,
+	// set the PreferredHash for the SelfSignature.
+	if config != nil && config.DefaultHash != 0 {
+		e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
+	}
+
+	// Likewise for DefaultCipher.
+	if config != nil && config.DefaultCipher != 0 {
+		e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
+	}
+
+	e.Subkeys = make([]Subkey, 1)
+	e.Subkeys[0] = Subkey{
+		PublicKey:  packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
+		PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
+		Sig: &packet.Signature{
+			CreationTime:              creationTime,
+			SigType:                   packet.SigTypeSubkeyBinding,
+			PubKeyAlgo:                packet.PubKeyAlgoRSA,
+			Hash:                      config.Hash(),
+			FlagsValid:                true,
+			FlagEncryptStorage:        true,
+			FlagEncryptCommunications: true,
+			IssuerKeyId:               &e.PrimaryKey.KeyId,
+		},
+	}
+	e.Subkeys[0].PublicKey.IsSubkey = true
+	e.Subkeys[0].PrivateKey.IsSubkey = true
+	err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
+	if err != nil {
+		return nil, err
+	}
+	return e, nil
+}
+
+// SerializePrivate serializes an Entity, including private key material, but
+// excluding signatures from other entities, to the given Writer.
+// Identities and subkeys are re-signed in case they changed since NewEntity.
+// If config is nil, sensible defaults will be used.
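+//
+// Illustrative usage sketch (not upstream text): the output is typically
+// wrapped in ASCII armor using PrivateKeyType as the block type:
+//
+//	w, err := armor.Encode(out, openpgp.PrivateKeyType, nil)
+//	if err != nil {
+//		return err
+//	}
+//	if err := e.SerializePrivate(w, nil); err != nil {
+//		return err
+//	}
+//	return w.Close()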
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go new file mode 100644 index 00000000..e8f0b5ca --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "golang.org/x/crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
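+//
+// Illustrative note (not upstream text): after parsing, Body yields the
+// decompressed packet stream, which callers typically hand back to
+// packet.NewReader to read the inner packets.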
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 1:
+		c.Body = flate.NewReader(r)
+	case 2:
+		c.Body, err = zlib.NewReader(r)
+	case 3:
+		c.Body = bzip2.NewReader(r)
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go
new file mode 100644
index 00000000..c76eecc9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/config.go
@@ -0,0 +1,91 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
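+//
+// Illustrative sketch (not upstream text, written from a caller's
+// perspective; assumes the caller imports "crypto"): a Config overriding only
+// the hash and cipher, with every other field left at its default:
+//
+//	cfg := &packet.Config{
+//		DefaultHash:   crypto.SHA256,
+//		DefaultCipher: packet.CipherAES256,
+//	}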
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time as the number of seconds since the
+	// epoch. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048 bit keys are created.
+	RSABits int
+}
+
+func (c *Config) Random() io.Reader {
+	if c == nil || c.Rand == nil {
+		return rand.Reader
+	}
+	return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+	if c == nil || uint(c.DefaultHash) == 0 {
+		return crypto.SHA256
+	}
+	return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+	if c == nil || uint8(c.DefaultCipher) == 0 {
+		return CipherAES128
+	}
+	return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+	if c == nil || c.Time == nil {
+		return time.Now()
+	}
+	return c.Time()
+}
+
+func (c *Config) Compression() CompressionAlgo {
+	if c == nil {
+		return CompressionNone
+	}
+	return c.DefaultCompressionAlgo
+}
+
+func (c *Config) PasswordHashIterations() int {
+	if c == nil || c.S2KCount == 0 {
+		return 0
+	}
+	return c.S2KCount
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
new file mode 100644
index 00000000..6d763972
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
@@ -0,0 +1,208 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rsa"
+	"encoding/binary"
+	"io"
+	"math/big"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/elgamal"
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+const encryptedKeyVersion = 3
+
+// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
+// section 5.1.
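+//
+// Illustrative sketch (not upstream text): after a successful Decrypt, Key
+// holds the session key and CipherFunc the symmetric algorithm:
+//
+//	if err := ek.Decrypt(priv, nil); err == nil {
+//		sessionKey, cipher := ek.Key, ek.CipherFunc
+//		_, _ = sessionKey, cipher
+//	}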
+type EncryptedKey struct {
+	KeyId      uint64
+	Algo       PublicKeyAlgorithm
+	CipherFunc CipherFunction // only valid after a successful Decrypt
+	Key        []byte         // only valid after a successful Decrypt
+
+	encryptedMPI1, encryptedMPI2 parsedMPI
+}
+
+func (e *EncryptedKey) parse(r io.Reader) (err error) {
+	var buf [10]byte
+	_, err = readFull(r, buf[:])
+	if err != nil {
+		return
+	}
+	if buf[0] != encryptedKeyVersion {
+		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
+	}
+	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
+	e.Algo = PublicKeyAlgorithm(buf[9])
+	switch e.Algo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+	case PubKeyAlgoElGamal:
+		e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+		e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+		if err != nil {
+			return
+		}
+	}
+	_, err = consumeAll(r)
+	return
+}
+
+func checksumKeyMaterial(key []byte) uint16 {
+	var checksum uint16
+	for _, v := range key {
+		checksum += uint16(v)
+	}
+	return checksum
+}
+
+// Decrypt decrypts an encrypted session key with the given private key. The
+// private key must have been decrypted first.
+// If config is nil, sensible defaults will be used.
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
+	var err error
+	var b []byte
+
+	// TODO(agl): use session key decryption routines here to avoid
+	// padding oracle attacks.
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+		// Supports both *rsa.PrivateKey and crypto.Decrypter
+		k := priv.PrivateKey.(crypto.Decrypter)
+		b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil)
+	case PubKeyAlgoElGamal:
+		c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
+		c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
+		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
+	default:
+		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+	}
+
+	if err != nil {
+		return err
+	}
+
+	e.CipherFunc = CipherFunction(b[0])
+	e.Key = b[1 : len(b)-2]
+	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
+	checksum := checksumKeyMaterial(e.Key)
+	if checksum != expectedChecksum {
+		return errors.StructuralError("EncryptedKey checksum incorrect")
+	}
+
+	return nil
+}
+
+// Serialize writes the encrypted key packet, e, to w.
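+// The body is the 10-byte fixed prefix (one version octet, the 8-byte key
+// ID and one algorithm octet) followed by the algorithm-specific MPIs; the
+// length passed to serializeHeader below is computed accordingly.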
+func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go new file mode 100644 
index 00000000..1a9ec6e5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/literal.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// LiteralData represents a literal data packet, which carries the raw body
+// of a message. See RFC 4880, section 5.9.
+type LiteralData struct {
+	IsBinary bool
+	FileName string
+	Time     uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
+	Body     io.Reader
+}
+
+// ForEyesOnly returns whether the contents of the LiteralData have been marked
+// as especially sensitive.
+func (l *LiteralData) ForEyesOnly() bool {
+	return l.FileName == "_CONSOLE"
+}
+
+func (l *LiteralData) parse(r io.Reader) (err error) {
+	var buf [256]byte
+
+	_, err = readFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+
+	l.IsBinary = buf[0] == 'b'
+	fileNameLen := int(buf[1])
+
+	_, err = readFull(r, buf[:fileNameLen])
+	if err != nil {
+		return
+	}
+
+	l.FileName = string(buf[:fileNameLen])
+
+	_, err = readFull(r, buf[:4])
+	if err != nil {
+		return
+	}
+
+	l.Time = binary.BigEndian.Uint32(buf[:4])
+	l.Body = r
+	return
+}
+
+// SerializeLiteral serializes a literal data packet to w and returns a
+// WriteCloser to which the data itself can be written and which MUST be closed
+// on completion. The fileName is truncated to 255 bytes.
+func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
+	var buf [4]byte
+	buf[0] = 't'
+	if isBinary {
+		buf[0] = 'b'
+	}
+	if len(fileName) > 255 {
+		fileName = fileName[:255]
+	}
+	buf[1] = byte(len(fileName))
+
+	inner, err := serializeStreamHeader(w, packetTypeLiteralData)
+	if err != nil {
+		return
+	}
+
+	_, err = inner.Write(buf[:2])
+	if err != nil {
+		return
+	}
+	_, err = inner.Write([]byte(fileName))
+	if err != nil {
+		return
+	}
+	binary.BigEndian.PutUint32(buf[:], time)
+	_, err = inner.Write(buf[:])
+	if err != nil {
+		return
+	}
+
+	plaintext = inner
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
new file mode 100644
index 00000000..ce2a33a5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
@@ -0,0 +1,143 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
+
+package packet
+
+import (
+	"crypto/cipher"
+)
+
+type ocfbEncrypter struct {
+	b       cipher.Block
+	fre     []byte
+	outUsed int
+}
+
+// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
+// performed.
+type OCFBResyncOption bool
+
+const (
+	OCFBResync   OCFBResyncOption = true
+	OCFBNoResync OCFBResyncOption = false
+)
+
+// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
+// cipher feedback mode using the given cipher.Block, and an initial amount of
+// ciphertext. randData must be random bytes and be the same length as the
+// cipher.Block's block size. Resync determines if the "resynchronization step"
+// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
+// this point.
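+//
+// As an illustrative sketch (assumed usage, not upstream documentation):
+// the returned prefix is written out first, and the stream then encrypts
+// the packet body, e.g. with an AES block from crypto/aes:
+//
+//	block, _ := aes.NewCipher(key)
+//	seed := make([]byte, block.BlockSize()) // fill with random bytes first
+//	stream, prefix := NewOCFBEncrypter(block, seed, OCFBResync)
+//	w.Write(prefix)
+//	stream.XORKeyStream(data, data)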
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000..17135033 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
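+// The 13-octet body holds, in order: a version octet, the signature type,
+// the hash and public-key algorithm identifiers, the 8-byte key ID, and a
+// final octet that is non-zero for the last one-pass signature, matching
+// parse and Serialize below.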
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go new file mode 100644 index 00000000..39844773 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + + "golang.org/x/crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = io.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. 
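+// As an illustrative sketch (assumed usage, not upstream documentation),
+// a stream is drained by looping until the underlying reader is exhausted:
+//
+//	for {
+//		op, err := or.Next()
+//		if err != nil {
+//			break // io.EOF once the input is consumed
+//		}
+//		// inspect op.Tag and op.Contents
+//	}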
+func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go new file mode 100644 index 00000000..0a19794a --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -0,0 +1,590 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. 
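+//
+// As an illustrative sketch (assumed minimal usage, not part of the
+// upstream documentation), packets are typically consumed by calling
+// Read in a loop and type-switching on the result:
+//
+//	for {
+//		p, err := packet.Read(r)
+//		if err != nil {
+//			break // io.EOF at the end of the stream
+//		}
+//		switch pkt := p.(type) {
+//		case *packet.LiteralData:
+//			io.Copy(io.Discard, pkt.Body)
+//		}
+//	}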
+package packet // import "golang.org/x/crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rsa" + "io" + "math/big" + "math/bits" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte + sentFirst bool + buf []byte +} + +// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. +const minFirstPartialWrite = 512 + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + off := 0 + if !w.sentFirst { + if len(w.buf) > 0 || len(p) < minFirstPartialWrite { + off = len(w.buf) + w.buf = append(w.buf, p...) + if len(w.buf) < minFirstPartialWrite { + return len(p), nil + } + p = w.buf + w.buf = nil + } + w.sentFirst = true + } + + power := uint8(30) + for len(p) > 0 { + l := 1 << power + if len(p) < l { + power = uint8(bits.Len32(uint32(len(p)))) - 1 + l = 1 << power + } + w.lengthByte[0] = 224 + power + _, err = w.w.Write(w.lengthByte[:]) + if err == nil { + var m int + m, err = w.w.Write(p[:l]) + n += m + } + if err != nil { + if n < off { + return 0, err + } + return n - off, err + } + p = p[l:] + } + return n - off, nil +} + +func (w *partialLengthWriter) Close() error { + if len(w.buf) > 0 { + // In this case we can't send a 512 byte packet. + // Just send what we have. 
+		p := w.buf
+		w.sentFirst = true
+		w.buf = nil
+		if _, err := w.Write(p); err != nil {
+			return err
+		}
+	}
+
+	w.lengthByte[0] = 0
+	_, err := w.w.Write(w.lengthByte[:])
+	if err != nil {
+		return err
+	}
+	return w.w.Close()
+}
+
+// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
+// underlying Reader returns EOF before the limit has been reached.
+type spanReader struct {
+	r io.Reader
+	n int64
+}
+
+func (l *spanReader) Read(p []byte) (n int, err error) {
+	if l.n <= 0 {
+		return 0, io.EOF
+	}
+	if int64(len(p)) > l.n {
+		p = p[0:l.n]
+	}
+	n, err = l.r.Read(p)
+	l.n -= int64(n)
+	if l.n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// readHeader parses a packet header and returns an io.Reader which will return
+// the contents of the packet. See RFC 4880, section 4.2.
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
+	var buf [4]byte
+	_, err = io.ReadFull(r, buf[:1])
+	if err != nil {
+		return
+	}
+	if buf[0]&0x80 == 0 {
+		err = errors.StructuralError("tag byte does not have MSB set")
+		return
+	}
+	if buf[0]&0x40 == 0 {
+		// Old format packet
+		tag = packetType((buf[0] & 0x3f) >> 2)
+		lengthType := buf[0] & 3
+		if lengthType == 3 {
+			length = -1
+			contents = r
+			return
+		}
+		lengthBytes := 1 << lengthType
+		_, err = readFull(r, buf[0:lengthBytes])
+		if err != nil {
+			return
+		}
+		for i := 0; i < lengthBytes; i++ {
+			length <<= 8
+			length |= int64(buf[i])
+		}
+		contents = &spanReader{r, length}
+		return
+	}
+
+	// New format packet
+	tag = packetType(buf[0] & 0x3f)
+	length, isPartial, err := readLength(r)
+	if err != nil {
+		return
+	}
+	if isPartial {
+		contents = &partialLengthReader{
+			remaining: length,
+			isPartial: true,
+			r:         r,
+		}
+		length = -1
+	} else {
+		contents = &spanReader{r, length}
+	}
+	return
+}
+
+// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
+// 4.2.
+func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
+	var buf [6]byte
+	var n int
+
+	buf[0] = 0x80 | 0x40 | byte(ptype)
+	if length < 192 {
+		buf[1] = byte(length)
+		n = 2
+	} else if length < 8384 {
+		length -= 192
+		buf[1] = 192 + byte(length>>8)
+		buf[2] = byte(length)
+		n = 3
+	} else {
+		buf[1] = 255
+		buf[2] = byte(length >> 24)
+		buf[3] = byte(length >> 16)
+		buf[4] = byte(length >> 8)
+		buf[5] = byte(length)
+		n = 6
+	}
+
+	_, err = w.Write(buf[:n])
+	return
+}
+
+// serializeStreamHeader writes an OpenPGP packet header to w where the
+// length of the packet is unknown. It returns an io.WriteCloser which can be
+// used to write the contents of the packet. See RFC 4880, section 4.2.
+func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
+	var buf [1]byte
+	buf[0] = 0x80 | 0x40 | byte(ptype)
+	_, err = w.Write(buf[:])
+	if err != nil {
+		return
+	}
+	out = &partialLengthWriter{w: w}
+	return
+}
+
+// Packet represents an OpenPGP packet. Users are expected to try casting
+// instances of this interface to specific packet types.
+type Packet interface {
+	parse(io.Reader) error
+}
+
+// consumeAll reads from the given Reader until error, returning the number of
+// bytes read.
+func consumeAll(r io.Reader) (n int64, err error) {
+	var m int
+	var buf [1024]byte
+
+	for {
+		m, err = r.Read(buf[:])
+		n += int64(m)
+		if err == io.EOF {
+			err = nil
+			return
+		}
+		if err != nil {
+			return
+		}
+	}
+}
+
+// packetType represents the numeric ids of the different OpenPGP packet types. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
+type packetType uint8
+
+const (
+	packetTypeEncryptedKey              packetType = 1
+	packetTypeSignature                 packetType = 2
+	packetTypeSymmetricKeyEncrypted     packetType = 3
+	packetTypeOnePassSignature          packetType = 4
+	packetTypePrivateKey                packetType = 5
+	packetTypePublicKey                 packetType = 6
+	packetTypePrivateSubkey             packetType = 7
+	packetTypeCompressed                packetType = 8
+	packetTypeSymmetricallyEncrypted    packetType = 9
+	packetTypeLiteralData               packetType = 11
+	packetTypeUserId                    packetType = 13
+	packetTypePublicSubkey              packetType = 14
+	packetTypeUserAttribute             packetType = 17
+	packetTypeSymmetricallyEncryptedMDC packetType = 18
+)
+
+// peekVersion detects the version of a public key packet about to
+// be read. A bufio.Reader at the original position of the io.Reader
+// is returned.
+func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
+	bufr = bufio.NewReader(r)
+	var verBuf []byte
+	if verBuf, err = bufr.Peek(1); err != nil {
+		return
+	}
+	ver = verBuf[0]
+	return
+}
+
+// Read reads a single OpenPGP packet from the given io.Reader. If there is an
+// error parsing a packet, the whole packet is consumed from the input.
+func Read(r io.Reader) (p Packet, err error) {
+	tag, _, contents, err := readHeader(r)
+	if err != nil {
+		return
+	}
+
+	switch tag {
+	case packetTypeEncryptedKey:
+		p = new(EncryptedKey)
+	case packetTypeSignature:
+		var version byte
+		// Detect signature version
+		if contents, version, err = peekVersion(contents); err != nil {
+			return
+		}
+		if version < 4 {
+			p = new(SignatureV3)
+		} else {
+			p = new(Signature)
+		}
+	case packetTypeSymmetricKeyEncrypted:
+		p = new(SymmetricKeyEncrypted)
+	case packetTypeOnePassSignature:
+		p = new(OnePassSignature)
+	case packetTypePrivateKey, packetTypePrivateSubkey:
+		pk := new(PrivateKey)
+		if tag == packetTypePrivateSubkey {
+			pk.IsSubkey = true
+		}
+		p = pk
+	case packetTypePublicKey, packetTypePublicSubkey:
+		var version byte
+		if contents, version, err = peekVersion(contents); err != nil {
+			return
+		}
+		isSubkey := tag == packetTypePublicSubkey
+		if version < 4 {
+			p = &PublicKeyV3{IsSubkey: isSubkey}
+		} else {
+			p = &PublicKey{IsSubkey: isSubkey}
+		}
+	case packetTypeCompressed:
+		p = new(Compressed)
+	case packetTypeSymmetricallyEncrypted:
+		p = new(SymmetricallyEncrypted)
+	case packetTypeLiteralData:
+		p = new(LiteralData)
+	case packetTypeUserId:
+		p = new(UserId)
+	case packetTypeUserAttribute:
+		p = new(UserAttribute)
+	case packetTypeSymmetricallyEncryptedMDC:
+		se := new(SymmetricallyEncrypted)
+		se.MDC = true
+		p = se
+	default:
+		err = errors.UnknownPacketTypeError(tag)
+	}
+	if p != nil {
+		err = p.parse(contents)
+	}
+	if err != nil {
+		consumeAll(contents)
+	}
+	return
+}
+
+// SignatureType represents the different semantic meanings of an OpenPGP
+// signature. See RFC 4880, section 5.2.1.
+type SignatureType uint8
+
+const (
+	SigTypeBinary            SignatureType = 0
+	SigTypeText                            = 1
+	SigTypeGenericCert                     = 0x10
+	SigTypePersonaCert                     = 0x11
+	SigTypeCasualCert                      = 0x12
+	SigTypePositiveCert                    = 0x13
+	SigTypeSubkeyBinding                   = 0x18
+	SigTypePrimaryKeyBinding               = 0x19
+	SigTypeDirectSignature                 = 0x1F
+	SigTypeKeyRevocation                   = 0x20
+	SigTypeSubkeyRevocation                = 0x28
+)
+
+// PublicKeyAlgorithm represents the different public key systems specified for
+// OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
+type PublicKeyAlgorithm uint8
+
+const (
+	PubKeyAlgoRSA     PublicKeyAlgorithm = 1
+	PubKeyAlgoElGamal PublicKeyAlgorithm = 16
+	PubKeyAlgoDSA     PublicKeyAlgorithm = 17
+	// RFC 6637, Section 5.
+	PubKeyAlgoECDH  PublicKeyAlgorithm = 18
+	PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+
+	// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
+	PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+	PubKeyAlgoRSASignOnly    PublicKeyAlgorithm = 3
+)
+
+// CanEncrypt returns true if it's possible to encrypt a message to a public
+// key of the given type.
+func (pka PublicKeyAlgorithm) CanEncrypt() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
+		return true
+	}
+	return false
+}
+
+// CanSign returns true if it's possible for a public key of the given type to
+// sign a message.
+func (pka PublicKeyAlgorithm) CanSign() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
+		return true
+	}
+	return false
+}
+
+// CipherFunction represents the different block ciphers specified for OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
+type CipherFunction uint8
+
+const (
+	Cipher3DES   CipherFunction = 2
+	CipherCAST5  CipherFunction = 3
+	CipherAES128 CipherFunction = 7
+	CipherAES192 CipherFunction = 8
+	CipherAES256 CipherFunction = 9
+)
+
+// KeySize returns the key size, in bytes, of cipher.
+func (cipher CipherFunction) KeySize() int {
+	switch cipher {
+	case Cipher3DES:
+		return 24
+	case CipherCAST5:
+		return cast5.KeySize
+	case CipherAES128:
+		return 16
+	case CipherAES192:
+		return 24
+	case CipherAES256:
+		return 32
+	}
+	return 0
+}
+
+// blockSize returns the block size, in bytes, of cipher.
+func (cipher CipherFunction) blockSize() int {
+	switch cipher {
+	case Cipher3DES:
+		return des.BlockSize
+	case CipherCAST5:
+		return 8
+	case CipherAES128, CipherAES192, CipherAES256:
+		return 16
+	}
+	return 0
+}
+
+// new returns a fresh instance of the given cipher.
+func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
+	switch cipher {
+	case Cipher3DES:
+		block, _ = des.NewTripleDESCipher(key)
+	case CipherCAST5:
+		block, _ = cast5.NewCipher(key)
+	case CipherAES128, CipherAES192, CipherAES256:
+		block, _ = aes.NewCipher(key)
+	}
+	return
+}
+
+// readMPI reads a big integer from r. The bit length returned is the bit
+// length that was specified in r. This is preserved so that the integer can be
+// reserialized exactly.
+func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
+	var buf [2]byte
+	_, err = readFull(r, buf[0:])
+	if err != nil {
+		return
+	}
+	bitLength = uint16(buf[0])<<8 | uint16(buf[1])
+	numBytes := (int(bitLength) + 7) / 8
+	mpi = make([]byte, numBytes)
+	_, err = readFull(r, mpi)
+	// According to RFC 4880 3.2. we should check that the MPI has no leading
+	// zeroes (at least when not an encrypted MPI?), but this implementation
+	// does generate leading zeroes, so we keep accepting them.
+	return
+}
+
+// writeMPI serializes a big integer to w.
+func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
+	// Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
+	// Implementations seem to be tolerant of them, and stripping them would
+	// make it complex to guarantee matching re-serialization.
+	_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
+	if err == nil {
+		_, err = w.Write(mpiBytes)
+	}
+	return
+}
+
+// writeBig serializes a *big.Int to w.
+func writeBig(w io.Writer, i *big.Int) error {
+	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+	CompressionNone CompressionAlgo = 0
+	CompressionZIP  CompressionAlgo = 1
+	CompressionZLIB CompressionAlgo = 2
+)
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
new file mode 100644
index 00000000..192aac37
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -0,0 +1,384 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/cipher"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/sha1"
+	"io"
+	"math/big"
+	"strconv"
+	"time"
+
+	"golang.org/x/crypto/openpgp/elgamal"
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/s2k"
+)
+
+// PrivateKey represents a possibly encrypted private key. See RFC 4880,
+// section 5.5.3.
+type PrivateKey struct {
+	PublicKey
+	Encrypted     bool // if true then the private key is unavailable until Decrypt has been called.
+	encryptedData []byte
+	cipher        CipherFunction
+	s2k           func(out, in []byte)
+	PrivateKey    interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decrypter RSA only).
+	sha1Checksum  bool
+	iv            []byte
+}
+
+func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
+// implements RSA or ECDSA.
+func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
+	pk := new(PrivateKey)
+	// In general, the public keys should be used as pointers. We still
+	// type-switch on the values, for backwards-compatibility.
+	switch pubkey := signer.Public().(type) {
+	case *rsa.PublicKey:
+		pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey)
+	case rsa.PublicKey:
+		pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey)
+	case *ecdsa.PublicKey:
+		pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey)
+	case ecdsa.PublicKey:
+		pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey)
+	default:
+		panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
+	}
+	pk.PrivateKey = signer
+	return pk
+}
+
+func (pk *PrivateKey) parse(r io.Reader) (err error) {
+	err = (&pk.PublicKey).parse(r)
+	if err != nil {
+		return
+	}
+	var buf [1]byte
+	_, err = readFull(r, buf[:])
+	if err != nil {
+		return
+	}
+
+	s2kType := buf[0]
+
+	switch s2kType {
+	case 0:
+		pk.s2k = nil
+		pk.Encrypted = false
+	case 254, 255:
+		_, err = readFull(r, buf[:])
+		if err != nil {
+			return
+		}
+		pk.cipher = CipherFunction(buf[0])
+		pk.Encrypted = true
+		pk.s2k, err = s2k.Parse(r)
+		if err != nil {
+			return
+		}
+		if s2kType == 254 {
+			pk.sha1Checksum = true
+		}
+	default:
+		return errors.UnsupportedError("deprecated s2k function in private key")
+	}
+
+	if pk.Encrypted {
+		blockSize := pk.cipher.blockSize()
+		if blockSize == 0 {
+			return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
+		}
+		pk.iv = make([]byte, blockSize)
+		_, err = readFull(r, pk.iv)
+		if err != nil {
+			return
+		}
+	}
+
+	pk.encryptedData, err = io.ReadAll(r)
+	if err != nil {
+		return
+	}
+
+	if !pk.Encrypted {
+		return pk.parsePrivateKey(pk.encryptedData)
+	}
+
+	return
+}
+
+func mod64kHash(d []byte) uint16 {
+	var h uint16
+	for _, b := range d {
+		h += uint16(b)
+	}
+	return h
+}
+
+func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
+	// TODO(agl): support encrypted private keys
+	buf := bytes.NewBuffer(nil)
+	err = pk.PublicKey.serializeWithoutHeaders(buf)
+	if err != nil {
+		return
+	}
+	buf.WriteByte(0 /* no encryption */)
+
+	privateKeyBuf := bytes.NewBuffer(nil)
+
+	switch priv := pk.PrivateKey.(type) {
+	case *rsa.PrivateKey:
+		err = serializeRSAPrivateKey(privateKeyBuf, priv)
+	case *dsa.PrivateKey:
+		err = serializeDSAPrivateKey(privateKeyBuf, priv)
+	case *elgamal.PrivateKey:
+		err = serializeElGamalPrivateKey(privateKeyBuf, priv)
+	case *ecdsa.PrivateKey:
+		err = serializeECDSAPrivateKey(privateKeyBuf, priv)
+	default:
+		err = errors.InvalidArgumentError("unknown private key type")
+	}
+	if err != nil {
+		return
+	}
+
+	ptype := packetTypePrivateKey
+	contents := buf.Bytes()
+	privateKeyBytes := privateKeyBuf.Bytes()
+	if pk.IsSubkey {
+		ptype = packetTypePrivateSubkey
+	}
+	err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
+	if err != nil {
+		return
+	}
+	_, err = w.Write(contents)
+	if err != nil {
+		return
+	}
+	_, err = w.Write(privateKeyBytes)
+	if err != nil {
+		return
+	}
+
+	checksum := mod64kHash(privateKeyBytes)
+	var checksumBytes [2]byte
+	checksumBytes[0] = byte(checksum >> 8)
+	checksumBytes[1] = byte(checksum)
+	_, err = w.Write(checksumBytes[:])
+
+	return
+}
+
+func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
+	err := writeBig(w, priv.D)
+	if err != nil {
+		return err
+	}
+	err = writeBig(w, priv.Primes[1])
+	if err != nil {
+		return err
+	}
+	err = writeBig(w, priv.Primes[0])
+	if err != nil {
+		return err
+	}
+	return writeBig(w, priv.Precomputed.Qinv)
+}
+
+func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
+	return writeBig(w, priv.X)
+}
+
+func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
+	return writeBig(w, priv.X)
+}
+
+func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
+	return writeBig(w, priv.D)
+}
+
+// Decrypt decrypts an encrypted private key using a passphrase.
+func (pk *PrivateKey) Decrypt(passphrase []byte) error {
+	if !pk.Encrypted {
+		return nil
+	}
+
+	key := make([]byte, pk.cipher.KeySize())
+	pk.s2k(key, passphrase)
+	block := pk.cipher.new(key)
+	cfb := cipher.NewCFBDecrypter(block, pk.iv)
+
+	data := make([]byte, len(pk.encryptedData))
+	cfb.XORKeyStream(data, pk.encryptedData)
+
+	if pk.sha1Checksum {
+		if len(data) < sha1.Size {
+			return errors.StructuralError("truncated private key data")
+		}
+		h := sha1.New()
+		h.Write(data[:len(data)-sha1.Size])
+		sum := h.Sum(nil)
+		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
+			return errors.StructuralError("private key checksum failure")
+		}
+		data = data[:len(data)-sha1.Size]
+	} else {
+		if len(data) < 2 {
+			return errors.StructuralError("truncated private key data")
+		}
+		var sum uint16
+		for i := 0; i < len(data)-2; i++ {
+			sum += uint16(data[i])
+		}
+		if data[len(data)-2] != uint8(sum>>8) ||
+			data[len(data)-1] != uint8(sum) {
+			return errors.StructuralError("private key checksum failure")
+		}
+		data = data[:len(data)-2]
+	}
+
+	return pk.parsePrivateKey(data)
+}
+
+func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
+	switch pk.PublicKey.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
+		return pk.parseRSAPrivateKey(data)
+	case PubKeyAlgoDSA:
+		return pk.parseDSAPrivateKey(data)
+	case PubKeyAlgoElGamal:
+		return pk.parseElGamalPrivateKey(data)
+	case PubKeyAlgoECDSA:
+		return pk.parseECDSAPrivateKey(data)
+	}
+	panic("impossible")
+}
+
+func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
+	rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
+	rsaPriv := new(rsa.PrivateKey)
+	rsaPriv.PublicKey = *rsaPub
+
+	buf := bytes.NewBuffer(data)
+	d, _, err := readMPI(buf)
+	if err != nil {
+		return
+	}
+	p, _, err := readMPI(buf)
+	if err != nil {
+		return
+	}
+	q, _, err := readMPI(buf)
+	if err != nil {
+		return
+	}
+
+	rsaPriv.D = new(big.Int).SetBytes(d)
+	rsaPriv.Primes = make([]*big.Int, 2)
+	rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
+	rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
+	if err := rsaPriv.Validate(); err != nil {
+		return err
+	}
+	rsaPriv.Precompute()
+	pk.PrivateKey = rsaPriv
+	pk.Encrypted = false
+	pk.encryptedData = nil
+
+	return nil
+}
+
+func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
+	dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
+	dsaPriv := new(dsa.PrivateKey)
+	dsaPriv.PublicKey = *dsaPub
+
+	buf := bytes.NewBuffer(data)
+	x, _, err := readMPI(buf)
+	if err != nil {
+		return
+	}
+
+	dsaPriv.X = new(big.Int).SetBytes(x)
+	pk.PrivateKey = dsaPriv
+	pk.Encrypted = false
+	pk.encryptedData = nil
+
+	return nil
+}
+
+func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
+	pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
+	priv := new(elgamal.PrivateKey)
+	priv.PublicKey = *pub
+
+	buf := bytes.NewBuffer(data)
+	x, _, err := readMPI(buf)
+	if err != nil {
+		return
+	}
+
+	priv.X = new(big.Int).SetBytes(x)
+	pk.PrivateKey = priv
+	pk.Encrypted = false
+	pk.encryptedData = nil
+
+	return nil
+}
+
+func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
+	ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
+
+	buf := bytes.NewBuffer(data)
+	d, _, err := readMPI(buf)
+	if err != nil {
+		return
+	}
+
+	pk.PrivateKey = &ecdsa.PrivateKey{
+		PublicKey: *ecdsaPub,
+		D:         new(big.Int).SetBytes(d),
+	}
+	pk.Encrypted = false
+	pk.encryptedData = nil
+
+	return nil
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
new file mode 100644
index 00000000..fcd5f525
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
@@ -0,0 +1,753 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/sha1"
+	_ "crypto/sha256"
+	_ "crypto/sha512"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"io"
+	"math/big"
+	"strconv"
+	"time"
+
+	"golang.org/x/crypto/openpgp/elgamal"
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+var (
+	// NIST curve P-256
+	oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
+	// NIST curve P-384
+	oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
+	// NIST curve P-521
+	oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
+)
+
+const maxOIDLength = 8
+
+// ecdsaKey stores the algorithm-specific fields for ECDSA keys,
+// as defined in RFC 6637, Section 9.
+type ecdsaKey struct {
+	// oid contains the OID byte sequence identifying the elliptic curve used
+	oid []byte
+	// p contains the elliptic curve point that represents the public key
+	p parsedMPI
+}
+
+// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
+func parseOID(r io.Reader) (oid []byte, err error) {
+	buf := make([]byte, maxOIDLength)
+	if _, err = readFull(r, buf[:1]); err != nil {
+		return
+	}
+	oidLen := buf[0]
+	if int(oidLen) > len(buf) {
+		err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
+		return
+	}
+	oid = buf[:oidLen]
+	_, err = readFull(r, oid)
+	return
+}
+
+func (f *ecdsaKey) parse(r io.Reader) (err error) {
+	if f.oid, err = parseOID(r); err != nil {
+		return err
+	}
+	f.p.bytes, f.p.bitLength, err = readMPI(r)
+	return
+}
+
+func (f *ecdsaKey) serialize(w io.Writer) (err error) {
+	buf := make([]byte, maxOIDLength+1)
+	buf[0] = byte(len(f.oid))
+	copy(buf[1:], f.oid)
+	if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
+		return
+	}
+	return writeMPIs(w, f.p)
+}
+
+func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
+	var c elliptic.Curve
+	if bytes.Equal(f.oid, oidCurveP256) {
+		c = elliptic.P256()
+	} else if bytes.Equal(f.oid, oidCurveP384) {
+		c = elliptic.P384()
+	} else if bytes.Equal(f.oid, oidCurveP521) {
+		c = elliptic.P521()
+	} else {
+		return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
+	}
+	x, y := elliptic.Unmarshal(c, f.p.bytes)
+	if x == nil {
+		return nil, errors.UnsupportedError("failed to parse EC point")
+	}
+	return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
+}
+
+func (f *ecdsaKey) byteLen() int {
+	return 1 + len(f.oid) + 2 + len(f.p.bytes)
+}
+
+type kdfHashFunction byte
+type kdfAlgorithm byte
+
+// ecdhKdf stores key derivation function parameters
+// used for ECDH encryption. See RFC 6637, Section 9.
+type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func fromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: fromBig(pub.P), + q: fromBig(pub.Q), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + // The ECDH key is stored in an ecdsa.PublicKey for convenience. + pk.PublicKey, err = pk.ec.newECDSA() + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
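+// The key material consists of four MPIs in the order p, q, g, y.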
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
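+// The fixed portion is six bytes: a version octet (4), the big-endian
+// 32-bit creation time and the algorithm octet; the algorithm-specific
+// MPIs follow.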
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
+func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
+func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal: + bitLength = pk.p.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 00000000..5daf7b6c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,279 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." +func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
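+//
+// As a worked example: for a v3 RSA key with a 128-byte modulus and a
+// 3-byte exponent, pLength is (2+128) + (2+3) + 6 = 141, so the bytes
+// written are {0x99, 0x00, 0x8d}.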
+func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
+	var pLength uint16
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+		pLength += 2 + uint16(len(pk.n.bytes))
+		pLength += 2 + uint16(len(pk.e.bytes))
+	default:
+		panic("unknown public key algorithm")
+	}
+	pLength += 6
+	w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
+	return
+}
+
+func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
+	length := 8 // 8 byte header
+
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+		length += 2 + len(pk.n.bytes)
+		length += 2 + len(pk.e.bytes)
+	default:
+		panic("unknown public key algorithm")
+	}
+
+	packetType := packetTypePublicKey
+	if pk.IsSubkey {
+		packetType = packetTypePublicSubkey
+	}
+	if err = serializeHeader(w, packetType, length); err != nil {
+		return
+	}
+	return pk.serializeWithoutHeaders(w)
+}
+
+// serializeWithoutHeaders marshals the PublicKey to w in the form of an
+// OpenPGP public key packet, not including the packet header.
+func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
+	var buf [8]byte
+	// Version 3
+	buf[0] = 3
+	// Creation time
+	t := uint32(pk.CreationTime.Unix())
+	buf[1] = byte(t >> 24)
+	buf[2] = byte(t >> 16)
+	buf[3] = byte(t >> 8)
+	buf[4] = byte(t)
+	// Days to expire
+	buf[5] = byte(pk.DaysToExpire >> 8)
+	buf[6] = byte(pk.DaysToExpire)
+	// Public key algorithm
+	buf[7] = byte(pk.PubKeyAlgo)
+
+	if _, err = w.Write(buf[:]); err != nil {
+		return
+	}
+
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+		return writeMPIs(w, pk.n, pk.e)
+	}
+	return errors.InvalidArgumentError("bad public-key algorithm")
+}
+
+// CanSign returns true iff this public key can generate signatures
+func (pk *PublicKeyV3) CanSign() bool {
+	return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
+}
+
+// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, of the data hashed into signed. signed is mutated by this call.
+func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
+	if !pk.CanSign() {
+		return errors.InvalidArgumentError("public key cannot generate signatures")
+	}
+
+	suffix := make([]byte, 5)
+	suffix[0] = byte(sig.SigType)
+	binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
+	signed.Write(suffix)
+	hashBytes := signed.Sum(nil)
+
+	if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
+		return errors.SignatureError("hash tag doesn't match")
+	}
+
+	if pk.PubKeyAlgo != sig.PubKeyAlgo {
+		return errors.InvalidArgumentError("public key and signature use different algorithms")
+	}
+
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		// Pad the signature to the key size, mirroring the v4 code path;
+		// MPI encoding strips leading zeros, which rsa.VerifyPKCS1v15
+		// does not tolerate.
+		if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, padToKeySize(pk.PublicKey, sig.RSASignature.bytes)); err != nil {
+			return errors.SignatureError("RSA verification failure")
+		}
+		return
+	default:
+		// V3 public keys only support RSA.
+		panic("shouldn't happen")
+	}
+}
+
+// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, that id is the identity of pub.
+func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
+	h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return pk.VerifySignatureV3(h, sig)
+}
+
+// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, of signed.
+func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go new file mode 100644 index 00000000..34bc7c61 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. 
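+//
+// A typical use (illustrative sketch, not upstream documentation) is to
+// descend into a compressed packet returned by Next, reading Body as a new
+// source of packets:
+//
+//	if c, ok := p.(*Compressed); ok {
+//		if err := r.Push(c.Body); err != nil {
+//			// more than maxReaders levels of nesting
+//		}
+//	}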
+func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go new file mode 100644 index 00000000..b2a24a53 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go @@ -0,0 +1,731 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. 
+ EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
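+// Subpackets in the hashed area are covered by the signature itself, while
+// unhashed subpackets are only advisory; this is why several subpacket types
+// below are ignored unless isHashed is true.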
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if 
len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. 
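+	// As a worked example: a length of 1000 falls in the two-octet range
+	// and is encoded as {((1000-192)>>8)+192, (1000-192)&0xff} = {195, 40};
+	// the parser reverses this as (195-192)<<8 + 40 + 192 = 1000.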
+ if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
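+//
+// A minimal signing sketch (illustrative; assumes priv, message and out are
+// in scope and that the key material supports signing):
+//
+//	sig := &Signature{
+//		SigType:      SigTypeBinary,
+//		PubKeyAlgo:   priv.PubKeyAlgo,
+//		Hash:         crypto.SHA256,
+//		CreationTime: time.Now(),
+//		IssuerKeyId:  &priv.KeyId,
+//	}
+//	h := sig.Hash.New()
+//	h.Write(message)
+//	if err := sig.Sign(h, priv, nil); err == nil {
+//		err = sig.Serialize(out)
+//	}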
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
+	sig.outSubpackets = sig.buildSubpackets()
+	digest, err := sig.signPrepareHash(h)
+	if err != nil {
+		return
+	}
+
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		// supports both *rsa.PrivateKey and crypto.Signer
+		sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+		sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
+	case PubKeyAlgoDSA:
+		dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
+
+		// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
+		subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
+		if len(digest) > subgroupSize {
+			digest = digest[:subgroupSize]
+		}
+		// Assign to the function's named err (rather than shadowing it
+		// with ":=") so a failure from dsa.Sign reaches the caller.
+		var r, s *big.Int
+		r, s, err = dsa.Sign(config.Random(), dsaPriv, digest)
+		if err == nil {
+			sig.DSASigR.bytes = r.Bytes()
+			sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
+			sig.DSASigS.bytes = s.Bytes()
+			sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
+		}
+	case PubKeyAlgoECDSA:
+		var r, s *big.Int
+		if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
+			// direct support, avoid asn1 wrapping/unwrapping
+			r, s, err = ecdsa.Sign(config.Random(), pk, digest)
+		} else {
+			var b []byte
+			b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+			if err == nil {
+				r, s, err = unwrapECDSASig(b)
+			}
+		}
+		if err == nil {
+			sig.ECDSASigR = fromBig(r)
+			sig.ECDSASigS = fromBig(s)
+		}
+	default:
+		err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
+	}
+
+	return
+}
+
+// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
+// signature.
+func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
+	var ecdsaSig struct {
+		R, S *big.Int
+	}
+	_, err = asn1.Unmarshal(b, &ecdsaSig)
+	if err != nil {
+		return
+	}
+	return ecdsaSig.R, ecdsaSig.S, nil
+}
+
+// SignUserId computes a signature from priv, asserting that pub is a valid
+// key for the identity id. On success, the signature is stored in sig. Call
+// Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
+	h, err := userIdSignatureHash(id, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return sig.Sign(h, priv, config)
+}
+
+// SignKey computes a signature from priv, asserting that pub is a subkey. On
+// success, the signature is stored in sig. Call Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
+	h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return sig.Sign(h, priv, config)
+}
+
+// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
+// called first.
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
+ + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go new file mode 100644 index 00000000..6edff889 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. +type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. 
+	if _, err = readFull(r, buf[:8]); err != nil {
+		return
+	}
+	sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
+
+	// Public-key and hash algorithm
+	if _, err = readFull(r, buf[:2]); err != nil {
+		return
+	}
+	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
+	default:
+		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		return
+	}
+	var ok bool
+	if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
+		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[1])))
+	}
+
+	// Two-octet field holding left 16 bits of signed hash value.
+	if _, err = readFull(r, sig.HashTag[:2]); err != nil {
+		return
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
+	case PubKeyAlgoDSA:
+		if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
+			return
+		}
+		sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
+	default:
+		panic("unreachable")
+	}
+	return
+}
+
+// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
+// called first.
+func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
+	buf := make([]byte, 8)
+
+	// Write the sig type and creation time
+	buf[0] = byte(sig.SigType)
+	binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
+	if _, err = w.Write(buf[:5]); err != nil {
+		return
+	}
+
+	// Write the issuer long key ID
+	binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
+	if _, err = w.Write(buf[:8]); err != nil {
+		return
+	}
+
+	// Write public key algorithm, hash ID, and hash value
+	buf[0] = byte(sig.PubKeyAlgo)
+	hashId, ok := s2k.HashToHashId(sig.Hash)
+	if !ok {
+		return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
+	}
+	buf[1] = hashId
+	copy(buf[2:4], sig.HashTag[:])
+	if _, err = w.Write(buf[:4]); err != nil {
+		return
+	}
+
+	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
+		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		err = writeMPIs(w, sig.RSASignature)
+	case PubKeyAlgoDSA:
+		err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
+	default:
+		panic("impossible")
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
new file mode 100644
index 00000000..744c2d2c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/s2k"
+)
+
+// This is the largest session key that we'll support. Since no 512-bit cipher
+// has even been seriously used, this is comfortably large.
+const maxSessionKeySizeInBytes = 64
+
+// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
+// 4880, section 5.3.
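+//
+// A minimal usage sketch (illustrative, not upstream documentation): after
+// the packet is parsed, Decrypt recovers the session key for the following
+// symmetrically encrypted data packet from a passphrase:
+//
+//	key, cipherFunc, err := ske.Decrypt([]byte("passphrase"))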
+type SymmetricKeyEncrypted struct {
+	CipherFunc   CipherFunction
+	s2k          func(out, in []byte)
+	encryptedKey []byte
+}
+
+const symmetricKeyEncryptedVersion = 4
+
+func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
+	// RFC 4880, section 5.3.
+	var buf [2]byte
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	if buf[0] != symmetricKeyEncryptedVersion {
+		return errors.UnsupportedError("SymmetricKeyEncrypted version")
+	}
+	ske.CipherFunc = CipherFunction(buf[1])
+
+	if ske.CipherFunc.KeySize() == 0 {
+		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
+	}
+
+	var err error
+	ske.s2k, err = s2k.Parse(r)
+	if err != nil {
+		return err
+	}
+
+	encryptedKey := make([]byte, maxSessionKeySizeInBytes)
+	// The session key may follow. We just have to try and read to find
+	// out. If it exists then we limit it to maxSessionKeySizeInBytes.
+	n, err := readFull(r, encryptedKey)
+	if err != nil && err != io.ErrUnexpectedEOF {
+		return err
+	}
+
+	if n != 0 {
+		if n == maxSessionKeySizeInBytes {
+			return errors.UnsupportedError("oversized encrypted session key")
+		}
+		ske.encryptedKey = encryptedKey[:n]
+	}
+
+	return nil
+}
+
+// Decrypt attempts to decrypt an encrypted session key and returns the key and
+// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
+// packet.
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
+	key := make([]byte, ske.CipherFunc.KeySize())
+	ske.s2k(key, passphrase)
+
+	if len(ske.encryptedKey) == 0 {
+		return key, ske.CipherFunc, nil
+	}
+
+	// the IV is all zeros
+	iv := make([]byte, ske.CipherFunc.blockSize())
+	c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
+	plaintextKey := make([]byte, len(ske.encryptedKey))
+	c.XORKeyStream(plaintextKey, ske.encryptedKey)
+	cipherFunc := CipherFunction(plaintextKey[0])
+	if cipherFunc.blockSize() == 0 {
+		return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+	}
+	plaintextKey = plaintextKey[1:]
+	if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherKeySize {
+		return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " +
+			"not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")")
+	}
+	return plaintextKey, cipherFunc, nil
+}
+
+// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
+// packet contains a random session key, encrypted by a key derived from the
+// given passphrase. The session key is returned and must be passed to
+// SerializeSymmetricallyEncrypted.
+// If config is nil, sensible defaults will be used.
+func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
+	cipherFunc := config.Cipher()
+	keySize := cipherFunc.KeySize()
+	if keySize == 0 {
+		return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+	}
+
+	s2kBuf := new(bytes.Buffer)
+	keyEncryptingKey := make([]byte, keySize)
+	// s2k.Serialize salts and stretches the passphrase, and writes the
+	// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000..1a1a6296 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" + "hash" + "io" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. 
+		ocfbResync = OCFBNoResync
+	}
+
+	s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
+	if s == nil {
+		return nil, errors.ErrKeyIncorrect
+	}
+
+	plaintext := cipher.StreamReader{S: s, R: se.contents}
+
+	if se.MDC {
+		// MDC packets have an embedded hash that we need to check.
+		h := sha1.New()
+		h.Write(se.prefix)
+		return &seMDCReader{in: plaintext, h: h}, nil
+	}
+
+	// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
+	return seReader{plaintext}, nil
+}
+
+// seReader wraps an io.Reader with a no-op Close method.
+type seReader struct {
+	in io.Reader
+}
+
+func (ser seReader) Read(buf []byte) (int, error) {
+	return ser.in.Read(buf)
+}
+
+func (ser seReader) Close() error {
+	return nil
+}
+
+const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
+
+// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
+// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
+// MDC packet containing a hash of the previous contents which is checked
+// against the running hash. See RFC 4880, section 5.13.
+type seMDCReader struct {
+	in          io.Reader
+	h           hash.Hash
+	trailer     [mdcTrailerSize]byte
+	scratch     [mdcTrailerSize]byte
+	trailerUsed int
+	error       bool
+	eof         bool
+}
+
+func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
+	if ser.error {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	if ser.eof {
+		err = io.EOF
+		return
+	}
+
+	// If we haven't yet filled the trailer buffer then we must do that
+	// first.
+	for ser.trailerUsed < mdcTrailerSize {
+		n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
+		ser.trailerUsed += n
+		if err == io.EOF {
+			if ser.trailerUsed != mdcTrailerSize {
+				n = 0
+				err = io.ErrUnexpectedEOF
+				ser.error = true
+				return
+			}
+			ser.eof = true
+			n = 0
+			return
+		}
+
+		if err != nil {
+			n = 0
+			return
+		}
+	}
+
+	// If it's a short read then we read into a temporary buffer and shift
+	// the data into the caller's buffer.
+	if len(buf) <= mdcTrailerSize {
+		n, err = readFull(ser.in, ser.scratch[:len(buf)])
+		copy(buf, ser.trailer[:n])
+		ser.h.Write(buf[:n])
+		copy(ser.trailer[:], ser.trailer[n:])
+		copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
+		if n < len(buf) {
+			ser.eof = true
+			err = io.EOF
+		}
+		return
+	}
+
+	n, err = ser.in.Read(buf[mdcTrailerSize:])
+	copy(buf, ser.trailer[:])
+	ser.h.Write(buf[:n])
+	copy(ser.trailer[:], buf[n:])
+
+	if err == io.EOF {
+		ser.eof = true
+	}
+	return
+}
+
+// This is a new-format packet tag byte for a type 19 (MDC) packet.
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19
+
+func (ser *seMDCReader) Close() error {
+	if ser.error {
+		return errors.SignatureError("error during reading")
+	}
+
+	for !ser.eof {
+		// We haven't seen EOF so we need to read to the end
+		var buf [1024]byte
+		_, err := ser.Read(buf[:])
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return errors.SignatureError("error during reading")
+		}
+	}
+
+	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
+		return errors.SignatureError("MDC packet not found")
+	}
+	ser.h.Write(ser.trailer[:2])
+
+	final := ser.h.Sum(nil)
+	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
+		return errors.SignatureError("hash mismatch")
+	}
+	return nil
+}
+
+// An seMDCWriter writes through to an io.WriteCloser while maintaining a
+// running hash of the data written. On close, it emits an MDC packet
+// containing the running hash.
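+// The emitted trailer is always mdcTrailerSize (22) bytes: the new-format
+// packet tag byte 0xd3 (0x80|0x40|19), a length byte of 20 (sha1.Size), and
+// the 20-byte SHA-1 digest itself.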
+type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an io.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go new file mode 100644 index 00000000..ff7ef530 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -0,0 +1,90 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
+			0, 0, 0, 0,
+			0, 0, 0, 0}
+		if _, err = buf.Write(data); err != nil {
+			return
+		}
+		if err = jpeg.Encode(&buf, photo, nil); err != nil {
+			return
+		}
+		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
+			SubType:  UserAttrImageSubpacket,
+			Contents: buf.Bytes()})
+	}
+	return
+}
+
+// NewUserAttribute creates a new user attribute packet containing the given subpackets.
+func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
+	return &UserAttribute{Contents: contents}
+}
+
+func (uat *UserAttribute) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.13
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uat.Contents, err = OpaqueSubpackets(b)
+	return
+}
+
+// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
+// header.
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
+	var buf bytes.Buffer
+	for _, sp := range uat.Contents {
+		sp.Serialize(&buf)
+	}
+	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
+		return err
+	}
+	_, err = w.Write(buf.Bytes())
+	return
+}
+
+// ImageData returns zero or more byte slices, each containing
+// JPEG File Interchange Format (JFIF), for each photo in the
+// user attribute packet.
+func (uat *UserAttribute) ImageData() (imageData [][]byte) {
+	for _, sp := range uat.Contents {
+		if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
+			imageData = append(imageData, sp.Contents[16:])
+		}
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
new file mode 100644
index 00000000..359a462e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
@@ -0,0 +1,159 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"io"
+	"strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+	Name, Comment, Email string
+}
+
+func hasInvalidCharacters(s string) bool {
+	for _, c := range s {
+		switch c {
+		case '(', ')', '<', '>', 0:
+			return true
+		}
+	}
+	return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
+func NewUserId(name, comment, email string) *UserId {
+	// RFC 4880 doesn't deal with the structure of userid strings; the
+	// name, comment and email form is just a convention. However, there's
+	// no convention about escaping the metacharacters and GPG just refuses
+	// to create user ids where, say, the name contains a '('. We mirror
+	// this behaviour.
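+	//
+	// For illustration (hypothetical values, not from the upstream file):
+	// NewUserId("Alice", "work", "alice@example.com") yields a UserId whose
+	// Id field is "Alice (work) <alice@example.com>".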
+
+	if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+		return nil
+	}
+
+	uid := new(UserId)
+	uid.Name, uid.Comment, uid.Email = name, comment, email
+	uid.Id = name
+	if len(comment) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "("
+		uid.Id += comment
+		uid.Id += ")"
+	}
+	if len(email) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "<"
+		uid.Id += email
+		uid.Id += ">"
+	}
+	return uid
+}
+
+func (uid *UserId) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.11
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uid.Id = string(b)
+	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+	return
+}
+
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) error {
+	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte(uid.Id))
+	return err
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
+func parseUserId(id string) (name, comment, email string) {
+	var n, c, e struct {
+		start, end int
+	}
+	var state int
+
+	for offset, rune := range id {
+		switch state {
+		case 0:
+			// Entering name
+			n.start = offset
+			state = 1
+			fallthrough
+		case 1:
+			// In name
+			if rune == '(' {
+				state = 2
+				n.end = offset
+			} else if rune == '<' {
+				state = 5
+				n.end = offset
+			}
+		case 2:
+			// Entering comment
+			c.start = offset
+			state = 3
+			fallthrough
+		case 3:
+			// In comment
+			if rune == ')' {
+				state = 4
+				c.end = offset
+			}
+		case 4:
+			// Between comment and email
+			if rune == '<' {
+				state = 5
+			}
+		case 5:
+			// Entering email
+			e.start = offset
+			state = 6
+			fallthrough
+		case 6:
+			// In email
+			if rune == '>' {
+				state = 7
+				e.end = offset
+			}
+		default:
+			// After email
+		}
+	}
+	switch state {
+	case 1:
+		// ended in the name
+		n.end = len(id)
+	case 3:
+		// ended in comment
+		c.end = len(id)
+	case 6:
+		// ended in email
+		e.end = len(id)
+	}
+
+	name = strings.TrimSpace(id[n.start:n.end])
+	comment = strings.TrimSpace(id[c.start:c.end])
+	email = strings.TrimSpace(id[e.start:e.end])
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
new file mode 100644
index 00000000..48a89314
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/read.go
@@ -0,0 +1,448 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package openpgp implements high level operations on OpenPGP messages.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
+package openpgp // import "golang.org/x/crypto/openpgp"
+
+import (
+	"crypto"
+	_ "crypto/sha256"
+	"hash"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/packet"
+)
+
+// SignatureType is the armor type for a PGP signature.
+var SignatureType = "PGP SIGNATURE"
+
+// readArmored reads an armored block with the given type.
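+// For illustration (hypothetical caller, not from the upstream file):
+//
+//	body, err := readArmored(sigReader, SignatureType) // expects a "PGP SIGNATURE" block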
+func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. 
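+	//
+	// A hedged caller-side sketch (hypothetical names, not from the
+	// upstream file):
+	//
+	//	md, err := openpgp.ReadMessage(r, keyring, promptFn, nil)
+	//	plaintext, err := io.ReadAll(md.UnverifiedBody)
+	//	// md.SignatureError is only meaningful after UnverifiedBody reaches EOF.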
+ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
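+//
+// Illustrative summary (paraphrasing the code below, not upstream text):
+// UnverifiedBody ends up as a signatureCheckReader for signed messages, a
+// checkReader for encrypted-but-unsigned messages, or the raw literal body.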
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if !p.IsLast { + return nil, errors.UnsupportedError("nested signatures") + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body) +} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go new file mode 100644 index 00000000..f53244a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -0,0 +1,279 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
+package s2k // import "golang.org/x/crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+	// Hash is the default hash function to be used. If
+	// nil, SHA1 is used.
+	Hash crypto.Hash
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		// SHA1 is the historical default in this package.
+		return crypto.SHA1
+	}
+
+	return c.Hash
+}
+
+func (c *Config) encodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 96 // The common case. Corresponding to 65536
+	}
+
+	i := c.S2KCount
+	switch {
+	// Behave like GPG. Should we make 65536 the lowest value used?
+	case i < 1024:
+		i = 1024
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 1024 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (encodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 1024 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 0; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
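+// For illustration (hypothetical values, not from the upstream file):
+//
+//	key := make([]byte, 32)
+//	Salted(key, sha256.New(), []byte("passphrase"), salt[:8])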
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. +func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + var buf [11]byte + buf[0] = 3 /* iterated and salted */ + buf[1], _ = HashToHashId(c.hash()) + salt := buf[2:10] + if _, err := io.ReadFull(rand, salt); err != nil { + return err + } + encodedCount := c.encodedCount() + count := decodeCount(encodedCount) + buf[10] = encodedCount + if _, err := w.Write(buf[:]); err != nil { + return err + } + + Iterated(key, c.hash().New(), passphrase, salt, count) + return nil +} + +// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. 
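+// For example, id 2 corresponds to crypto.SHA1 and id 8 to crypto.SHA256.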
+var hashToHashIdMapping = []struct { + id byte + hash crypto.Hash + name string +}{ + {1, crypto.MD5, "MD5"}, + {2, crypto.SHA1, "SHA1"}, + {3, crypto.RIPEMD160, "RIPEMD160"}, + {8, crypto.SHA256, "SHA256"}, + {9, crypto.SHA384, "SHA384"}, + {10, crypto.SHA512, "SHA512"}, + {11, crypto.SHA224, "SHA224"}, +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.hash, true + } + } + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.name, true + } + } + + return "", false +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for _, m := range hashToHashIdMapping { + if m.hash == h { + return m.id, true + } + } + return 0, false +} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go new file mode 100644 index 00000000..b89d48b8 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/write.go @@ -0,0 +1,418 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" + "golang.org/x/crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. 
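+//
+// For illustration (hypothetical caller, not from the upstream file):
+//
+//	var sig bytes.Buffer
+//	err := openpgp.ArmoredDetachSignText(&sig, signer, strings.NewReader(msg), nil)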
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signer.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signer.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. 
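+// For example, intersectPreferences([]uint8{8, 9, 2}, []uint8{2, 8}) returns
+// []uint8{8, 2}.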
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: packet.SigTypeBinary, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil + } + return literalData, nil +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
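+//
+// For illustration (hypothetical caller, not from the upstream file):
+//
+//	w, err := openpgp.Encrypt(&out, recipients, nil, nil, nil)
+//	if err == nil {
+//		_, _ = w.Write(plaintext)
+//		_ = w.Close() // Close flushes the trailing MDC/signature packets.
+//	}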
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] + defaultHashes := candidateHashes[len(candidateHashes)-1:] + + encryptKeys := make([]Key, len(to)) + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].encryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].primaryIdentity().SelfSignature + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) + if err != nil { + return + } + + return writeAndSign(payload, candidateHashes, signed, hints, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[len(candidateHashes)-1:] + preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + h hash.Hash + signer *packet.PrivateKey + config *packet.Config +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.h.Write(data) + return s.literalData.Write(data) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + SigType: packet.SigTypeBinary, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an io.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} diff --git a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go new file mode 100644 index 00000000..5e6aad5c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +func newEventProcessor(out chan<- watch.Event) *eventProcessor { + return &eventProcessor{ + out: out, + cond: sync.NewCond(&sync.Mutex{}), + done: make(chan struct{}), + } +} + +// eventProcessor buffers events and writes them to an out chan when a reader +// is waiting. Because of the requirement to buffer events, it synchronizes +// input with a condition, and synchronizes output with a channels. It needs to +// be able to yield while both waiting on an input condition and while blocked +// on writing to the output channel. 
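+//
+// Illustrative flow (paraphrasing the code below, not upstream text):
+// informer callbacks call push(), run() drains batches via takeBatch() and
+// forwards them on the out channel in writeBatch(), and stop() unblocks both.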
+type eventProcessor struct { + out chan<- watch.Event + + cond *sync.Cond + buff []watch.Event + + done chan struct{} +} + +func (e *eventProcessor) run() { + for { + batch := e.takeBatch() + e.writeBatch(batch) + if e.stopped() { + return + } + } +} + +func (e *eventProcessor) takeBatch() []watch.Event { + e.cond.L.Lock() + defer e.cond.L.Unlock() + + for len(e.buff) == 0 && !e.stopped() { + e.cond.Wait() + } + + batch := e.buff + e.buff = nil + return batch +} + +func (e *eventProcessor) writeBatch(events []watch.Event) { + for _, event := range events { + select { + case e.out <- event: + case <-e.done: + return + } + } +} + +func (e *eventProcessor) push(event watch.Event) { + e.cond.L.Lock() + defer e.cond.L.Unlock() + defer e.cond.Signal() + e.buff = append(e.buff, event) +} + +func (e *eventProcessor) stopped() bool { + select { + case <-e.done: + return true + default: + return false + } +} + +func (e *eventProcessor) stop() { + close(e.done) + e.cond.Signal() +} + +// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface +// so you can use it anywhere where you'd have used a regular Watcher returned from Watch method. +// it also returns a channel you can use to wait for the informers to fully shutdown. +func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) { + ch := make(chan watch.Event) + w := watch.NewProxyWatcher(ch) + e := newEventProcessor(ch) + + indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + e.push(watch.Event{ + Type: watch.Added, + Object: obj.(runtime.Object), + }) + }, + UpdateFunc: func(old, new interface{}) { + e.push(watch.Event{ + Type: watch.Modified, + Object: new.(runtime.Object), + }) + }, + DeleteFunc: func(obj interface{}) { + staleObj, stale := obj.(cache.DeletedFinalStateUnknown) + if stale { + // We have no means of passing the additional information down using + // watch API based on watch.Event but the caller can filter such + // objects by checking if metadata.deletionTimestamp is set + obj = staleObj.Obj + } + + e.push(watch.Event{ + Type: watch.Deleted, + Object: obj.(runtime.Object), + }) + }, + }, cache.Indexers{}) + + go e.run() + + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer e.stop() + informer.Run(w.StopChan()) + }() + + return indexer, informer, w, doneCh +} diff --git a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go new file mode 100644 index 00000000..e4806d2e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go @@ -0,0 +1,296 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/davecgh/go-spew/spew" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +// resourceVersionGetter is an interface used to get resource version from events. +// We can't reuse an interface from meta otherwise it would be a cyclic dependency and we need just this one method +type resourceVersionGetter interface { + GetResourceVersion() string +} + +// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout) +// it will get restarted from the last point without the consumer even knowing about it. +// RetryWatcher does that by inspecting events and keeping track of resourceVersion. +// Especially useful when using watch.UntilWithoutRetry where premature termination is causing issues and flakes. +// Please note that this is not resilient to etcd cache not having the resource version anymore - you would need to +// use Informers for that. +type RetryWatcher struct { + lastResourceVersion string + watcherClient cache.Watcher + resultChan chan watch.Event + stopChan chan struct{} + doneChan chan struct{} + minRestartDelay time.Duration +} + +// NewRetryWatcher creates a new RetryWatcher. +// It will make sure that watches gets restarted in case of recoverable errors. +// The initialResourceVersion will be given to watch method when first called. +func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) { + return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second) +} + +func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) { + switch initialResourceVersion { + case "", "0": + // TODO: revisit this if we ever get WATCH v2 where it means start "now" + // without doing the synthetic list of objects at the beginning (see #74022) + return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion) + default: + break + } + + rw := &RetryWatcher{ + lastResourceVersion: initialResourceVersion, + watcherClient: watcherClient, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + resultChan: make(chan watch.Event, 0), + minRestartDelay: minRestartDelay, + } + + go rw.receive() + return rw, nil +} + +func (rw *RetryWatcher) send(event watch.Event) bool { + // Writing to an unbuffered channel is blocking operation + // and we need to check if stop wasn't requested while doing so. + select { + case rw.resultChan <- event: + return true + case <-rw.stopChan: + return false + } +} + +// doReceive returns true when it is done, false otherwise. +// If it is not done the second return value holds the time to wait before calling it again. +func (rw *RetryWatcher) doReceive() (bool, time.Duration) { + watcher, err := rw.watcherClient.Watch(metav1.ListOptions{ + ResourceVersion: rw.lastResourceVersion, + AllowWatchBookmarks: true, + }) + // We are very unlikely to hit EOF here since we are just establishing the call, + // but it may happen that the apiserver is just shutting down (e.g. 
being restarted) + // This is consistent with how it is handled for informers + switch err { + case nil: + break + + case io.EOF: + // watch closed normally + return false, 0 + + case io.ErrUnexpectedEOF: + klog.V(1).InfoS("Watch closed with unexpected EOF", "err", err) + return false, 0 + + default: + msg := "Watch failed" + if net.IsProbableEOF(err) || net.IsTimeout(err) { + klog.V(5).InfoS(msg, "err", err) + // Retry + return false, 0 + } + + klog.ErrorS(err, msg) + // Retry + return false, 0 + } + + if watcher == nil { + klog.ErrorS(nil, "Watch returned nil watcher") + // Retry + return false, 0 + } + + ch := watcher.ResultChan() + defer watcher.Stop() + + for { + select { + case <-rw.stopChan: + klog.V(4).InfoS("Stopping RetryWatcher.") + return true, 0 + case event, ok := <-ch: + if !ok { + klog.V(4).InfoS("Failed to get event! Re-creating the watcher.", "resourceVersion", rw.lastResourceVersion) + return false, 0 + } + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark: + metaObject, ok := event.Object.(resourceVersionGetter) + if !ok { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + resourceVersion := metaObject.GetResourceVersion() + if resourceVersion == "" { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + // All is fine; send the non-bookmark events and update resource version. + if event.Type != watch.Bookmark { + ok = rw.send(event) + if !ok { + return true, 0 + } + } + rw.lastResourceVersion = resourceVersion + + continue + + case watch.Error: + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + // Retry unknown errors + return false, 0 + } + + status := statusErr.ErrStatus + + statusDelay := time.Duration(0) + if status.Details != nil { + statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second + } + + switch status.Code { + case http.StatusGone: + // Never retry RV too old errors + _ = rw.send(event) + return true, 0 + + case http.StatusGatewayTimeout, http.StatusInternalServerError: + // Retry + return false, statusDelay + + default: + // We retry by default. RetryWatcher is meant to proceed unless it is certain + // that it can't. If we are not certain, we proceed with retry and leave it + // up to the user to timeout if needed. + + // Log here so we have a record of hitting the unexpected error + // and we can whitelist some error codes if we missed any that are expected. 
+ klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object)) + + // Retry + return false, statusDelay + } + + default: + klog.Errorf("Failed to recognize Event type %q", event.Type) + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus, + }) + // We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + } + } +} + +// receive reads the result from a watcher, restarting it if necessary. +func (rw *RetryWatcher) receive() { + defer close(rw.doneChan) + defer close(rw.resultChan) + + klog.V(4).Info("Starting RetryWatcher.") + defer klog.V(4).Info("Stopping RetryWatcher.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-rw.stopChan: + cancel() + return + case <-ctx.Done(): + return + } + }() + + // We use non sliding until so we don't introduce delays on happy path when WATCH call + // timeouts or gets closed and we need to reestablish it while also avoiding hot loops. + wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) { + done, retryAfter := rw.doReceive() + if done { + cancel() + return + } + + timer := time.NewTimer(retryAfter) + select { + case <-ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + + klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion) + }, rw.minRestartDelay) +} + +// ResultChan implements Interface. +func (rw *RetryWatcher) ResultChan() <-chan watch.Event { + return rw.resultChan +} + +// Stop implements Interface. +func (rw *RetryWatcher) Stop() { + close(rw.stopChan) +} + +// Done allows the caller to be notified when Retry watcher stops. +func (rw *RetryWatcher) Done() <-chan struct{} { + return rw.doneChan +} diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go new file mode 100644 index 00000000..81d4ff0d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/until.go @@ -0,0 +1,169 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition failed or detected an error state. +type PreconditionFunc func(store cache.Store) (bool, error) + +// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition cannot be checked and should terminate. 
In general, it is better to define +// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed +// from false to true). +type ConditionFunc func(event watch.Event) (bool, error) + +// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry. +var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout") + +// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch +// encountered. The first condition that returns an error terminates the watch (and the event is also returned). +// If no event has been received, the returned event will be nil. +// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition. +// Waits until context deadline or until context is canceled. +// +// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!! +// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error. +// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below, +// Warning: solving such issues. +// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone. +func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) { + ch := watcher.ResultChan() + defer watcher.Stop() + var lastEvent *watch.Event + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + continue + } + } + ConditionSucceeded: + for { + select { + case event, ok := <-ch: + if !ok { + return lastEvent, ErrWatchClosed + } + lastEvent = &event + + done, err := condition(event) + if err != nil { + return lastEvent, err + } + if done { + break ConditionSucceeded + } + + case <-ctx.Done(): + return lastEvent, wait.ErrWaitTimeout + } + } + } + return lastEvent, nil +} + +// Until wraps the watcherClient's watch function with RetryWatcher making sure that watcher gets restarted in case of errors. +// The initialResourceVersion will be given to watch method when first called. It shall not be "" or "0" +// given the underlying WATCH call issues (#74022). +// Remaining behaviour is identical to function UntilWithoutRetry. (See above.) +// Until can deal with API timeouts and lost connections. +// It guarantees you to see all events and in the order they happened. +// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case. +// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing +// +// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// +// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges"). +func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { + w, err := NewRetryWatcher(initialResourceVersion, watcherClient) + if err != nil { + return nil, err + } + + return UntilWithoutRetry(ctx, w, conditions...) 
+}
+
+// UntilWithSync creates an informer from lw, optionally checks the precondition when the store is synced,
+// and watches the output until each provided condition succeeds, in a way that is identical
+// to function UntilWithoutRetry. (See above.)
+// UntilWithSync can deal with all errors like API timeouts, lost connections and 'Resource version too old'.
+// It is the only function that can recover from 'Resource version too old'; Until and UntilWithoutRetry will
+// just fail in that case. On the other hand it can't provide you with guarantees as strong as using a simple
+// Watch method with Until. It can skip some intermediate events if the watch function fails, but it will
+// re-list to recover and you always get an event, if there has been a change, after recovery.
+// Also, with the current implementation based on DeltaFIFO, the order of events is guaranteed only for a
+// particular object, not between multiple objects, even if they belong to the same resource.
+// The most frequent usage would be a command that needs to watch the "state of the world" and shouldn't fail, like:
+// waiting for an object to reach a state, "small" controllers, ...
+func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
+	indexer, informer, watcher, done := NewIndexerInformerWatcher(lw, objType)
+	// We need to wait for the internal informers to fully stop so it's easier to reason about
+	// and it works with non-thread-safe clients.
+	defer func() { <-done }()
+	// The proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches and
+	// let UntilWithoutRetry stop it.
+	defer watcher.Stop()
+
+	if precondition != nil {
+		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
+			return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
+		}
+
+		done, err := precondition(indexer)
+		if err != nil {
+			return nil, err
+		}
+
+		if done {
+			return nil, nil
+		}
+	}
+
+	return UntilWithoutRetry(ctx, watcher, conditions...)
+}
+
+// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
+func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+	if timeout < 0 {
+		// This should be handled in validation
+		klog.Errorf("Timeout for context shall not be negative!")
+		timeout = 0
+	}
+
+	if timeout == 0 {
+		return context.WithCancel(parent)
+	}
+
+	return context.WithTimeout(parent, timeout)
+}
diff --git a/vendor/k8s.io/client-go/util/certificate/csr/csr.go b/vendor/k8s.io/client-go/util/certificate/csr/csr.go
new file mode 100644
index 00000000..0390d1c0
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/certificate/csr/csr.go
@@ -0,0 +1,364 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package csr + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "fmt" + "reflect" + "time" + + certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + certutil "k8s.io/client-go/util/cert" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" +) + +// RequestCertificate will either use an existing (if this process has run +// before but not to completion) or create a certificate signing request using the +// PEM encoded CSR and send it to API server. An optional requestedDuration may be passed +// to set the spec.expirationSeconds field on the CSR to control the lifetime of the issued +// certificate. This is not guaranteed as the signer may choose to ignore the request. +func RequestCertificate(client clientset.Interface, csrData []byte, name, signerName string, requestedDuration *time.Duration, usages []certificatesv1.KeyUsage, privateKey interface{}) (reqName string, reqUID types.UID, err error) { + csr := &certificatesv1.CertificateSigningRequest{ + // Username, UID, Groups will be injected by API server. + TypeMeta: metav1.TypeMeta{Kind: "CertificateSigningRequest"}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + Request: csrData, + Usages: usages, + SignerName: signerName, + }, + } + if len(csr.Name) == 0 { + csr.GenerateName = "csr-" + } + if requestedDuration != nil { + csr.Spec.ExpirationSeconds = DurationToExpirationSeconds(*requestedDuration) + } + + reqName, reqUID, err = create(client, csr) + switch { + case err == nil: + return reqName, reqUID, err + + case apierrors.IsAlreadyExists(err) && len(name) > 0: + klog.Infof("csr for this node already exists, reusing") + req, err := get(client, name) + if err != nil { + return "", "", formatError("cannot retrieve certificate signing request: %v", err) + } + if err := ensureCompatible(req, csr, privateKey); err != nil { + return "", "", fmt.Errorf("retrieved csr is not compatible: %v", err) + } + klog.Infof("csr for this node is still valid") + return req.Name, req.UID, nil + + default: + return "", "", formatError("cannot create certificate signing request: %v", err) + } +} + +func DurationToExpirationSeconds(duration time.Duration) *int32 { + return pointer.Int32(int32(duration / time.Second)) +} + +func ExpirationSecondsToDuration(expirationSeconds int32) time.Duration { + return time.Duration(expirationSeconds) * time.Second +} + +func get(client clientset.Interface, name string) (*certificatesv1.CertificateSigningRequest, error) { + v1req, v1err := client.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}) + if v1err == nil || !apierrors.IsNotFound(v1err) { + return v1req, v1err + } + + v1beta1req, v1beta1err := client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}) + if v1beta1err != nil { + return nil, v1beta1err + } + + v1req = &certificatesv1.CertificateSigningRequest{ + ObjectMeta: v1beta1req.ObjectMeta, + Spec: certificatesv1.CertificateSigningRequestSpec{ + Request: v1beta1req.Spec.Request, + }, + } + if 
v1beta1req.Spec.SignerName != nil { + v1req.Spec.SignerName = *v1beta1req.Spec.SignerName + } + for _, usage := range v1beta1req.Spec.Usages { + v1req.Spec.Usages = append(v1req.Spec.Usages, certificatesv1.KeyUsage(usage)) + } + return v1req, nil +} + +func create(client clientset.Interface, csr *certificatesv1.CertificateSigningRequest) (reqName string, reqUID types.UID, err error) { + // only attempt a create via v1 if we specified signerName and usages and are not using the legacy unknown signerName + if len(csr.Spec.Usages) > 0 && len(csr.Spec.SignerName) > 0 && csr.Spec.SignerName != "kubernetes.io/legacy-unknown" { + v1req, v1err := client.CertificatesV1().CertificateSigningRequests().Create(context.TODO(), csr, metav1.CreateOptions{}) + switch { + case v1err != nil && apierrors.IsNotFound(v1err): + // v1 CSR API was not found, continue to try v1beta1 + + case v1err != nil: + // other creation error + return "", "", v1err + + default: + // success + return v1req.Name, v1req.UID, v1err + } + } + + // convert relevant bits to v1beta1 + v1beta1csr := &certificatesv1beta1.CertificateSigningRequest{ + ObjectMeta: csr.ObjectMeta, + Spec: certificatesv1beta1.CertificateSigningRequestSpec{ + SignerName: &csr.Spec.SignerName, + Request: csr.Spec.Request, + }, + } + for _, usage := range csr.Spec.Usages { + v1beta1csr.Spec.Usages = append(v1beta1csr.Spec.Usages, certificatesv1beta1.KeyUsage(usage)) + } + + // create v1beta1 + v1beta1req, v1beta1err := client.CertificatesV1beta1().CertificateSigningRequests().Create(context.TODO(), v1beta1csr, metav1.CreateOptions{}) + if v1beta1err != nil { + return "", "", v1beta1err + } + return v1beta1req.Name, v1beta1req.UID, nil +} + +// WaitForCertificate waits for a certificate to be issued until timeout, or returns an error. 
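+//
+// A rough usage sketch, illustrative only and not part of the upstream documentation:
+// a caller typically pairs RequestCertificate with WaitForCertificate. The client,
+// csrPEM, privateKey and the timeout below are hypothetical placeholder values.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+//	defer cancel()
+//	reqName, reqUID, err := RequestCertificate(client, csrPEM, "", certificatesv1.KubeAPIServerClientSignerName,
+//		nil, []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, privateKey)
+//	if err != nil {
+//		return err
+//	}
+//	certPEM, err := WaitForCertificate(ctx, client, reqName, reqUID)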
+func WaitForCertificate(ctx context.Context, client clientset.Interface, reqName string, reqUID types.UID) (certData []byte, err error) { + fieldSelector := fields.OneTermEqualSelector("metadata.name", reqName).String() + + var lw *cache.ListWatch + var obj runtime.Object + for { + // see if the v1 API is available + if _, err := client.CertificatesV1().CertificateSigningRequests().List(ctx, metav1.ListOptions{FieldSelector: fieldSelector}); err == nil { + // watch v1 objects + obj = &certificatesv1.CertificateSigningRequest{} + lw = &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1().CertificateSigningRequests().List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1().CertificateSigningRequests().Watch(ctx, options) + }, + } + break + } else { + klog.V(2).Infof("error fetching v1 certificate signing request: %v", err) + } + + // return if we've timed out + if err := ctx.Err(); err != nil { + return nil, wait.ErrWaitTimeout + } + + // see if the v1beta1 API is available + if _, err := client.CertificatesV1beta1().CertificateSigningRequests().List(ctx, metav1.ListOptions{FieldSelector: fieldSelector}); err == nil { + // watch v1beta1 objects + obj = &certificatesv1beta1.CertificateSigningRequest{} + lw = &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1beta1().CertificateSigningRequests().List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return client.CertificatesV1beta1().CertificateSigningRequests().Watch(ctx, options) + }, + } + break + } else { + klog.V(2).Infof("error fetching v1beta1 certificate signing request: %v", err) + } + + // return if we've timed out + if err := ctx.Err(); err != nil { + return nil, wait.ErrWaitTimeout + } + + // wait and try again + time.Sleep(time.Second) + } + + var issuedCertificate []byte + _, err = watchtools.UntilWithSync( + ctx, + lw, + obj, + nil, + func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Modified, watch.Added: + case watch.Deleted: + return false, fmt.Errorf("csr %q was deleted", reqName) + default: + return false, nil + } + + switch csr := event.Object.(type) { + case *certificatesv1.CertificateSigningRequest: + if csr.UID != reqUID { + return false, fmt.Errorf("csr %q changed UIDs", csr.Name) + } + approved := false + for _, c := range csr.Status.Conditions { + if c.Type == certificatesv1.CertificateDenied { + return false, fmt.Errorf("certificate signing request is denied, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1.CertificateFailed { + return false, fmt.Errorf("certificate signing request failed, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1.CertificateApproved { + approved = true + } + } + if approved { + if len(csr.Status.Certificate) > 0 { + klog.V(2).Infof("certificate signing request %s is issued", csr.Name) + issuedCertificate = csr.Status.Certificate + return true, nil + } + klog.V(2).Infof("certificate signing request %s is approved, waiting to be issued", csr.Name) + } + + case *certificatesv1beta1.CertificateSigningRequest: + if csr.UID != reqUID { + return false, fmt.Errorf("csr %q changed UIDs", csr.Name) + } + 
approved := false + for _, c := range csr.Status.Conditions { + if c.Type == certificatesv1beta1.CertificateDenied { + return false, fmt.Errorf("certificate signing request is denied, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1beta1.CertificateFailed { + return false, fmt.Errorf("certificate signing request failed, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificatesv1beta1.CertificateApproved { + approved = true + } + } + if approved { + if len(csr.Status.Certificate) > 0 { + klog.V(2).Infof("certificate signing request %s is issued", csr.Name) + issuedCertificate = csr.Status.Certificate + return true, nil + } + klog.V(2).Infof("certificate signing request %s is approved, waiting to be issued", csr.Name) + } + + default: + return false, fmt.Errorf("unexpected type received: %T", event.Object) + } + + return false, nil + }, + ) + if err == wait.ErrWaitTimeout { + return nil, wait.ErrWaitTimeout + } + if err != nil { + return nil, formatError("cannot watch on the certificate signing request: %v", err) + } + + return issuedCertificate, nil +} + +// ensureCompatible ensures that a CSR object is compatible with an original CSR +func ensureCompatible(new, orig *certificatesv1.CertificateSigningRequest, privateKey interface{}) error { + newCSR, err := parseCSR(new.Spec.Request) + if err != nil { + return fmt.Errorf("unable to parse new csr: %v", err) + } + origCSR, err := parseCSR(orig.Spec.Request) + if err != nil { + return fmt.Errorf("unable to parse original csr: %v", err) + } + if !reflect.DeepEqual(newCSR.Subject, origCSR.Subject) { + return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCSR.Subject, origCSR.Subject) + } + if len(new.Spec.SignerName) > 0 && len(orig.Spec.SignerName) > 0 && new.Spec.SignerName != orig.Spec.SignerName { + return fmt.Errorf("csr signerNames differ: new %q, orig: %q", new.Spec.SignerName, orig.Spec.SignerName) + } + signer, ok := privateKey.(crypto.Signer) + if !ok { + return fmt.Errorf("privateKey is not a signer") + } + newCSR.PublicKey = signer.Public() + if err := newCSR.CheckSignature(); err != nil { + return fmt.Errorf("error validating signature new CSR against old key: %v", err) + } + if len(new.Status.Certificate) > 0 { + certs, err := certutil.ParseCertsPEM(new.Status.Certificate) + if err != nil { + return fmt.Errorf("error parsing signed certificate for CSR: %v", err) + } + now := time.Now() + for _, cert := range certs { + if now.After(cert.NotAfter) { + return fmt.Errorf("one of the certificates for the CSR has expired: %s", cert.NotAfter) + } + } + } + return nil +} + +// formatError preserves the type of an API message but alters the message. Expects +// a single argument format string, and returns the wrapped error. +func formatError(format string, err error) error { + if s, ok := err.(apierrors.APIStatus); ok { + se := &apierrors.StatusError{ErrStatus: s.Status()} + se.ErrStatus.Message = fmt.Sprintf(format, se.ErrStatus.Message) + return se + } + return fmt.Errorf(format, err) +} + +// parseCSR extracts the CSR from the API object and decodes it. 
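+//
+// Illustrative note (not upstream documentation): the input must carry a PEM
+// "CERTIFICATE REQUEST" block, e.g. one produced like this, where subject and
+// key are hypothetical placeholders:
+//
+//	der, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{Subject: subject}, key)
+//	if err != nil {
+//		return nil, err
+//	}
+//	csrPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})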
+func parseCSR(pemData []byte) (*x509.CertificateRequest, error) { + // extract PEM from request object + block, _ := pem.Decode(pemData) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, fmt.Errorf("PEM block type must be CERTIFICATE REQUEST") + } + return x509.ParseCertificateRequest(block.Bytes) +} diff --git a/vendor/k8s.io/helm/internal/third_party/dep/fs/fs.go b/vendor/k8s.io/helm/internal/third_party/dep/fs/fs.go new file mode 100644 index 00000000..83259219 --- /dev/null +++ b/vendor/k8s.io/helm/internal/third_party/dep/fs/fs.go @@ -0,0 +1,373 @@ +/* +Copyright (c) for portions of fs.go are held by The Go Authors, 2016 and are provided under +the BSD license. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + + "github.com/pkg/errors" +) + +// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep. +// This code is copied from https://github.com/golang/dep/blob/37d6c560cdf407be7b6cd035b23dba89df9275cf/internal/fs/fs.go +// No changes to the code were made other than removing some unused functions + +// RenameWithFallback attempts to rename a file or directory, but falls back to +// copying in the event of a cross-device link error. If the fallback copy +// succeeds, src is still removed, emulating normal rename behavior. +func RenameWithFallback(src, dst string) error { + _, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "cannot stat %s", src) + } + + err = os.Rename(src, dst) + if err == nil { + return nil + } + + return renameFallback(err, src, dst) +} + +// renameByCopy attempts to rename a file or directory by copying it to the +// destination and then removing the src thus emulating the rename behavior. 
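+//
+// Illustrative note (not upstream documentation): renameByCopy is the fallback
+// behind the exported RenameWithFallback above; a caller would typically write,
+// with hypothetical paths:
+//
+//	if err := RenameWithFallback("/mnt/a/charts", "/mnt/b/charts"); err != nil {
+//		return err
+//	}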
+func renameByCopy(src, dst string) error {
+	var cerr error
+	if dir, _ := IsDir(src); dir {
+		cerr = CopyDir(src, dst)
+		if cerr != nil {
+			cerr = errors.Wrap(cerr, "copying directory failed")
+		}
+	} else {
+		cerr = copyFile(src, dst)
+		if cerr != nil {
+			cerr = errors.Wrap(cerr, "copying file failed")
+		}
+	}
+
+	if cerr != nil {
+		return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst)
+	}
+
+	return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src)
+}
+
+var (
+	errSrcNotDir = errors.New("source is not a directory")
+	errDstExist  = errors.New("destination already exists")
+)
+
+// CopyDir recursively copies a directory tree, attempting to preserve permissions.
+// Source directory must exist, destination directory must *not* exist.
+func CopyDir(src, dst string) error {
+	src = filepath.Clean(src)
+	dst = filepath.Clean(dst)
+
+	// We use os.Lstat() here to ensure we don't fall in a loop where a symlink
+	// actually links to one of its parent directories.
+	fi, err := os.Lstat(src)
+	if err != nil {
+		return err
+	}
+	if !fi.IsDir() {
+		return errSrcNotDir
+	}
+
+	_, err = os.Stat(dst)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err == nil {
+		return errDstExist
+	}
+
+	if err = os.MkdirAll(dst, fi.Mode()); err != nil {
+		return errors.Wrapf(err, "cannot mkdir %s", dst)
+	}
+
+	entries, err := ioutil.ReadDir(src)
+	if err != nil {
+		return errors.Wrapf(err, "cannot read directory %s", src)
+	}
+
+	for _, entry := range entries {
+		srcPath := filepath.Join(src, entry.Name())
+		dstPath := filepath.Join(dst, entry.Name())
+
+		if entry.IsDir() {
+			if err = CopyDir(srcPath, dstPath); err != nil {
+				return errors.Wrap(err, "copying directory failed")
+			}
+		} else {
+			// This will include symlinks, which is what we want when
+			// copying things.
+			if err = copyFile(srcPath, dstPath); err != nil {
+				return errors.Wrap(err, "copying file failed")
+			}
+		}
+	}
+
+	return nil
+}
+
+// copyFile copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file. The file mode will be copied from the source.
+func copyFile(src, dst string) (err error) {
+	if sym, err := IsSymlink(src); err != nil {
+		return errors.Wrap(err, "symlink check failed")
+	} else if sym {
+		if err := cloneSymlink(src, dst); err != nil {
+			if runtime.GOOS == "windows" {
+				// If cloning the symlink fails on Windows because the user
+				// does not have the required privileges, ignore the error and
+				// fall back to copying the file contents.
+				//
+				// ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522):
+				// https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx
+				if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) {
+					return err
+				}
+			} else {
+				return err
+			}
+		} else {
+			return nil
+		}
+	}
+
+	in, err := os.Open(src)
+	if err != nil {
+		return
+	}
+	defer in.Close()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return
+	}
+
+	if _, err = io.Copy(out, in); err != nil {
+		out.Close()
+		return
+	}
+
+	// Check for write errors on Close
+	if err = out.Close(); err != nil {
+		return
+	}
+
+	si, err := os.Stat(src)
+	if err != nil {
+		return
+	}
+
+	// Temporary fix for Go < 1.9
+	//
+	// See: https://github.com/golang/dep/issues/774
+	// and https://github.com/golang/go/issues/20829
+	if runtime.GOOS == "windows" {
+		dst = fixLongPath(dst)
+	}
+	err = os.Chmod(dst, si.Mode())
+
+	return
+}
+
+// cloneSymlink will create a new symlink that points to the resolved path of sl.
+// If sl is a relative symlink, dst will also be a relative symlink.
+func cloneSymlink(sl, dst string) error {
+	resolved, err := os.Readlink(sl)
+	if err != nil {
+		return err
+	}
+
+	return os.Symlink(resolved, dst)
+}
+
+// IsDir determines whether the given path is a directory.
+func IsDir(name string) (bool, error) {
+	fi, err := os.Stat(name)
+	if err != nil {
+		return false, err
+	}
+	if !fi.IsDir() {
+		return false, errors.Errorf("%q is not a directory", name)
+	}
+	return true, nil
+}
+
+// IsSymlink determines if the given path is a symbolic link.
+func IsSymlink(path string) (bool, error) {
+	l, err := os.Lstat(path)
+	if err != nil {
+		return false, err
+	}
+
+	return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+	// Do nothing (and don't allocate) if the path is "short".
+	// Empirically (at least on the Windows Server 2013 builder),
+	// the kernel is arbitrarily okay with < 248 bytes. That
+	// matches what the docs above say:
+	// "When using an API to create a directory, the specified
+	// path cannot be so long that you cannot append an 8.3 file
+	// name (that is, the directory name cannot exceed MAX_PATH
+	// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+	//
+	// The MSDN docs appear to say that a normal path that is 248 bytes long
+	// will work; empirically the path must be less than 248 bytes long.
+	if len(path) < 248 {
+		// Don't fix. (This is how Go 1.7 and earlier worked,
+		// not automatically generating the \\?\ form)
+		return path
+	}
+
+	// The extended form begins with \\?\, as in
+	// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+	// The extended form disables evaluation of . and .. path
+	// elements and disables the interpretation of / as equivalent
+	// to \. The conversion here rewrites / to \ and elides
+	// . elements as well as trailing or duplicate separators. For
+	// simplicity it avoids the conversion entirely for relative
+	// paths or paths containing .. elements. For now,
+	// \\server\share paths are not converted to
+	// \\?\UNC\server\share paths because the rules for doing so
+	// are less well-specified.
+	if len(path) >= 2 && path[:2] == `\\` {
+		// Don't canonicalize UNC paths.
+		return path
+	}
+	if !isAbs(path) {
+		// Relative path
+		return path
+	}
+
+	const prefix = `\\?`
+
+	pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
+	copy(pathbuf, prefix)
+	n := len(path)
+	r, w := 0, len(prefix)
+	for r < n {
+		switch {
+		case os.IsPathSeparator(path[r]):
+			// empty block
+			r++
+		case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+			// /./
+			r++
+		case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+			// /../ is currently unhandled
+			return path
+		default:
+			pathbuf[w] = '\\'
+			w++
+			for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+				pathbuf[w] = path[r]
+				w++
+			}
+		}
+	}
+	// A drive's root directory needs a trailing \
+	if w == len(`\\?\c:`) {
+		pathbuf[w] = '\\'
+		w++
+	}
+	return string(pathbuf[:w])
+}
+
+func isAbs(path string) (b bool) {
+	v := volumeName(path)
+	if v == "" {
+		return false
+	}
+	path = path[len(v):]
+	if path == "" {
+		return false
+	}
+	return os.IsPathSeparator(path[0])
+}
+
+func volumeName(path string) (v string) {
+	if len(path) < 2 {
+		return ""
+	}
+	// with drive letter
+	c := path[0]
+	if path[1] == ':' &&
+		('0' <= c && c <= '9' || 'a' <= c && c <= 'z' ||
+			'A' <= c && c <= 'Z') {
+		return path[:2]
+	}
+	// is it UNC
+	if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) &&
+		!os.IsPathSeparator(path[2]) && path[2] != '.' {
+		// first, the leading `\\` must not be followed by another `\`; what follows is the server name.
+		for n := 3; n < l-1; n++ {
+			// second, the next `\` must not be repeated.
+			if os.IsPathSeparator(path[n]) {
+				n++
+				// third, the characters that follow form the share name.
+				if !os.IsPathSeparator(path[n]) {
+					if path[n] == '.' {
+						break
+					}
+					for ; n < l; n++ {
+						if os.IsPathSeparator(path[n]) {
+							break
+						}
+					}
+					return path[:n]
+				}
+				break
+			}
+		}
+	}
+	return ""
+}
diff --git a/vendor/k8s.io/helm/internal/third_party/dep/fs/rename.go b/vendor/k8s.io/helm/internal/third_party/dep/fs/rename.go
new file mode 100644
index 00000000..0bb60094
--- /dev/null
+++ b/vendor/k8s.io/helm/internal/third_party/dep/fs/rename.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+/*
+Copyright (c) for portions of rename.go are held by The Go Authors, 2016 and are provided under
+the BSD license.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } else if terr.Err != syscall.EXDEV { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + + return renameByCopy(src, dst) +} diff --git a/vendor/k8s.io/helm/internal/third_party/dep/fs/rename_windows.go b/vendor/k8s.io/helm/internal/third_party/dep/fs/rename_windows.go new file mode 100644 index 00000000..14f017d0 --- /dev/null +++ b/vendor/k8s.io/helm/internal/third_party/dep/fs/rename_windows.go @@ -0,0 +1,69 @@ +// +build windows + +/* +Copyright (c) for portions of rename_windows.go are held by The Go Authors, 2016 and are provided under +the BSD license. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. 
syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } + + if terr.Err != syscall.EXDEV { + // In windows it can drop down to an operating system call that + // returns an operating system error with a different number and + // message. Checking for that as a fall back. + noerr, ok := terr.Err.(syscall.Errno) + + // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. + // See https://msdn.microsoft.com/en-us/library/cc231199.aspx + if ok && noerr != 0x11 { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + } + + return renameByCopy(src, dst) +} diff --git a/vendor/k8s.io/helm/pkg/downloader/chart_downloader.go b/vendor/k8s.io/helm/pkg/downloader/chart_downloader.go new file mode 100644 index 00000000..c2e9f6dc --- /dev/null +++ b/vendor/k8s.io/helm/pkg/downloader/chart_downloader.go @@ -0,0 +1,365 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + + "k8s.io/helm/pkg/getter" + "k8s.io/helm/pkg/helm/helmpath" + "k8s.io/helm/pkg/provenance" + "k8s.io/helm/pkg/repo" + "k8s.io/helm/pkg/urlutil" +) + +// VerificationStrategy describes a strategy for determining whether to verify a chart. +type VerificationStrategy int + +const ( + // VerifyNever will skip all verification of a chart. + VerifyNever VerificationStrategy = iota + // VerifyIfPossible will attempt a verification, it will not error if verification + // data is missing. But it will not stop processing if verification fails. + VerifyIfPossible + // VerifyAlways will always attempt a verification, and will fail if the + // verification fails. + VerifyAlways + // VerifyLater will fetch verification data, but not do any verification. + // This is to accommodate the case where another step of the process will + // perform verification. + VerifyLater +) + +// ErrNoOwnerRepo indicates that a given chart URL can't be found in any repos. +var ErrNoOwnerRepo = errors.New("could not find a repo containing the given URL") + +// ChartDownloader handles downloading a chart. +// +// It is capable of performing verifications on charts as well. +type ChartDownloader struct { + // Out is the location to write warning and info messages. + Out io.Writer + // Verify indicates what verification strategy to use. + Verify VerificationStrategy + // Keyring is the keyring file used for verification. + Keyring string + // HelmHome is the $HELM_HOME. + HelmHome helmpath.Home + // Getters collection for the operation + Getters getter.Providers + // Username chart repository username + Username string + // Password chart repository password + Password string +} + +// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file. +// +// If Verify is set to VerifyNever, the verification will be nil. 
+// If Verify is set to VerifyIfPossible, this will return a verification (or nil on failure), and print a warning on failure. +// If Verify is set to VerifyAlways, this will return a verification or an error if the verification fails. +// If Verify is set to VerifyLater, this will download the prov file (if it exists), but not verify it. +// +// For VerifyNever and VerifyIfPossible, the Verification may be empty. +// +// Returns a string path to the location where the file was downloaded and a verification +// (if provenance was verified), or an error if something bad happened. +func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) { + u, g, err := c.ResolveChartVersion(ref, version) + if err != nil { + return "", nil, err + } + + data, err := g.Get(u.String()) + if err != nil { + return "", nil, err + } + + name := filepath.Base(u.Path) + destfile := filepath.Join(dest, name) + if err := ioutil.WriteFile(destfile, data.Bytes(), 0644); err != nil { + return destfile, nil, err + } + + // If provenance is requested, verify it. + ver := &provenance.Verification{} + if c.Verify > VerifyNever { + body, err := g.Get(u.String() + ".prov") + if err != nil { + if c.Verify == VerifyAlways { + return destfile, ver, fmt.Errorf("Failed to fetch provenance %q", u.String()+".prov") + } + fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) + return destfile, ver, nil + } + provfile := destfile + ".prov" + if err := ioutil.WriteFile(provfile, body.Bytes(), 0644); err != nil { + return destfile, nil, err + } + + if c.Verify != VerifyLater { + ver, err = VerifyChart(destfile, c.Keyring) + if err != nil { + // Fail always in this case, since it means the verification step + // failed. + return destfile, ver, err + } + } + } + return destfile, ver, nil +} + +// ResolveChartVersion resolves a chart reference to a URL. +// +// It returns the URL as well as a preconfigured repo.Getter that can fetch +// the URL. +// +// A reference may be an HTTP URL, a 'reponame/chartname' reference, or a local path. +// +// A version is a SemVer string (1.2.3-beta.1+f334a6789). +// +// - For fully qualified URLs, the version will be ignored (since URLs aren't versioned) +// - For a chart reference +// * If version is non-empty, this will return the URL for that version +// * If version is empty, this will return the URL for the latest version +// * If no version can be found, an error is returned +func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, getter.Getter, error) { + u, err := url.Parse(ref) + if err != nil { + return nil, nil, fmt.Errorf("invalid chart URL format: %s", ref) + } + + rf, err := repo.LoadRepositoriesFile(c.HelmHome.RepositoryFile()) + if err != nil { + return u, nil, err + } + + if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 { + // In this case, we have to find the parent repo that contains this chart + // URL. And this is an unfortunate problem, as it requires actually going + // through each repo cache file and finding a matching URL. But basically + // we want to find the repo in case we have special SSL cert config + // for that repo. + + rc, err := c.scanReposForURL(ref, rf) + if err != nil { + // If there is no special config, return the default HTTP client and + // swallow the error. 
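+			// ErrNoOwnerRepo is the sentinel returned by scanReposForURL when no
+			// configured repository contains the URL; any other error is fatal.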
+			if err == ErrNoOwnerRepo {
+				getterConstructor, err := c.Getters.ByScheme(u.Scheme)
+				if err != nil {
+					return u, nil, err
+				}
+				g, err := getterConstructor(ref, "", "", "")
+				if t, ok := g.(*getter.HttpGetter); ok {
+					t.SetCredentials(c.Username, c.Password)
+				}
+				return u, g, err
+			}
+			return u, nil, err
+		}
+		r, err := repo.NewChartRepository(rc, c.Getters)
+		c.setCredentials(r)
+		// If we get here, we don't need to go through the next phase of looking
+		// up the URL. We have it already. So we just return.
+		return u, r.Client, err
+	}
+
+	// See if it's of the form: repo/path_to_chart
+	p := strings.SplitN(u.Path, "/", 2)
+	if len(p) < 2 {
+		return u, nil, fmt.Errorf("Non-absolute URLs should be in the form repo_name/path_to_chart, got: %s", u)
+	}
+
+	repoName := p[0]
+	chartName := p[1]
+	rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories)
+
+	if err != nil {
+		return u, nil, err
+	}
+
+	r, err := repo.NewChartRepository(rc, c.Getters)
+	if err != nil {
+		return u, nil, err
+	}
+	c.setCredentials(r)
+
+	// Skip the index lookup if the dependency does not contain a repository name.
+	if len(r.Config.Name) == 0 {
+		return u, r.Client, nil
+	}
+
+	// Next, we need to load the index, and actually look up the chart.
+	i, err := repo.LoadIndexFile(c.HelmHome.CacheIndex(r.Config.Name))
+	if err != nil {
+		return u, r.Client, fmt.Errorf("no cached repo found. (try 'helm repo update'). %s", err)
+	}
+
+	cv, err := i.Get(chartName, version)
+	if err != nil {
+		return u, r.Client, fmt.Errorf("chart %q matching version %q not found in %s index. (try 'helm repo update'). %s", chartName, version, r.Config.Name, err)
+	}
+
+	if len(cv.URLs) == 0 {
+		return u, r.Client, fmt.Errorf("chart %q has no downloadable URLs", ref)
+	}
+
+	// TODO: Seems that picking the first URL is not fully correct
+	u, err = url.Parse(cv.URLs[0])
+	if err != nil {
+		return u, r.Client, fmt.Errorf("invalid chart URL format: %s", ref)
+	}
+
+	// If the URL is relative (no scheme), prepend the chart repo's base URL
+	if !u.IsAbs() {
+		repoURL, err := url.Parse(rc.URL)
+		if err != nil {
+			return repoURL, r.Client, err
+		}
+		q := repoURL.Query()
+		// We need a trailing slash for ResolveReference to work, but make sure there isn't already one
+		repoURL.Path = strings.TrimSuffix(repoURL.Path, "/") + "/"
+		u = repoURL.ResolveReference(u)
+		u.RawQuery = q.Encode()
+		return u, r.Client, err
+	}
+
+	return u, r.Client, nil
+}
+
+// setCredentials sets the configured repository credentials on the HttpGetter, if an HttpGetter is used.
+func (c *ChartDownloader) setCredentials(r *repo.ChartRepository) {
+	if t, ok := r.Client.(*getter.HttpGetter); ok {
+		t.SetCredentials(c.getRepoCredentials(r))
+	}
+}
+
+// getRepoCredentials returns the repository's configured credentials if this ChartDownloader
+// is not configured to use credentials and the chart repository passed as an argument is.
+// Otherwise, this ChartDownloader's credentials are returned.
+func (c *ChartDownloader) getRepoCredentials(r *repo.ChartRepository) (username, password string) {
+	username = c.Username
+	password = c.Password
+	if r != nil && r.Config != nil {
+		if username == "" {
+			username = r.Config.Username
+		}
+		if password == "" {
+			password = r.Config.Password
+		}
+	}
+	return
+}
+
+// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart.
+//
+// It assumes that a chart archive file is accompanied by a provenance file whose
+// name is the archive file name plus the ".prov" extension.
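+//
+// Illustrative sketch only (not upstream documentation); the chart and keyring
+// paths are hypothetical placeholders:
+//
+//	ver, err := VerifyChart("mychart-0.1.0.tgz", "/home/user/.gnupg/pubring.gpg")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("chart signed by: %v\n", ver.SignedBy)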
+func VerifyChart(path string, keyring string) (*provenance.Verification, error) { + // For now, error out if it's not a tar file. + if fi, err := os.Stat(path); err != nil { + return nil, err + } else if fi.IsDir() { + return nil, errors.New("unpacked charts cannot be verified") + } else if !isTar(path) { + return nil, errors.New("chart must be a tgz file") + } + + provfile := path + ".prov" + if _, err := os.Stat(provfile); err != nil { + return nil, fmt.Errorf("could not load provenance file %s: %s", provfile, err) + } + + sig, err := provenance.NewFromKeyring(keyring, "") + if err != nil { + return nil, fmt.Errorf("failed to load keyring: %s", err) + } + return sig.Verify(path, provfile) +} + +// isTar tests whether the given file is a tar file. +// +// Currently, this simply checks extension, since a subsequent function will +// untar the file and validate its binary format. +func isTar(filename string) bool { + return strings.ToLower(filepath.Ext(filename)) == ".tgz" +} + +func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Entry, error) { + for _, rc := range cfgs { + if rc.Name == name { + if rc.URL == "" { + return nil, fmt.Errorf("no URL found for repository %s", name) + } + return rc, nil + } + } + return nil, fmt.Errorf("repo %s not found", name) +} + +// scanReposForURL scans all repos to find which repo contains the given URL. +// +// This will attempt to find the given URL in all of the known repositories files. +// +// If the URL is found, this will return the repo entry that contained that URL. +// +// If all of the repos are checked, but the URL is not found, an ErrNoOwnerRepo +// error is returned. +// +// Other errors may be returned when repositories cannot be loaded or searched. +// +// Technically, the fact that a URL is not found in a repo is not a failure indication. +// Charts are not required to be included in an index before they are valid. So +// be mindful of this case. +// +// The same URL can technically exist in two or more repositories. This algorithm +// will return the first one it finds. Order is determined by the order of repositories +// in the repositories.yaml file. +func (c *ChartDownloader) scanReposForURL(u string, rf *repo.RepoFile) (*repo.Entry, error) { + // FIXME: This is far from optimal. Larger installations and index files will + // incur a performance hit for this type of scanning. + for _, rc := range rf.Repositories { + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return nil, err + } + + i, err := repo.LoadIndexFile(c.HelmHome.CacheIndex(r.Config.Name)) + if err != nil { + return nil, fmt.Errorf("no cached repo found. (try 'helm repo update'). %s", err) + } + + for _, entry := range i.Entries { + for _, ver := range entry { + for _, dl := range ver.URLs { + if urlutil.Equal(u, dl) { + return rc, nil + } + } + } + } + } + // This means that there is no repo file for the given URL. + return nil, ErrNoOwnerRepo +} diff --git a/vendor/k8s.io/helm/pkg/downloader/doc.go b/vendor/k8s.io/helm/pkg/downloader/doc.go new file mode 100644 index 00000000..c70b2f69 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/downloader/doc.go @@ -0,0 +1,23 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package downloader provides a library for downloading charts. + +This package contains various tools for downloading charts from repository +servers, and then storing them in Helm-specific directory structures (like +HELM_HOME). This library contains many functions that depend on a specific +filesystem layout. +*/ +package downloader diff --git a/vendor/k8s.io/helm/pkg/downloader/manager.go b/vendor/k8s.io/helm/pkg/downloader/manager.go new file mode 100644 index 00000000..ce2e2ef0 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/downloader/manager.go @@ -0,0 +1,752 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "sync" + + "github.com/Masterminds/semver" + "github.com/ghodss/yaml" + + "k8s.io/helm/internal/third_party/dep/fs" + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/getter" + "k8s.io/helm/pkg/helm/helmpath" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/repo" + "k8s.io/helm/pkg/resolver" + "k8s.io/helm/pkg/urlutil" +) + +// Manager handles the lifecycle of fetching, resolving, and storing dependencies. +type Manager struct { + // Out is used to print warnings and notifications. + Out io.Writer + // ChartPath is the path to the unpacked base chart upon which this operates. + ChartPath string + // HelmHome is the $HELM_HOME directory + HelmHome helmpath.Home + // Verification indicates whether the chart should be verified. + Verify VerificationStrategy + // Debug is the global "--debug" flag + Debug bool + // Keyring is the key ring file. + Keyring string + // SkipUpdate indicates that the repository should not be updated first. + SkipUpdate bool + // Getter collection for the operation + Getters []getter.Provider +} + +// Build rebuilds a local charts directory from a lockfile. +// +// If the lockfile is not present, this will run a Manager.Update() +// +// If SkipUpdate is set, this will not update the repository. +func (m *Manager) Build() error { + c, err := m.loadChartDir() + if err != nil { + return err + } + + // If a lock file is found, run a build from that. Otherwise, just do + // an update. + lock, err := chartutil.LoadRequirementsLock(c) + if err != nil { + return m.Update() + } + + // A lock must accompany a requirements.yaml file. 
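+	// LoadRequirements reads requirements.yaml from the chart; the digest
+	// comparison below rejects a requirements.lock that is stale relative
+	// to requirements.yaml.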
+	req, err := chartutil.LoadRequirements(c)
+	if err != nil {
+		return fmt.Errorf("requirements.yaml cannot be opened: %s", err)
+	}
+	if sum, err := resolver.HashReq(req); err != nil || sum != lock.Digest {
+		return fmt.Errorf("requirements.lock is out of sync with requirements.yaml")
+	}
+
+	// Check that all of the repos we're dependent on actually exist.
+	if err := m.hasAllRepos(lock.Dependencies); err != nil {
+		return err
+	}
+
+	if !m.SkipUpdate {
+		// For each repo in the file, update the cached copy of that repo
+		if err := m.UpdateRepositories(); err != nil {
+			return err
+		}
+	}
+
+	// Now we need to fetch every package here into charts/
+	return m.downloadAll(lock.Dependencies)
+}
+
+// Update updates a local charts directory.
+//
+// It first reads the requirements.yaml file, and then attempts to
+// negotiate versions based on that. It will download the versions
+// from remote chart repositories unless SkipUpdate is true.
+func (m *Manager) Update() error {
+	c, err := m.loadChartDir()
+	if err != nil {
+		return err
+	}
+
+	// If no requirements file is found, we consider this a successful
+	// completion.
+	req, err := chartutil.LoadRequirements(c)
+	if err != nil {
+		if err == chartutil.ErrRequirementsNotFound {
+			fmt.Fprintf(m.Out, "No requirements found in %s/charts.\n", m.ChartPath)
+			return nil
+		}
+		return err
+	}
+
+	// Hash requirements.yaml
+	hash, err := resolver.HashReq(req)
+	if err != nil {
+		return err
+	}
+
+	// Check that all of the repos we're dependent on actually exist, and
+	// get the repo index names.
+	repoNames, err := m.getRepoNames(req.Dependencies)
+	if err != nil {
+		return err
+	}
+
+	// For each repo in the file, update the cached copy of that repo
+	if !m.SkipUpdate {
+		if err := m.UpdateRepositories(); err != nil {
+			return err
+		}
+	}
+
+	// Now we need to find out which version of a chart best satisfies the
+	// requirements in requirements.yaml
+	lock, err := m.resolve(req, repoNames, hash)
+	if err != nil {
+		return err
+	}
+
+	// Now we need to fetch every package here into charts/
+	if err := m.downloadAll(lock.Dependencies); err != nil {
+		return err
+	}
+
+	// If the lock file hasn't changed, don't write a new one.
+	oldLock, err := chartutil.LoadRequirementsLock(c)
+	if err == nil && oldLock.Digest == lock.Digest {
+		return nil
+	}
+
+	// Finally, we need to write the lockfile.
+	return writeLock(m.ChartPath, lock)
+}
+
+func (m *Manager) loadChartDir() (*chart.Chart, error) {
+	if fi, err := os.Stat(m.ChartPath); err != nil {
+		return nil, fmt.Errorf("could not find %s: %s", m.ChartPath, err)
+	} else if !fi.IsDir() {
+		return nil, errors.New("only unpacked charts can be updated")
+	}
+	return chartutil.LoadDir(m.ChartPath)
+}
+
+// resolve takes a list of requirements and translates them into an exact version to download.
+//
+// This returns a lock file, which has all of the requirements normalized to a specific version.
+func (m *Manager) resolve(req *chartutil.Requirements, repoNames map[string]string, hash string) (*chartutil.RequirementsLock, error) {
+	res := resolver.New(m.ChartPath, m.HelmHome)
+	return res.Resolve(req, repoNames, hash)
+}
+
+// downloadAll takes a list of dependencies and downloads them into charts/
+//
+// It will delete versions of the chart that exist on disk and might cause
+// a conflict.
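+//
+// Summarizing the body below: the existing charts/ content is first moved
+// aside to tmpcharts/ and fresh archives are downloaded into a new charts/;
+// on success, outdated archives are purged from tmpcharts/ and anything
+// remaining is moved back, while on failure the fresh downloads are deleted
+// and charts/ is restored from tmpcharts/.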
+func (m *Manager) downloadAll(deps []*chartutil.Dependency) error { + repos, err := m.loadChartRepositories() + if err != nil { + return err + } + + destPath := filepath.Join(m.ChartPath, "charts") + tmpPath := filepath.Join(m.ChartPath, "tmpcharts") + + // Create 'charts' directory if it doesn't already exist. + if fi, err := os.Stat(destPath); err != nil { + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + } else if !fi.IsDir() { + return fmt.Errorf("%q is not a directory", destPath) + } + + if err := fs.RenameWithFallback(destPath, tmpPath); err != nil { + return fmt.Errorf("Unable to move current charts to tmp dir: %v", err) + } + + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + + fmt.Fprintf(m.Out, "Saving %d charts\n", len(deps)) + var saveError error + for _, dep := range deps { + // No repository means the chart is in charts directory + if dep.Repository == "" { + fmt.Fprintf(m.Out, "Dependency %s did not declare a repository. Assuming it exists in the charts directory\n", dep.Name) + chartPath := filepath.Join(tmpPath, dep.Name) + ch, err := chartutil.LoadDir(chartPath) + if err != nil { + return fmt.Errorf("Unable to load chart: %v", err) + } + + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return fmt.Errorf("Dependency %s has an invalid version/constraint format: %s", dep.Name, err) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return fmt.Errorf("Invalid version %s for dependency %s: %s", dep.Version, dep.Name, err) + } + + if !constraint.Check(v) { + saveError = fmt.Errorf("Dependency %s at version %s does not satisfy the constraint %s", dep.Name, ch.Metadata.Version, dep.Version) + break + } + continue + } + if strings.HasPrefix(dep.Repository, "file://") { + if m.Debug { + fmt.Fprintf(m.Out, "Archiving %s from repo %s\n", dep.Name, dep.Repository) + } + ver, err := tarFromLocalDir(m.ChartPath, dep.Name, dep.Repository, dep.Version) + if err != nil { + saveError = err + break + } + dep.Version = ver + continue + } + + fmt.Fprintf(m.Out, "Downloading %s from repo %s\n", dep.Name, dep.Repository) + + // Any failure to resolve/download a chart should fail: + // https://github.com/kubernetes/helm/issues/1439 + churl, username, password, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) + if err != nil { + saveError = fmt.Errorf("could not find %s: %s", churl, err) + break + } + + dl := ChartDownloader{ + Out: m.Out, + Verify: m.Verify, + Keyring: m.Keyring, + HelmHome: m.HelmHome, + Getters: m.Getters, + Username: username, + Password: password, + } + + if _, _, err := dl.DownloadTo(churl, "", destPath); err != nil { + saveError = fmt.Errorf("could not download %s: %s", churl, err) + break + } + } + + if saveError == nil { + fmt.Fprintln(m.Out, "Deleting outdated charts") + for _, dep := range deps { + // Chart from local charts directory stays in place + if dep.Repository != "" { + if err := m.safeDeleteDep(dep.Name, tmpPath); err != nil { + return err + } + } + } + if err := move(tmpPath, destPath); err != nil { + return err + } + if err := os.RemoveAll(tmpPath); err != nil { + return fmt.Errorf("Failed to remove %v: %v", tmpPath, err) + } + } else { + fmt.Fprintln(m.Out, "Save error occurred: ", saveError) + fmt.Fprintln(m.Out, "Deleting newly downloaded charts, restoring pre-update state") + for _, dep := range deps { + if err := m.safeDeleteDep(dep.Name, destPath); err != nil { + return err + } + } + if err := os.RemoveAll(destPath); err != nil { + 
return fmt.Errorf("Failed to remove %v: %v", destPath, err) + } + if err := fs.RenameWithFallback(tmpPath, destPath); err != nil { + return fmt.Errorf("Unable to move current charts to tmp dir: %v", err) + } + return saveError + } + return nil +} + +// safeDeleteDep deletes any versions of the given dependency in the given directory. +// +// It does this by first matching the file name to an expected pattern, then loading +// the file to verify that it is a chart with the same name as the given name. +// +// Because it requires tar file introspection, it is more intensive than a basic delete. +// +// This will only return errors that should stop processing entirely. Other errors +// will emit log messages or be ignored. +func (m *Manager) safeDeleteDep(name, dir string) error { + files, err := filepath.Glob(filepath.Join(dir, name+"-*.tgz")) + if err != nil { + // Only for ErrBadPattern + return err + } + for _, fname := range files { + ch, err := chartutil.LoadFile(fname) + if err != nil { + fmt.Fprintf(m.Out, "Could not verify %s for deletion: %s (Skipping)", fname, err) + continue + } + if ch.Metadata.Name != name { + // This is not the file you are looking for. + continue + } + if err := os.Remove(fname); err != nil { + fmt.Fprintf(m.Out, "Could not delete %s: %s (Skipping)", fname, err) + continue + } + } + return nil +} + +// hasAllRepos ensures that all of the referenced deps are in the local repo cache. +func (m *Manager) hasAllRepos(deps []*chartutil.Dependency) error { + rf, err := repo.LoadRepositoriesFile(m.HelmHome.RepositoryFile()) + if err != nil { + return err + } + repos := rf.Repositories + + // Verify that all repositories referenced in the deps are actually known + // by Helm. + missing := []string{} + for _, dd := range deps { + // If repo is from local path, continue + if strings.HasPrefix(dd.Repository, "file://") { + continue + } + + found := false + if dd.Repository == "" { + found = true + } else { + for _, repo := range repos { + if urlutil.Equal(repo.URL, strings.TrimSuffix(dd.Repository, "/")) { + found = true + } + } + } + if !found { + missing = append(missing, dd.Repository) + } + } + if len(missing) > 0 { + return fmt.Errorf("no repository definition for %s. Please add the missing repos via 'helm repo add'", strings.Join(missing, ", ")) + } + return nil +} + +// getRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file. +func (m *Manager) getRepoNames(deps []*chartutil.Dependency) (map[string]string, error) { + rf, err := repo.LoadRepositoriesFile(m.HelmHome.RepositoryFile()) + if err != nil { + return nil, err + } + repos := rf.Repositories + + reposMap := make(map[string]string) + + // Verify that all repositories referenced in the deps are actually known + // by Helm. 
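+	// In the loop below, "@name" and "alias:name" references are resolved to
+	// repository URLs from repositories.yaml, while plain URLs are matched
+	// against the configured repository entries or, if unknown, used as-is.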
+ missing := []string{} + for _, dd := range deps { + // Don't map the repository, we don't need to download chart from charts directory + if dd.Repository == "" { + continue + } + // if dep chart is from local path, verify the path is valid + if strings.HasPrefix(dd.Repository, "file://") { + if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil { + return nil, err + } + + if m.Debug { + fmt.Fprintf(m.Out, "Repository from local path: %s\n", dd.Repository) + } + reposMap[dd.Name] = dd.Repository + continue + } + + found := false + + for _, repo := range repos { + if (strings.HasPrefix(dd.Repository, "@") && strings.TrimPrefix(dd.Repository, "@") == repo.Name) || + (strings.HasPrefix(dd.Repository, "alias:") && strings.TrimPrefix(dd.Repository, "alias:") == repo.Name) { + found = true + dd.Repository = repo.URL + reposMap[dd.Name] = repo.Name + break + } else if urlutil.Equal(repo.URL, dd.Repository) { + found = true + reposMap[dd.Name] = repo.Name + break + } + } + if !found { + repository := dd.Repository + // Add if URL + _, err := url.ParseRequestURI(repository) + if err == nil { + reposMap[repository] = repository + continue + } + missing = append(missing, repository) + } + } + + if len(missing) > 0 { + errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", ")) + // It is common for people to try to enter "stable" as a repository instead of the actual URL. + // For this case, let's give them a suggestion. + containsNonURL := false + for _, repo := range missing { + if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") { + containsNonURL = true + } + } + if containsNonURL { + errorMessage += ` +Note that repositories must be URLs or aliases. For example, to refer to the stable +repository, use "https://kubernetes-charts.storage.googleapis.com/" or "@stable" instead of +"stable". Don't forget to add the repo, too ('helm repo add').` + } + return nil, errors.New(errorMessage) + } + + return reposMap, nil +} + +// UpdateRepositories updates all of the local repos to the latest. +func (m *Manager) UpdateRepositories() error { + rf, err := repo.LoadRepositoriesFile(m.HelmHome.RepositoryFile()) + if err != nil { + return err + } + if repos := rf.Repositories; len(repos) > 0 { + // This prints warnings straight to out. + if err := m.parallelRepoUpdate(repos); err != nil { + return err + } + } + return nil +} + +func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { + out := m.Out + fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...") + var wg sync.WaitGroup + for _, c := range repos { + r, err := repo.NewChartRepository(c, m.Getters) + if err != nil { + return err + } + wg.Add(1) + go func(r *repo.ChartRepository) { + if err := r.DownloadIndexFile(m.HelmHome.Cache()); err != nil { + fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", r.Config.Name, r.Config.URL, err) + } else { + fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", r.Config.Name) + } + wg.Done() + }(r) + } + wg.Wait() + fmt.Fprintln(out, "Update Complete.") + return nil +} + +// findChartURL searches the cache of repo data for a chart that has the name and the repoURL specified. +// +// 'name' is the name of the chart. Version is an exact semver, or an empty string. If empty, the +// newest version will be returned. 
+// +// repoURL is the repository to search +// +// If it finds a URL that is "relative", it will prepend the repoURL. +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, err error) { + for _, cr := range repos { + if urlutil.Equal(repoURL, cr.Config.URL) { + var entry repo.ChartVersions + entry, err = findEntryByName(name, cr) + if err != nil { + return + } + var ve *repo.ChartVersion + ve, err = findVersionedEntry(version, entry) + if err != nil { + return + } + url, err = normalizeURL(repoURL, ve.URLs[0]) + if err != nil { + return + } + username = cr.Config.Username + password = cr.Config.Password + return + } + } + url, err = repo.FindChartInRepoURL(repoURL, name, version, "", "", "", m.Getters) + if err == nil { + return + } + err = fmt.Errorf("chart %s not found in %s", name, repoURL) + return +} + +// findEntryByName finds an entry in the chart repository whose name matches the given name. +// +// It returns the ChartVersions for that entry. +func findEntryByName(name string, cr *repo.ChartRepository) (repo.ChartVersions, error) { + for ename, entry := range cr.IndexFile.Entries { + if ename == name { + return entry, nil + } + } + return nil, errors.New("entry not found") +} + +// findVersionedEntry takes a ChartVersions list and returns a single chart version that satisfies the version constraints. +// +// If version is empty, the first chart found is returned. +func findVersionedEntry(version string, vers repo.ChartVersions) (*repo.ChartVersion, error) { + for _, verEntry := range vers { + if len(verEntry.URLs) == 0 { + // Not a legit entry. + continue + } + + if version == "" || versionEquals(version, verEntry.Version) { + return verEntry, nil + } + } + return nil, errors.New("no matching version") +} + +func versionEquals(v1, v2 string) bool { + sv1, err := semver.NewVersion(v1) + if err != nil { + // Fallback to string comparison. + return v1 == v2 + } + sv2, err := semver.NewVersion(v2) + if err != nil { + return false + } + return sv1.Equal(sv2) +} + +func normalizeURL(baseURL, urlOrPath string) (string, error) { + u, err := url.Parse(urlOrPath) + if err != nil { + return urlOrPath, err + } + if u.IsAbs() { + return u.String(), nil + } + u2, err := url.Parse(baseURL) + if err != nil { + return urlOrPath, fmt.Errorf("Base URL failed to parse: %s", err) + } + + u2.Path = path.Join(u2.Path, urlOrPath) + return u2.String(), nil +} + +// loadChartRepositories reads the repositories.yaml, and then builds a map of +// ChartRepositories. +// +// The key is the local name (which is only present in the repositories.yaml). 
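+//
+// For example, given a repositories.yaml entry such as (values illustrative):
+//
+//	- name: stable
+//	  url: https://example.com/charts
+//
+// the returned map is keyed by "stable", and that entry's IndexFile is loaded
+// from the cached stable-index.yaml.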
+func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, error) { + indices := map[string]*repo.ChartRepository{} + repoyaml := m.HelmHome.RepositoryFile() + + // Load repositories.yaml file + rf, err := repo.LoadRepositoriesFile(repoyaml) + if err != nil { + return indices, fmt.Errorf("failed to load %s: %s", repoyaml, err) + } + + for _, re := range rf.Repositories { + lname := re.Name + cacheindex := m.HelmHome.CacheIndex(lname) + index, err := repo.LoadIndexFile(cacheindex) + if err != nil { + return indices, err + } + + // TODO: use constructor + cr := &repo.ChartRepository{ + Config: re, + IndexFile: index, + } + indices[lname] = cr + } + return indices, nil +} + +// writeLock writes a lockfile to disk +func writeLock(chartpath string, lock *chartutil.RequirementsLock) error { + data, err := yaml.Marshal(lock) + if err != nil { + return err + } + dest := filepath.Join(chartpath, "requirements.lock") + return ioutil.WriteFile(dest, data, 0644) +} + +// tarFromLocalDir archive a dep chart from local directory and save it into charts/ +func tarFromLocalDir(chartpath, name, repo, version string) (string, error) { + destPath := filepath.Join(chartpath, "charts") + + if !strings.HasPrefix(repo, "file://") { + return "", fmt.Errorf("wrong format: chart %s repository %s", name, repo) + } + + origPath, err := resolver.GetLocalPath(repo, chartpath) + if err != nil { + return "", err + } + + ch, err := chartutil.LoadDir(origPath) + if err != nil { + return "", err + } + + constraint, err := semver.NewConstraint(version) + if err != nil { + return "", fmt.Errorf("dependency %s has an invalid version/constraint format: %s", name, err) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return "", err + } + + if constraint.Check(v) { + _, err = chartutil.Save(ch, destPath) + return ch.Metadata.Version, err + } + + return "", fmt.Errorf("can't get a valid version for dependency %s", name) +} + +// move files from tmppath to destpath +func move(tmpPath, destPath string) error { + files, _ := ioutil.ReadDir(tmpPath) + for _, file := range files { + filename := file.Name() + tmpfile := filepath.Join(tmpPath, filename) + destfile := filepath.Join(destPath, filename) + if err := fs.RenameWithFallback(tmpfile, destfile); err != nil { + return fmt.Errorf("Unable to move local charts to charts dir: %v", err) + } + } + return nil +} + +func copyFile(source string, destination string) (err error) { + sourceFile, err := os.Open(source) + if err != nil { + return err + } + defer sourceFile.Close() + destinationFile, err := os.Create(destination) + if err != nil { + return err + } + defer destinationFile.Close() + _, err = io.Copy(destinationFile, sourceFile) + if err == nil { + stats, err := os.Stat(source) + if err == nil { + return os.Chmod(destination, stats.Mode()) + } + } + return err +} + +func copyDir(source string, destination string) (err error) { + fi, err := os.Stat(source) + if err != nil { + return err + } + if !fi.IsDir() { + return fmt.Errorf("Source is not a directory") + } + _, err = os.Open(destination) + if !os.IsNotExist(err) { + return fmt.Errorf("Destination already exists") + } + err = os.MkdirAll(destination, fi.Mode()) + if err != nil { + return err + } + + entries, err := ioutil.ReadDir(source) + for _, entry := range entries { + sourceFile := source + "/" + entry.Name() + destinationFile := destination + "/" + entry.Name() + if entry.IsDir() { + err = copyDir(sourceFile, destinationFile) + if err != nil { + return err + } + } else { + err 
= copyFile(sourceFile, destinationFile) + if err != nil { + return err + } + } + } + return +} diff --git a/vendor/k8s.io/helm/pkg/getter/doc.go b/vendor/k8s.io/helm/pkg/getter/doc.go new file mode 100644 index 00000000..c53ef1ae --- /dev/null +++ b/vendor/k8s.io/helm/pkg/getter/doc.go @@ -0,0 +1,21 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package getter provides a generalize tool for fetching data by scheme. + +This provides a method by which the plugin system can load arbitrary protocol +handlers based upon a URL scheme. +*/ +package getter diff --git a/vendor/k8s.io/helm/pkg/getter/getter.go b/vendor/k8s.io/helm/pkg/getter/getter.go new file mode 100644 index 00000000..062c7269 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/getter/getter.go @@ -0,0 +1,98 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + + "k8s.io/helm/pkg/helm/environment" +) + +// Getter is an interface to support GET to the specified URL. +type Getter interface { + //Get file content by url string + Get(url string) (*bytes.Buffer, error) +} + +// Constructor is the function for every getter which creates a specific instance +// according to the configuration +type Constructor func(URL, CertFile, KeyFile, CAFile string) (Getter, error) + +// Provider represents any getter and the schemes that it supports. +// +// For example, an HTTP provider may provide one getter that handles both +// 'http' and 'https' schemes. +type Provider struct { + Schemes []string + New Constructor +} + +// Provides returns true if the given scheme is supported by this Provider. +func (p Provider) Provides(scheme string) bool { + for _, i := range p.Schemes { + if i == scheme { + return true + } + } + return false +} + +// Providers is a collection of Provider objects. +type Providers []Provider + +// ByScheme returns a Provider that handles the given scheme. +// +// If no provider handles this scheme, this will return an error. +func (p Providers) ByScheme(scheme string) (Constructor, error) { + for _, pp := range p { + if pp.Provides(scheme) { + return pp.New, nil + } + } + return nil, fmt.Errorf("scheme %q not supported", scheme) +} + +// All finds all of the registered getters as a list of Provider instances. +// Currently the built-in http/https getter and the discovered +// plugins with downloader notations are collected. 
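+//
+// A minimal usage sketch, assuming settings is an environment.EnvSettings
+// value (URLs are illustrative):
+//
+//	constructor, err := All(settings).ByScheme("https")
+//	if err == nil {
+//		g, _ := constructor("https://example.com/charts", "", "", "")
+//		buf, _ := g.Get("https://example.com/charts/index.yaml")
+//		_ = buf // index.yaml contents
+//	}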
+func All(settings environment.EnvSettings) Providers { + result := Providers{ + { + Schemes: []string{"http", "https"}, + New: newHTTPGetter, + }, + } + pluginDownloaders, _ := collectPlugins(settings) + result = append(result, pluginDownloaders...) + return result +} + +// ByScheme returns a getter for the given scheme. +// +// If the scheme is not supported, this will return an error. +func ByScheme(scheme string, settings environment.EnvSettings) (Provider, error) { + // Q: What do you call a scheme string who's the boss? + // A: Bruce Schemestring, of course. + a := All(settings) + for _, p := range a { + if p.Provides(scheme) { + return p, nil + } + } + return Provider{}, fmt.Errorf("scheme %q not supported", scheme) +} diff --git a/vendor/k8s.io/helm/pkg/getter/httpgetter.go b/vendor/k8s.io/helm/pkg/getter/httpgetter.go new file mode 100644 index 00000000..bf99b1cf --- /dev/null +++ b/vendor/k8s.io/helm/pkg/getter/httpgetter.go @@ -0,0 +1,97 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "k8s.io/helm/pkg/tlsutil" + "k8s.io/helm/pkg/version" +) + +//HttpGetter is the default HTTP(/S) backend handler +// TODO: change the name to HTTPGetter in Helm 3 +type HttpGetter struct { //nolint + client *http.Client + username string + password string +} + +//SetCredentials sets the credentials for the getter +func (g *HttpGetter) SetCredentials(username, password string) { + g.username = username + g.password = password +} + +//Get performs a Get from repo.Getter and returns the body. +func (g *HttpGetter) Get(href string) (*bytes.Buffer, error) { + return g.get(href) +} + +func (g *HttpGetter) get(href string) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + + // Set a helm specific user agent so that a repo server and metrics can + // separate helm calls from other tools interacting with repos. 
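+	// The resulting header is e.g. "User-Agent: Helm/2.14.3" (version value
+	// illustrative); the leading "v" is trimmed below.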
+ req, err := http.NewRequest("GET", href, nil) + if err != nil { + return buf, err + } + req.Header.Set("User-Agent", "Helm/"+strings.TrimPrefix(version.GetVersion(), "v")) + + if g.username != "" && g.password != "" { + req.SetBasicAuth(g.username, g.password) + } + + resp, err := g.client.Do(req) + if err != nil { + return buf, err + } + if resp.StatusCode != 200 { + return buf, fmt.Errorf("Failed to fetch %s : %s", href, resp.Status) + } + + _, err = io.Copy(buf, resp.Body) + resp.Body.Close() + return buf, err +} + +// newHTTPGetter constructs a valid http/https client as Getter +func newHTTPGetter(URL, CertFile, KeyFile, CAFile string) (Getter, error) { + return NewHTTPGetter(URL, CertFile, KeyFile, CAFile) +} + +// NewHTTPGetter constructs a valid http/https client as HttpGetter +func NewHTTPGetter(URL, CertFile, KeyFile, CAFile string) (*HttpGetter, error) { + var client HttpGetter + tr := &http.Transport{ + DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + } + if (CertFile != "" && KeyFile != "") || CAFile != "" { + tlsConf, err := tlsutil.NewTLSConfig(URL, CertFile, KeyFile, CAFile) + if err != nil { + return &client, fmt.Errorf("can't create TLS config: %s", err.Error()) + } + tr.TLSClientConfig = tlsConf + } + client.client = &http.Client{Transport: tr} + return &client, nil +} diff --git a/vendor/k8s.io/helm/pkg/getter/plugingetter.go b/vendor/k8s.io/helm/pkg/getter/plugingetter.go new file mode 100644 index 00000000..c918aa74 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/getter/plugingetter.go @@ -0,0 +1,99 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "k8s.io/helm/pkg/helm/environment" + "k8s.io/helm/pkg/plugin" +) + +// collectPlugins scans for getter plugins. +// This will load plugins according to the environment. +func collectPlugins(settings environment.EnvSettings) (Providers, error) { + plugins, err := plugin.FindPlugins(settings.PluginDirs()) + if err != nil { + return nil, err + } + var result Providers + for _, plugin := range plugins { + for _, downloader := range plugin.Metadata.Downloaders { + result = append(result, Provider{ + Schemes: downloader.Protocols, + New: newPluginGetter( + downloader.Command, + settings, + plugin.Metadata.Name, + plugin.Dir, + ), + }) + } + } + return result, nil +} + +// pluginGetter is a generic type to invoke custom downloaders, +// implemented in plugins. +type pluginGetter struct { + command string + certFile, keyFile, cAFile string + settings environment.EnvSettings + name string + base string +} + +// Get runs downloader plugin command +func (p *pluginGetter) Get(href string) (*bytes.Buffer, error) { + commands := strings.Split(p.command, " ") + argv := append(commands[1:], p.certFile, p.keyFile, p.cAFile, href) + prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) 
+ plugin.SetupPluginEnv(p.settings, p.name, p.base) + prog.Env = os.Environ() + buf := bytes.NewBuffer(nil) + prog.Stdout = buf + prog.Stderr = os.Stderr + prog.Stdin = os.Stdin + if err := prog.Run(); err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + os.Stderr.Write(eerr.Stderr) + return nil, fmt.Errorf("plugin %q exited with error", p.command) + } + return nil, err + } + return buf, nil +} + +// newPluginGetter constructs a valid plugin getter +func newPluginGetter(command string, settings environment.EnvSettings, name, base string) Constructor { + return func(URL, CertFile, KeyFile, CAFile string) (Getter, error) { + result := &pluginGetter{ + command: command, + certFile: CertFile, + keyFile: KeyFile, + cAFile: CAFile, + settings: settings, + name: name, + base: base, + } + return result, nil + } +} diff --git a/vendor/k8s.io/helm/pkg/helm/environment/environment.go b/vendor/k8s.io/helm/pkg/helm/environment/environment.go new file mode 100644 index 00000000..9cfe80a1 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/helm/environment/environment.go @@ -0,0 +1,168 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package environment describes the operating environment for Tiller. + +Tiller's environment encapsulates all of the service dependencies Tiller has. +These dependencies are expressed as interfaces so that alternate implementations +(mocks, etc.) can be easily generated. +*/ +package environment + +import ( + "os" + "path/filepath" + + "github.com/spf13/pflag" + + "k8s.io/client-go/util/homedir" + "k8s.io/helm/pkg/helm/helmpath" +) + +const ( + // DefaultTLSCaCert is the default value for HELM_TLS_CA_CERT + DefaultTLSCaCert = "$HELM_HOME/ca.pem" + // DefaultTLSCert is the default value for HELM_TLS_CERT + DefaultTLSCert = "$HELM_HOME/cert.pem" + // DefaultTLSKeyFile is the default value for HELM_TLS_KEY_FILE + DefaultTLSKeyFile = "$HELM_HOME/key.pem" + // DefaultTLSEnable is the default value for HELM_TLS_ENABLE + DefaultTLSEnable = false + // DefaultTLSVerify is the default value for HELM_TLS_VERIFY + DefaultTLSVerify = false +) + +// DefaultHelmHome is the default HELM_HOME. +var DefaultHelmHome = filepath.Join(homedir.HomeDir(), ".helm") + +// EnvSettings describes all of the environment settings. +type EnvSettings struct { + // TillerHost is the host and port of Tiller. + TillerHost string + // TillerConnectionTimeout is the duration (in seconds) helm will wait to establish a connection to Tiller. + TillerConnectionTimeout int64 + // TillerNamespace is the namespace in which Tiller runs. + TillerNamespace string + // Home is the local path to the Helm home directory. + Home helmpath.Home + // Debug indicates whether or not Helm is running in Debug mode. + Debug bool + // KubeContext is the name of the kubeconfig context. + KubeContext string + // KubeConfig is the path to an explicit kubeconfig file. 
This overwrites the value in $KUBECONFIG + KubeConfig string + // TLSEnable tells helm to communicate with Tiller via TLS + TLSEnable bool + // TLSVerify tells helm to communicate with Tiller via TLS and to verify remote certificates served by Tiller + TLSVerify bool + // TLSServerName tells helm to verify the hostname on the returned certificates from Tiller + TLSServerName string + // TLSCaCertFile is the path to a TLS CA certificate file + TLSCaCertFile string + // TLSCertFile is the path to a TLS certificate file + TLSCertFile string + // TLSKeyFile is the path to a TLS key file + TLSKeyFile string +} + +// AddFlags binds flags to the given flagset. +func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { + fs.StringVar((*string)(&s.Home), "home", DefaultHelmHome, "Location of your Helm config. Overrides $HELM_HOME") + fs.StringVar(&s.TillerHost, "host", "", "Address of Tiller. Overrides $HELM_HOST") + fs.StringVar(&s.KubeContext, "kube-context", "", "Name of the kubeconfig context to use") + fs.StringVar(&s.KubeConfig, "kubeconfig", "", "Absolute path of the kubeconfig file to be used") + fs.BoolVar(&s.Debug, "debug", false, "Enable verbose output") + fs.StringVar(&s.TillerNamespace, "tiller-namespace", "kube-system", "Namespace of Tiller") + fs.Int64Var(&s.TillerConnectionTimeout, "tiller-connection-timeout", int64(300), "The duration (in seconds) Helm will wait to establish a connection to Tiller") +} + +// AddFlagsTLS adds the flags for supporting client side TLS to the given flagset. +func (s *EnvSettings) AddFlagsTLS(fs *pflag.FlagSet) { + fs.StringVar(&s.TLSServerName, "tls-hostname", s.TillerHost, "The server name used to verify the hostname on the returned certificates from the server") + fs.StringVar(&s.TLSCaCertFile, "tls-ca-cert", DefaultTLSCaCert, "Path to TLS CA certificate file") + fs.StringVar(&s.TLSCertFile, "tls-cert", DefaultTLSCert, "Path to TLS certificate file") + fs.StringVar(&s.TLSKeyFile, "tls-key", DefaultTLSKeyFile, "Path to TLS key file") + fs.BoolVar(&s.TLSVerify, "tls-verify", DefaultTLSVerify, "Enable TLS for request and verify remote") + fs.BoolVar(&s.TLSEnable, "tls", DefaultTLSEnable, "Enable TLS for request") +} + +// Init sets values from the environment. +func (s *EnvSettings) Init(fs *pflag.FlagSet) { + for name, envar := range envMap { + setFlagFromEnv(name, envar, fs) + } +} + +// InitTLS sets TLS values from the environment. +func (s *EnvSettings) InitTLS(fs *pflag.FlagSet) { + for name, envar := range tlsEnvMap { + setFlagFromEnv(name, envar, fs) + } +} + +// envMap maps flag names to envvars +var envMap = map[string]string{ + "debug": "HELM_DEBUG", + "home": "HELM_HOME", + "host": "HELM_HOST", + "tiller-namespace": "TILLER_NAMESPACE", +} + +var tlsEnvMap = map[string]string{ + "tls-hostname": "HELM_TLS_HOSTNAME", + "tls-ca-cert": "HELM_TLS_CA_CERT", + "tls-cert": "HELM_TLS_CERT", + "tls-key": "HELM_TLS_KEY", + "tls-verify": "HELM_TLS_VERIFY", + "tls": "HELM_TLS_ENABLE", +} + +// PluginDirs is the path to the plugin directories. +func (s EnvSettings) PluginDirs() string { + if d, ok := os.LookupEnv("HELM_PLUGIN"); ok { + return d + } + return s.Home.Plugins() +} + +// HelmKeyPassphrase is the passphrase used to sign a helm chart. +func (s EnvSettings) HelmKeyPassphrase() string { + if d, ok := os.LookupEnv("HELM_KEY_PASSPHRASE"); ok { + return d + } + return "" +} + +// setFlagFromEnv looks up and sets a flag if the corresponding environment variable changed. 
+// if the flag with the corresponding name was set during fs.Parse(), then the environment +// variable is ignored. +func setFlagFromEnv(name, envar string, fs *pflag.FlagSet) { + if fs.Changed(name) { + return + } + if v, ok := os.LookupEnv(envar); ok { + fs.Set(name, v) + } +} + +// Deprecated +const ( + HomeEnvVar = "HELM_HOME" + PluginEnvVar = "HELM_PLUGIN" + PluginDisableEnvVar = "HELM_NO_PLUGINS" + HostEnvVar = "HELM_HOST" + DebugEnvVar = "HELM_DEBUG" +) diff --git a/vendor/k8s.io/helm/pkg/helm/helmpath/helmhome.go b/vendor/k8s.io/helm/pkg/helm/helmpath/helmhome.go new file mode 100644 index 00000000..9608ea6d --- /dev/null +++ b/vendor/k8s.io/helm/pkg/helm/helmpath/helmhome.go @@ -0,0 +1,103 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helmpath + +import ( + "fmt" + "os" + "path/filepath" +) + +// Home describes the location of a CLI configuration. +// +// This helper builds paths relative to a Helm Home directory. +type Home string + +// String returns Home as a string. +// +// Implements fmt.Stringer. +func (h Home) String() string { + return os.ExpandEnv(string(h)) +} + +// Path returns Home with elements appended. +func (h Home) Path(elem ...string) string { + p := []string{h.String()} + p = append(p, elem...) + return filepath.Join(p...) +} + +// Repository returns the path to the local repository. +func (h Home) Repository() string { + return h.Path("repository") +} + +// RepositoryFile returns the path to the repositories.yaml file. +func (h Home) RepositoryFile() string { + return h.Path("repository", "repositories.yaml") +} + +// Cache returns the path to the local cache. +func (h Home) Cache() string { + return h.Path("repository", "cache") +} + +// CacheIndex returns the path to an index for the given named repository. +func (h Home) CacheIndex(name string) string { + target := fmt.Sprintf("%s-index.yaml", name) + return h.Path("repository", "cache", target) +} + +// Starters returns the path to the Helm starter packs. +func (h Home) Starters() string { + return h.Path("starters") +} + +// LocalRepository returns the location to the local repo. +// +// The local repo is the one used by 'helm serve' +// +// If additional path elements are passed, they are appended to the returned path. +func (h Home) LocalRepository(elem ...string) string { + p := []string{"repository", "local"} + p = append(p, elem...) + return h.Path(p...) +} + +// Plugins returns the path to the plugins directory. +func (h Home) Plugins() string { + return h.Path("plugins") +} + +// Archive returns the path to download chart archives. +func (h Home) Archive() string { + return h.Path("cache", "archive") +} + +// TLSCaCert returns the path to fetch the CA certificate. +func (h Home) TLSCaCert() string { + return h.Path("ca.pem") +} + +// TLSCert returns the path to fetch the client certificate. +func (h Home) TLSCert() string { + return h.Path("cert.pem") +} + +// TLSKey returns the path to fetch the client public key. 
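+//
+// For example (home directory illustrative):
+//
+//	Home("/home/user/.helm").TLSKey() // "/home/user/.helm/key.pem"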
+func (h Home) TLSKey() string { + return h.Path("key.pem") +} diff --git a/vendor/k8s.io/helm/pkg/plugin/hooks.go b/vendor/k8s.io/helm/pkg/plugin/hooks.go new file mode 100644 index 00000000..70ce5d12 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/plugin/hooks.go @@ -0,0 +1,35 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin // import "k8s.io/helm/pkg/plugin" + +// Types of hooks +const ( + // Install is executed after the plugin is added. + Install = "install" + // Delete is executed after the plugin is removed. + Delete = "delete" + // Update is executed after the plugin is updated. + Update = "update" +) + +// Hooks is a map of events to commands. +type Hooks map[string]string + +// Get returns a hook for an event. +func (hooks Hooks) Get(event string) string { + h, _ := hooks[event] + return h +} diff --git a/vendor/k8s.io/helm/pkg/plugin/plugin.go b/vendor/k8s.io/helm/pkg/plugin/plugin.go new file mode 100644 index 00000000..07fcc700 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/plugin/plugin.go @@ -0,0 +1,200 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin // import "k8s.io/helm/pkg/plugin" + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + helm_env "k8s.io/helm/pkg/helm/environment" + + "github.com/ghodss/yaml" +) + +const pluginFileName = "plugin.yaml" + +// Downloaders represents the plugins capability if it can retrieve +// charts from special sources +type Downloaders struct { + // Protocols are the list of schemes from the charts URL. + Protocols []string `json:"protocols"` + // Command is the executable path with which the plugin performs + // the actual download for the corresponding Protocols + Command string `json:"command"` +} + +// Metadata describes a plugin. +// +// This is the plugin equivalent of a chart.Metadata. +type Metadata struct { + // Name is the name of the plugin + Name string `json:"name"` + + // Version is a SemVer 2 version of the plugin. + Version string `json:"version"` + + // Usage is the single-line usage text shown in help + Usage string `json:"usage"` + + // Description is a long description shown in places like `helm help` + Description string `json:"description"` + + // Command is the command, as a single string. + // + // The command will be passed through environment expansion, so env vars can + // be present in this command. Unless IgnoreFlags is set, this will + // also merge the flags passed from Helm. + // + // Note that command is not executed in a shell. 
To do so, we suggest + // pointing the command to a shell script. + Command string `json:"command"` + + // IgnoreFlags ignores any flags passed in from Helm + // + // For example, if the plugin is invoked as `helm --debug myplugin`, if this + // is false, `--debug` will be appended to `--command`. If this is true, + // the `--debug` flag will be discarded. + IgnoreFlags bool `json:"ignoreFlags"` + + // UseTunnel indicates that this command needs a tunnel. + // Setting this will cause a number of side effects, such as the + // automatic setting of HELM_HOST. + UseTunnel bool `json:"useTunnel"` + + // Hooks are commands that will run on events. + Hooks Hooks + + // Downloaders field is used if the plugin supply downloader mechanism + // for special protocols. + Downloaders []Downloaders `json:"downloaders"` +} + +// Plugin represents a plugin. +type Plugin struct { + // Metadata is a parsed representation of a plugin.yaml + Metadata *Metadata + // Dir is the string path to the directory that holds the plugin. + Dir string +} + +// PrepareCommand takes a Plugin.Command and prepares it for execution. +// +// It merges extraArgs into any arguments supplied in the plugin. It +// returns the name of the command and an args array. +// +// The result is suitable to pass to exec.Command. +func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string) { + parts := strings.Split(os.ExpandEnv(p.Metadata.Command), " ") + main := parts[0] + baseArgs := []string{} + if len(parts) > 1 { + baseArgs = parts[1:] + } + if !p.Metadata.IgnoreFlags { + baseArgs = append(baseArgs, extraArgs...) + } + return main, baseArgs +} + +// LoadDir loads a plugin from the given directory. +func LoadDir(dirname string) (*Plugin, error) { + data, err := ioutil.ReadFile(filepath.Join(dirname, pluginFileName)) + if err != nil { + return nil, err + } + + plug := &Plugin{Dir: dirname} + if err := yaml.Unmarshal(data, &plug.Metadata); err != nil { + return nil, err + } + return plug, nil +} + +// LoadAll loads all plugins found beneath the base directory. +// +// This scans only one directory level. +func LoadAll(basedir string) ([]*Plugin, error) { + plugins := []*Plugin{} + // We want basedir/*/plugin.yaml + scanpath := filepath.Join(basedir, "*", pluginFileName) + matches, err := filepath.Glob(scanpath) + if err != nil { + return plugins, err + } + + if matches == nil { + return plugins, nil + } + + for _, yaml := range matches { + dir := filepath.Dir(yaml) + p, err := LoadDir(dir) + if err != nil { + return plugins, err + } + plugins = append(plugins, p) + } + return plugins, nil +} + +// FindPlugins returns a list of YAML files that describe plugins. +func FindPlugins(plugdirs string) ([]*Plugin, error) { + found := []*Plugin{} + // Let's get all UNIXy and allow path separators + for _, p := range filepath.SplitList(plugdirs) { + matches, err := LoadAll(p) + if err != nil { + return matches, err + } + found = append(found, matches...) + } + return found, nil +} + +// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because +// the plugin subsystem itself needs access to the environment variables +// created here. +func SetupPluginEnv(settings helm_env.EnvSettings, + shortName, base string) { + for key, val := range map[string]string{ + "HELM_PLUGIN_NAME": shortName, + "HELM_PLUGIN_DIR": base, + "HELM_BIN": os.Args[0], + + // Set vars that may not have been set, and save client the + // trouble of re-parsing. 
+ "HELM_PLUGIN": settings.PluginDirs(), + "HELM_HOME": settings.Home.String(), + + // Set vars that convey common information. + "HELM_PATH_REPOSITORY": settings.Home.Repository(), + "HELM_PATH_REPOSITORY_FILE": settings.Home.RepositoryFile(), + "HELM_PATH_CACHE": settings.Home.Cache(), + "HELM_PATH_LOCAL_REPOSITORY": settings.Home.LocalRepository(), + "HELM_PATH_STARTER": settings.Home.Starters(), + + "TILLER_HOST": settings.TillerHost, + "TILLER_NAMESPACE": settings.TillerNamespace, + } { + os.Setenv(key, val) + } + + if settings.Debug { + os.Setenv("HELM_DEBUG", "1") + } +} diff --git a/vendor/k8s.io/helm/pkg/provenance/doc.go b/vendor/k8s.io/helm/pkg/provenance/doc.go new file mode 100644 index 00000000..bee48494 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/provenance/doc.go @@ -0,0 +1,37 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package provenance provides tools for establishing the authenticity of a chart. + +In Helm, provenance is established via several factors. The primary factor is the +cryptographic signature of a chart. Chart authors may sign charts, which in turn +provide the necessary metadata to ensure the integrity of the chart file, the +Chart.yaml, and the referenced Docker images. + +A provenance file is clear-signed. This provides cryptographic verification that +a particular block of information (Chart.yaml, archive file, images) have not +been tampered with or altered. To learn more, read the GnuPG documentation on +clear signatures: +https://www.gnupg.org/gph/en/manual/x135.html + +The cryptography used by Helm should be compatible with OpenGPG. For example, +you should be able to verify a signature by importing the desired public key +and using `gpg --verify`, `keybase pgp verify`, or similar: + + $ gpg --verify some.sig + gpg: Signature made Mon Jul 25 17:23:44 2016 MDT using RSA key ID 1FC18762 + gpg: Good signature from "Helm Testing (This key should only be used for testing. DO NOT TRUST.) " [ultimate] +*/ +package provenance // import "k8s.io/helm/pkg/provenance" diff --git a/vendor/k8s.io/helm/pkg/provenance/sign.go b/vendor/k8s.io/helm/pkg/provenance/sign.go new file mode 100644 index 00000000..d0e4d06c --- /dev/null +++ b/vendor/k8s.io/helm/pkg/provenance/sign.go @@ -0,0 +1,411 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provenance + +import ( + "bytes" + "crypto" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/ghodss/yaml" + + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/clearsign" + "golang.org/x/crypto/openpgp/packet" + + "k8s.io/helm/pkg/chartutil" + hapi "k8s.io/helm/pkg/proto/hapi/chart" +) + +var defaultPGPConfig = packet.Config{ + DefaultHash: crypto.SHA512, +} + +// SumCollection represents a collection of file and image checksums. +// +// Files are of the form: +// FILENAME: "sha256:SUM" +// Images are of the form: +// "IMAGE:TAG": "sha256:SUM" +// Docker optionally supports sha512, and if this is the case, the hash marker +// will be 'sha512' instead of 'sha256'. +type SumCollection struct { + Files map[string]string `json:"files"` + Images map[string]string `json:"images,omitempty"` +} + +// Verification contains information about a verification operation. +type Verification struct { + // SignedBy contains the entity that signed a chart. + SignedBy *openpgp.Entity + // FileHash is the hash, prepended with the scheme, for the file that was verified. + FileHash string + // FileName is the name of the file that FileHash verifies. + FileName string +} + +// Signatory signs things. +// +// Signatories can be constructed from a PGP private key file using NewFromFiles +// or they can be constructed manually by setting the Entity to a valid +// PGP entity. +// +// The same Signatory can be used to sign or validate multiple charts. +type Signatory struct { + // The signatory for this instance of Helm. This is used for signing. + Entity *openpgp.Entity + // The keyring for this instance of Helm. This is used for verification. + KeyRing openpgp.EntityList +} + +// NewFromFiles constructs a new Signatory from the PGP key in the given filename. +// +// This will emit an error if it cannot find a valid GPG keyfile (entity) at the +// given location. +// +// Note that the keyfile may have just a public key, just a private key, or +// both. The Signatory methods may have different requirements of the keys. For +// example, ClearSign must have a valid `openpgp.Entity.PrivateKey` before it +// can sign something. +func NewFromFiles(keyfile, keyringfile string) (*Signatory, error) { + e, err := loadKey(keyfile) + if err != nil { + return nil, err + } + + ring, err := loadKeyRing(keyringfile) + if err != nil { + return nil, err + } + + return &Signatory{ + Entity: e, + KeyRing: ring, + }, nil +} + +// NewFromKeyring reads a keyring file and creates a Signatory. +// +// If id is not the empty string, this will also try to find an Entity in the +// keyring whose name matches, and set that as the signing entity. It will return +// an error if the id is not empty and also not found. +func NewFromKeyring(keyringfile, id string) (*Signatory, error) { + ring, err := loadKeyRing(keyringfile) + if err != nil { + return nil, err + } + + s := &Signatory{KeyRing: ring} + + // If the ID is empty, we can return now. + if id == "" { + return s, nil + } + + // We're going to go all GnuPG on this and look for a string that _contains_. If + // two or more keys contain the string and none are a direct match, we error + // out. 
+	var candidate *openpgp.Entity
+	vague := false
+	for _, e := range ring {
+		for n := range e.Identities {
+			if n == id {
+				s.Entity = e
+				return s, nil
+			}
+			if strings.Contains(n, id) {
+				if candidate != nil {
+					vague = true
+				}
+				candidate = e
+			}
+		}
+	}
+	if vague {
+		return s, fmt.Errorf("more than one key contains the id %q", id)
+	}
+
+	s.Entity = candidate
+	return s, nil
+}
+
+// PassphraseFetcher returns a passphrase for decrypting keys.
+//
+// This is used as a callback to read a passphrase from some other location. The
+// given name is the Name field on the key, typically of the form:
+//
+//	USER_NAME (COMMENT)
+type PassphraseFetcher func(name string) ([]byte, error)
+
+// DecryptKey decrypts a private key in the Signatory.
+//
+// If the key is not encrypted, this will return without error.
+//
+// If the key does not exist, this will return an error.
+//
+// If the key exists, but cannot be unlocked with the passphrase returned by
+// the PassphraseFetcher, this will return an error.
+//
+// If the key is successfully unlocked, it will return nil.
+func (s *Signatory) DecryptKey(fn PassphraseFetcher) error {
+	if s.Entity == nil {
+		return errors.New("private key not found")
+	} else if s.Entity.PrivateKey == nil {
+		return errors.New("provided key is not a private key")
+	}
+
+	// Nothing else to do if key is not encrypted.
+	if !s.Entity.PrivateKey.Encrypted {
+		return nil
+	}
+
+	fname := "Unknown"
+	for i := range s.Entity.Identities {
+		if i != "" {
+			fname = i
+			break
+		}
+	}
+
+	p, err := fn(fname)
+	if err != nil {
+		return err
+	}
+
+	return s.Entity.PrivateKey.Decrypt(p)
+}
+
+// ClearSign signs a chart with the given key.
+//
+// This takes the path to a chart archive file and a key, and it returns a clear signature.
+//
+// The Signatory must have a valid Entity.PrivateKey for this to work. If it does
+// not, an error will be returned.
+func (s *Signatory) ClearSign(chartpath string) (string, error) {
+	if s.Entity == nil {
+		return "", errors.New("private key not found")
+	} else if s.Entity.PrivateKey == nil {
+		return "", errors.New("provided key is not a private key")
+	}
+
+	if fi, err := os.Stat(chartpath); err != nil {
+		return "", err
+	} else if fi.IsDir() {
+		return "", errors.New("cannot sign a directory")
+	}
+
+	out := bytes.NewBuffer(nil)
+
+	b, err := messageBlock(chartpath)
+	if err != nil {
+		// Propagate the error instead of silently returning an empty signature.
+		return "", err
+	}
+
+	// Sign the buffer
+	w, err := clearsign.Encode(out, s.Entity.PrivateKey, &defaultPGPConfig)
+	if err != nil {
+		return "", err
+	}
+	_, err = io.Copy(w, b)
+	w.Close()
+	return out.String(), err
+}
+
+// Verify checks a signature and verifies that it is legit for a chart.
+func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) {
+	ver := &Verification{}
+	for _, fname := range []string{chartpath, sigpath} {
+		if fi, err := os.Stat(fname); err != nil {
+			return ver, err
+		} else if fi.IsDir() {
+			return ver, fmt.Errorf("%s cannot be a directory", fname)
+		}
+	}
+
+	// First verify the signature
+	sig, err := s.decodeSignature(sigpath)
+	if err != nil {
+		return ver, fmt.Errorf("failed to decode signature: %s", err)
+	}
+
+	by, err := s.verifySignature(sig)
+	if err != nil {
+		return ver, err
+	}
+	ver.SignedBy = by
+
+	// Second, verify the hash of the tarball.
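+	// Provenance files record sums in the "sha256:<hex>" form, so the digest
+	// computed below is prefixed the same way before comparison.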
+ sum, err := DigestFile(chartpath) + if err != nil { + return ver, err + } + _, sums, err := parseMessageBlock(sig.Plaintext) + if err != nil { + return ver, err + } + + sum = "sha256:" + sum + basename := filepath.Base(chartpath) + if sha, ok := sums.Files[basename]; !ok { + return ver, fmt.Errorf("provenance does not contain a SHA for a file named %q", basename) + } else if sha != sum { + return ver, fmt.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum) + } + ver.FileHash = sum + ver.FileName = basename + + // TODO: when image signing is added, verify that here. + + return ver, nil +} + +func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + block, _ := clearsign.Decode(data) + if block == nil { + // There was no sig in the file. + return nil, errors.New("signature block not found") + } + + return block, nil +} + +// verifySignature verifies that the given block is validly signed, and returns the signer. +func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) { + return openpgp.CheckDetachedSignature( + s.KeyRing, + bytes.NewBuffer(block.Bytes), + block.ArmoredSignature.Body, + ) +} + +func messageBlock(chartpath string) (*bytes.Buffer, error) { + var b *bytes.Buffer + // Checksum the archive + chash, err := DigestFile(chartpath) + if err != nil { + return b, err + } + + base := filepath.Base(chartpath) + sums := &SumCollection{ + Files: map[string]string{ + base: "sha256:" + chash, + }, + } + + // Load the archive into memory. + chart, err := chartutil.LoadFile(chartpath) + if err != nil { + return b, err + } + + // Buffer a hash + checksums YAML file + data, err := yaml.Marshal(chart.Metadata) + if err != nil { + return b, err + } + + // FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP + // clearsign block. So we use ...\n, which is the YAML document end marker. + // http://yaml.org/spec/1.2/spec.html#id2800168 + b = bytes.NewBuffer(data) + b.WriteString("\n...\n") + + data, err = yaml.Marshal(sums) + if err != nil { + return b, err + } + b.Write(data) + + return b, nil +} + +// parseMessageBlock +func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) { + // This sucks. + parts := bytes.Split(data, []byte("\n...\n")) + if len(parts) < 2 { + return nil, nil, errors.New("message block must have at least two parts") + } + + md := &hapi.Metadata{} + sc := &SumCollection{} + + if err := yaml.Unmarshal(parts[0], md); err != nil { + return md, sc, err + } + err := yaml.Unmarshal(parts[1], sc) + return md, sc, err +} + +// loadKey loads a GPG key found at a particular path. +func loadKey(keypath string) (*openpgp.Entity, error) { + f, err := os.Open(keypath) + if err != nil { + return nil, err + } + defer f.Close() + + pr := packet.NewReader(f) + return openpgp.ReadEntity(pr) +} + +func loadKeyRing(ringpath string) (openpgp.EntityList, error) { + f, err := os.Open(ringpath) + if err != nil { + return nil, err + } + defer f.Close() + return openpgp.ReadKeyRing(f) +} + +// DigestFile calculates a SHA256 hash (like Docker) for a given file. +// +// It takes the path to the archive file, and returns a string representation of +// the SHA256 sum. +// +// The intended use of this function is to generate a sum of a chart TGZ file. 
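+//
+// Usage sketch (file name illustrative):
+//
+//	sum, err := DigestFile("frobnitz-1.2.3.tgz")
+//	// sum is the bare hex digest; callers prepend "sha256:" where needed.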
+func DigestFile(filename string) (string, error) { + f, err := os.Open(filename) + if err != nil { + return "", err + } + defer f.Close() + return Digest(f) +} + +// Digest hashes a reader and returns a SHA256 digest. +// +// Helm uses SHA256 as its default hash for all non-cryptographic applications. +func Digest(in io.Reader) (string, error) { + hash := crypto.SHA256.New() + if _, err := io.Copy(hash, in); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/vendor/k8s.io/helm/pkg/repo/chartrepo.go b/vendor/k8s.io/helm/pkg/repo/chartrepo.go new file mode 100644 index 00000000..e19aa52c --- /dev/null +++ b/vendor/k8s.io/helm/pkg/repo/chartrepo.go @@ -0,0 +1,284 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo // import "k8s.io/helm/pkg/repo" + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "github.com/ghodss/yaml" + + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/getter" + "k8s.io/helm/pkg/provenance" +) + +// Entry represents a collection of parameters for chart repository +type Entry struct { + Name string `json:"name"` + Cache string `json:"cache"` + URL string `json:"url"` + Username string `json:"username"` + Password string `json:"password"` + CertFile string `json:"certFile"` + KeyFile string `json:"keyFile"` + CAFile string `json:"caFile"` +} + +// ChartRepository represents a chart repository +type ChartRepository struct { + Config *Entry + ChartPaths []string + IndexFile *IndexFile + Client getter.Getter +} + +// NewChartRepository constructs ChartRepository +func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) { + u, err := url.Parse(cfg.URL) + if err != nil { + return nil, fmt.Errorf("invalid chart URL format: %s", cfg.URL) + } + + getterConstructor, err := getters.ByScheme(u.Scheme) + if err != nil { + return nil, fmt.Errorf("Could not find protocol handler for: %s", u.Scheme) + } + client, err := getterConstructor(cfg.URL, cfg.CertFile, cfg.KeyFile, cfg.CAFile) + if err != nil { + return nil, fmt.Errorf("Could not construct protocol handler for: %s error: %v", u.Scheme, err) + } + + return &ChartRepository{ + Config: cfg, + IndexFile: NewIndexFile(), + Client: client, + }, nil +} + +// Load loads a directory of charts as if it were a repository. +// +// It requires the presence of an index.yaml file in the directory. +func (r *ChartRepository) Load() error { + dirInfo, err := os.Stat(r.Config.Name) + if err != nil { + return err + } + if !dirInfo.IsDir() { + return fmt.Errorf("%q is not a directory", r.Config.Name) + } + + // FIXME: Why are we recursively walking directories? + // FIXME: Why are we not reading the repositories.yaml to figure out + // what repos to use? 
+ filepath.Walk(r.Config.Name, func(path string, f os.FileInfo, err error) error { + if !f.IsDir() { + if strings.Contains(f.Name(), "-index.yaml") { + i, err := LoadIndexFile(path) + if err != nil { + return nil + } + r.IndexFile = i + } else if strings.HasSuffix(f.Name(), ".tgz") { + r.ChartPaths = append(r.ChartPaths, path) + } + } + return nil + }) + return nil +} + +// DownloadIndexFile fetches the index from a repository. +// +// cachePath is prepended to any index that does not have an absolute path. This +// is for pre-2.2.0 repo files. +func (r *ChartRepository) DownloadIndexFile(cachePath string) error { + parsedURL, err := url.Parse(r.Config.URL) + if err != nil { + return err + } + parsedURL.RawPath = path.Join(parsedURL.RawPath, "index.yaml") + parsedURL.Path = path.Join(parsedURL.Path, "index.yaml") + indexURL := parsedURL.String() + + r.setCredentials() + resp, err := r.Client.Get(indexURL) + if err != nil { + return err + } + + index, err := ioutil.ReadAll(resp) + if err != nil { + return err + } + + if _, err := loadIndex(index); err != nil { + return err + } + + // In Helm 2.2.0 the config.cache was accidentally switched to an absolute + // path, which broke backward compatibility. This fixes it by prepending a + // global cache path to relative paths. + // + // It is changed on DownloadIndexFile because that was the method that + // originally carried the cache path. + cp := r.Config.Cache + if !filepath.IsAbs(cp) { + cp = filepath.Join(cachePath, cp) + } + + return ioutil.WriteFile(cp, index, 0644) +} + +// If HttpGetter is used, this method sets the configured repository credentials on the HttpGetter. +func (r *ChartRepository) setCredentials() { + if t, ok := r.Client.(*getter.HttpGetter); ok { + t.SetCredentials(r.Config.Username, r.Config.Password) + } +} + +// Index generates an index for the chart repository and writes an index.yaml file. +func (r *ChartRepository) Index() error { + err := r.generateIndex() + if err != nil { + return err + } + return r.saveIndexFile() +} + +func (r *ChartRepository) saveIndexFile() error { + index, err := yaml.Marshal(r.IndexFile) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(r.Config.Name, indexPath), index, 0644) +} + +func (r *ChartRepository) generateIndex() error { + for _, path := range r.ChartPaths { + ch, err := chartutil.Load(path) + if err != nil { + return err + } + + digest, err := provenance.DigestFile(path) + if err != nil { + return err + } + + if !r.IndexFile.Has(ch.Metadata.Name, ch.Metadata.Version) { + r.IndexFile.Add(ch.Metadata, path, r.Config.URL, digest) + } + // TODO: If a chart exists, but has a different Digest, should we error? + } + r.IndexFile.SortEntries() + return nil +} + +// FindChartInRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories +func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters) +} + +// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials for the chart repository. 
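+//
+// Usage sketch (URL and credentials illustrative):
+//
+//	url, err := FindChartInAuthRepoURL("https://example.com/charts",
+//		"user", "secret", "frobnitz", "1.2.3", "", "", "", getters)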
+func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + + // Download and write the index file to a temporary location + tempIndexFile, err := ioutil.TempFile("", "tmp-repo-file") + if err != nil { + return "", fmt.Errorf("cannot write index file for repository requested") + } + defer os.Remove(tempIndexFile.Name()) + + c := Entry{ + URL: repoURL, + Username: username, + Password: password, + CertFile: certFile, + KeyFile: keyFile, + CAFile: caFile, + } + r, err := NewChartRepository(&c, getters) + if err != nil { + return "", err + } + if err := r.DownloadIndexFile(tempIndexFile.Name()); err != nil { + return "", fmt.Errorf("Looks like %q is not a valid chart repository or cannot be reached: %s", repoURL, err) + } + + // Read the index file for the repository to get chart information and return chart URL + repoIndex, err := LoadIndexFile(tempIndexFile.Name()) + if err != nil { + return "", err + } + + errMsg := fmt.Sprintf("chart %q", chartName) + if chartVersion != "" { + errMsg = fmt.Sprintf("%s version %q", errMsg, chartVersion) + } + cv, err := repoIndex.Get(chartName, chartVersion) + if err != nil { + return "", fmt.Errorf("%s not found in %s repository", errMsg, repoURL) + } + + if len(cv.URLs) == 0 { + return "", fmt.Errorf("%s has no downloadable URLs", errMsg) + } + + chartURL := cv.URLs[0] + + absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL) + if err != nil { + return "", fmt.Errorf("failed to make chart URL absolute: %v", err) + } + + return absoluteChartURL, nil +} + +// ResolveReferenceURL resolves refURL relative to baseURL. +// If refURL is absolute, it simply returns refURL. +func ResolveReferenceURL(baseURL, refURL string) (string, error) { + parsedBaseURL, err := url.Parse(baseURL) + if err != nil { + return "", fmt.Errorf("failed to parse %s as URL: %v", baseURL, err) + } + + parsedRefURL, err := url.Parse(refURL) + if err != nil { + return "", fmt.Errorf("failed to parse %s as URL: %v", refURL, err) + } + + // We need a trailing slash for ResolveReference to work, but make sure there isn't already one + parsedBaseURL.Path = strings.TrimSuffix(parsedBaseURL.Path, "/") + "/" + resolvedURL := parsedBaseURL.ResolveReference(parsedRefURL) + // if the base URL contains query string parameters, + // propagate them to the child URL but only if the + // refURL is relative to baseURL + if (resolvedURL.Hostname() == parsedBaseURL.Hostname()) && (resolvedURL.Port() == parsedBaseURL.Port()) { + resolvedURL.RawQuery = parsedBaseURL.RawQuery + } + + return resolvedURL.String(), nil +} diff --git a/vendor/k8s.io/helm/pkg/repo/doc.go b/vendor/k8s.io/helm/pkg/repo/doc.go new file mode 100644 index 00000000..19ccf267 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/repo/doc.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package repo implements the Helm Chart Repository. 
+ +A chart repository is an HTTP server that provides information on charts. A local +repository cache is an on-disk representation of a chart repository. + +There are two important file formats for chart repositories. + +The first is the 'index.yaml' format, which is expressed like this: + + apiVersion: v1 + entries: + frobnitz: + - created: 2016-09-29T12:14:34.830161306-06:00 + description: This is a frobniz. + digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4 + home: http://example.com + keywords: + - frobnitz + - sprocket + - dodad + maintainers: + - email: helm@example.com + name: The Helm Team + - email: nobody@example.com + name: Someone Else + name: frobnitz + urls: + - http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz + version: 1.2.3 + sprocket: + - created: 2016-09-29T12:14:34.830507606-06:00 + description: This is a sprocket" + digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6 + home: http://example.com + keywords: + - frobnitz + - sprocket + - dodad + maintainers: + - email: helm@example.com + name: The Helm Team + - email: nobody@example.com + name: Someone Else + name: sprocket + urls: + - http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz + version: 1.2.0 + generated: 2016-09-29T12:14:34.829721375-06:00 + +An index.yaml file contains the necessary descriptive information about what +charts are available in a repository, and how to get them. + +The second file format is the repositories.yaml file format. This file is for +facilitating local cached copies of one or more chart repositories. + +The format of a repository.yaml file is: + + apiVersion: v1 + generated: TIMESTAMP + repositories: + - name: stable + url: http://example.com/charts + cache: stable-index.yaml + - name: incubator + url: http://example.com/incubator + cache: incubator-index.yaml + +This file maps three bits of information about a repository: + + - The name the user uses to refer to it + - The fully qualified URL to the repository (index.yaml will be appended) + - The name of the local cachefile + +The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not +backwards compatible with those earlier versions. +*/ +package repo diff --git a/vendor/k8s.io/helm/pkg/repo/index.go b/vendor/k8s.io/helm/pkg/repo/index.go new file mode 100644 index 00000000..12f3308d --- /dev/null +++ b/vendor/k8s.io/helm/pkg/repo/index.go @@ -0,0 +1,342 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/Masterminds/semver" + "github.com/ghodss/yaml" + + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/provenance" + "k8s.io/helm/pkg/urlutil" +) + +var indexPath = "index.yaml" + +// APIVersionV1 is the v1 API version for index and repository files. 
+const APIVersionV1 = "v1" + +var ( + // ErrNoAPIVersion indicates that an API version was not specified. + ErrNoAPIVersion = errors.New("no API version specified") + // ErrNoChartVersion indicates that a chart with the given version is not found. + ErrNoChartVersion = errors.New("no chart version found") + // ErrNoChartName indicates that a chart with the given name is not found. + ErrNoChartName = errors.New("no chart name found") +) + +// ChartVersions is a list of versioned chart references. +// Implements a sorter on Version. +type ChartVersions []*ChartVersion + +// Len returns the length. +func (c ChartVersions) Len() int { return len(c) } + +// Swap swaps the position of two items in the versions slice. +func (c ChartVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// Less returns true if the version of entry a is less than the version of entry b. +func (c ChartVersions) Less(a, b int) bool { + // Failed parse pushes to the back. + i, err := semver.NewVersion(c[a].Version) + if err != nil { + return true + } + j, err := semver.NewVersion(c[b].Version) + if err != nil { + return false + } + return i.LessThan(j) +} + +// IndexFile represents the index file in a chart repository +type IndexFile struct { + APIVersion string `json:"apiVersion"` + Generated time.Time `json:"generated"` + Entries map[string]ChartVersions `json:"entries"` + PublicKeys []string `json:"publicKeys,omitempty"` +} + +// NewIndexFile initializes an index. +func NewIndexFile() *IndexFile { + return &IndexFile{ + APIVersion: APIVersionV1, + Generated: time.Now(), + Entries: map[string]ChartVersions{}, + PublicKeys: []string{}, + } +} + +// LoadIndexFile takes a file at the given path and returns an IndexFile object +func LoadIndexFile(path string) (*IndexFile, error) { + b, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return loadIndex(b) +} + +// Add adds a file to the index +// This can leave the index in an unsorted state +func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) { + u := filename + if baseURL != "" { + var err error + _, file := filepath.Split(filename) + u, err = urlutil.URLJoin(baseURL, file) + if err != nil { + u = path.Join(baseURL, file) + } + } + cr := &ChartVersion{ + URLs: []string{u}, + Metadata: md, + Digest: digest, + Created: time.Now(), + } + if ee, ok := i.Entries[md.Name]; !ok { + i.Entries[md.Name] = ChartVersions{cr} + } else { + i.Entries[md.Name] = append(ee, cr) + } +} + +// Has returns true if the index has an entry for a chart with the given name and exact version. +func (i IndexFile) Has(name, version string) bool { + _, err := i.Get(name, version) + return err == nil +} + +// SortEntries sorts the entries by version in descending order. +// +// In canonical form, the individual version records should be sorted so that +// the most recent release for every version is in the 0th slot in the +// Entries.ChartVersions array. That way, tooling can predict the newest +// version without needing to parse SemVers. +func (i IndexFile) SortEntries() { + for _, versions := range i.Entries { + sort.Sort(sort.Reverse(versions)) + } +} + +// Get returns the ChartVersion for the given name. +// +// If version is empty, this will return the chart with the latest stable version, +// prerelease versions will be skipped. 
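+//
+// For example (hypothetical index contents):
+//
+//	cv, err := idx.Get("nginx", "")       // newest stable version
+//	cv, err = idx.Get("nginx", "~1.2.0")  // newest 1.2.x per the constraint
+//
+// An exact version such as "1.2.3" is matched verbatim before any
+// constraint evaluation, as the loop below shows.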
+func (i IndexFile) Get(name, version string) (*ChartVersion, error) { + vs, ok := i.Entries[name] + if !ok { + return nil, ErrNoChartName + } + if len(vs) == 0 { + return nil, ErrNoChartVersion + } + + var constraint *semver.Constraints + if len(version) == 0 { + constraint, _ = semver.NewConstraint("*") + } else { + var err error + constraint, err = semver.NewConstraint(version) + if err != nil { + return nil, err + } + } + + // when customer input exact version, check whether have exact match one first + if len(version) != 0 { + for _, ver := range vs { + if version == ver.Version { + return ver, nil + } + } + } + + for _, ver := range vs { + test, err := semver.NewVersion(ver.Version) + if err != nil { + continue + } + + if constraint.Check(test) { + return ver, nil + } + } + return nil, fmt.Errorf("No chart version found for %s-%s", name, version) +} + +// WriteFile writes an index file to the given destination path. +// +// The mode on the file is set to 'mode'. +func (i IndexFile) WriteFile(dest string, mode os.FileMode) error { + b, err := yaml.Marshal(i) + if err != nil { + return err + } + return ioutil.WriteFile(dest, b, mode) +} + +// Merge merges the given index file into this index. +// +// This merges by name and version. +// +// If one of the entries in the given index does _not_ already exist, it is added. +// In all other cases, the existing record is preserved. +// +// This can leave the index in an unsorted state +func (i *IndexFile) Merge(f *IndexFile) { + for _, cvs := range f.Entries { + for _, cv := range cvs { + if !i.Has(cv.Name, cv.Version) { + e := i.Entries[cv.Name] + i.Entries[cv.Name] = append(e, cv) + } + } + } +} + +// Need both JSON and YAML annotations until we get rid of gopkg.in/yaml.v2 + +// ChartVersion represents a chart entry in the IndexFile +type ChartVersion struct { + *chart.Metadata + URLs []string `json:"urls"` + Created time.Time `json:"created,omitempty"` + Removed bool `json:"removed,omitempty"` + Digest string `json:"digest,omitempty"` +} + +// IndexDirectory reads a (flat) directory and generates an index. +// +// It indexes only charts that have been packaged (*.tgz). +// +// The index returned will be in an unsorted state +func IndexDirectory(dir, baseURL string) (*IndexFile, error) { + archives, err := filepath.Glob(filepath.Join(dir, "*.tgz")) + if err != nil { + return nil, err + } + moreArchives, err := filepath.Glob(filepath.Join(dir, "**/*.tgz")) + if err != nil { + return nil, err + } + archives = append(archives, moreArchives...) + + index := NewIndexFile() + for _, arch := range archives { + fname, err := filepath.Rel(dir, arch) + if err != nil { + return index, err + } + + var parentDir string + parentDir, fname = filepath.Split(fname) + // filepath.Split appends an extra slash to the end of parentDir. We want to strip that out. + parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator)) + parentURL, err := urlutil.URLJoin(baseURL, parentDir) + if err != nil { + parentURL = path.Join(baseURL, parentDir) + } + + c, err := chartutil.Load(arch) + if err != nil { + // Assume this is not a chart. + continue + } + hash, err := provenance.DigestFile(arch) + if err != nil { + return index, err + } + index.Add(c.Metadata, fname, parentURL, hash) + } + return index, nil +} + +// loadIndex loads an index file and does minimal validity checking. +// +// This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails. 
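+//
+// A minimal index document it accepts looks like this (illustrative):
+//
+//	apiVersion: v1
+//	entries:
+//	  mychart:
+//	    - name: mychart
+//	      version: 0.1.0
+//	      urls:
+//	        - https://charts.example.com/mychart-0.1.0.tgz
+//
+// Documents without an apiVersion are retried with the legacy loader below.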
+func loadIndex(data []byte) (*IndexFile, error) { + i := &IndexFile{} + if err := yaml.Unmarshal(data, i); err != nil { + return i, err + } + i.SortEntries() + if i.APIVersion == "" { + // When we leave Beta, we should remove legacy support and just + // return this error: + //return i, ErrNoAPIVersion + return loadUnversionedIndex(data) + } + return i, nil +} + +// unversionedEntry represents a deprecated pre-Alpha.5 format. +// +// This will be removed prior to v2.0.0 +type unversionedEntry struct { + Checksum string `json:"checksum"` + URL string `json:"url"` + Chartfile *chart.Metadata `json:"chartfile"` +} + +// loadUnversionedIndex loads a pre-Alpha.5 index.yaml file. +// +// This format is deprecated. This function will be removed prior to v2.0.0. +func loadUnversionedIndex(data []byte) (*IndexFile, error) { + fmt.Fprintln(os.Stderr, "WARNING: Deprecated index file format. Try 'helm repo update'") + i := map[string]unversionedEntry{} + + // This gets around an error in the YAML parser. Instead of parsing as YAML, + // we convert to JSON, and then decode again. + var err error + data, err = yaml.YAMLToJSON(data) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, &i); err != nil { + return nil, err + } + + if len(i) == 0 { + return nil, ErrNoAPIVersion + } + ni := NewIndexFile() + for n, item := range i { + if item.Chartfile == nil || item.Chartfile.Name == "" { + parts := strings.Split(n, "-") + ver := "" + if len(parts) > 1 { + ver = strings.TrimSuffix(parts[1], ".tgz") + } + item.Chartfile = &chart.Metadata{Name: parts[0], Version: ver} + } + ni.Add(item.Chartfile, item.URL, "", item.Checksum) + } + return ni, nil +} diff --git a/vendor/k8s.io/helm/pkg/repo/local.go b/vendor/k8s.io/helm/pkg/repo/local.go new file mode 100644 index 00000000..caca1b9c --- /dev/null +++ b/vendor/k8s.io/helm/pkg/repo/local.go @@ -0,0 +1,137 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo + +import ( + "fmt" + htemplate "html/template" + "io/ioutil" + "net/http" + "path/filepath" + "strings" + + "github.com/ghodss/yaml" + + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/provenance" +) + +const indexHTMLTemplate = ` + + + Helm Repository + +
+</head>
+<body>
+<h1>Helm Charts Repository</h1>
+<ul>
+{{range $name, $ver := .Index.Entries}}
+  <li>{{$name}}</li>
+{{end}}
+</ul>
+<p>Last Generated: {{.Index.Generated}}</p>
+</body>
+ + +` + +// RepositoryServer is an HTTP handler for serving a chart repository. +type RepositoryServer struct { + RepoPath string +} + +// ServeHTTP implements the http.Handler interface. +func (s *RepositoryServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + uri := r.URL.Path + switch uri { + case "/", "/charts/", "/charts/index.html", "/charts/index": + w.Header().Set("Content-Type", "text/html; charset=utf-8") + s.htmlIndex(w, r) + default: + file := strings.TrimPrefix(uri, "/charts/") + http.ServeFile(w, r, filepath.Join(s.RepoPath, file)) + } +} + +// StartLocalRepo starts a web server and serves files from the given path +func StartLocalRepo(path, address string) error { + if address == "" { + address = "127.0.0.1:8879" + } + s := &RepositoryServer{RepoPath: path} + return http.ListenAndServe(address, s) +} + +func (s *RepositoryServer) htmlIndex(w http.ResponseWriter, r *http.Request) { + t := htemplate.Must(htemplate.New("index.html").Parse(indexHTMLTemplate)) + // load index + lrp := filepath.Join(s.RepoPath, "index.yaml") + i, err := LoadIndexFile(lrp) + if err != nil { + http.Error(w, err.Error(), 500) + return + } + data := map[string]interface{}{ + "Index": i, + } + if err := t.Execute(w, data); err != nil { + fmt.Fprintf(w, "Template error: %s", err) + } +} + +// AddChartToLocalRepo saves a chart in the given path and then reindexes the index file +func AddChartToLocalRepo(ch *chart.Chart, path string) error { + _, err := chartutil.Save(ch, path) + if err != nil { + return err + } + return Reindex(ch, path+"/index.yaml") +} + +// Reindex adds an entry to the index file at the given path +func Reindex(ch *chart.Chart, path string) error { + name := ch.Metadata.Name + "-" + ch.Metadata.Version + y, err := LoadIndexFile(path) + if err != nil { + return err + } + found := false + for k := range y.Entries { + if k == name { + found = true + break + } + } + if !found { + dig, err := provenance.DigestFile(path) + if err != nil { + return err + } + + y.Add(ch.Metadata, name+".tgz", "http://127.0.0.1:8879/charts", "sha256:"+dig) + + out, err := yaml.Marshal(y) + if err != nil { + return err + } + + ioutil.WriteFile(path, out, 0644) + } + return nil +} diff --git a/vendor/k8s.io/helm/pkg/repo/repo.go b/vendor/k8s.io/helm/pkg/repo/repo.go new file mode 100644 index 00000000..80166fee --- /dev/null +++ b/vendor/k8s.io/helm/pkg/repo/repo.go @@ -0,0 +1,156 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo // import "k8s.io/helm/pkg/repo" + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "time" + + "github.com/ghodss/yaml" +) + +// ErrRepoOutOfDate indicates that the repository file is out of date, but +// is fixable. 
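+//
+// Callers typically compare against this sentinel and persist the recovered
+// file returned alongside it, for example:
+//
+//	f, err := LoadRepositoriesFile(path)
+//	if err == ErrRepoOutOfDate {
+//		err = f.WriteFile(path, 0644)
+//	}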
+var ErrRepoOutOfDate = errors.New("repository file is out of date") + +// RepoFile represents the repositories.yaml file in $HELM_HOME +// TODO: change type name to File in Helm 3 to resolve linter warning +type RepoFile struct { // nolint + APIVersion string `json:"apiVersion"` + Generated time.Time `json:"generated"` + Repositories []*Entry `json:"repositories"` +} + +// NewRepoFile generates an empty repositories file. +// +// Generated and APIVersion are automatically set. +func NewRepoFile() *RepoFile { + return &RepoFile{ + APIVersion: APIVersionV1, + Generated: time.Now(), + Repositories: []*Entry{}, + } +} + +// LoadRepositoriesFile takes a file at the given path and returns a RepoFile object +// +// If this returns ErrRepoOutOfDate, it also returns a recovered RepoFile that +// can be saved as a replacement to the out of date file. +func LoadRepositoriesFile(path string) (*RepoFile, error) { + b, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf( + "Couldn't load repositories file (%s).\n"+ + "You might need to run `helm init` (or "+ + "`helm init --client-only` if tiller is "+ + "already installed)", path) + } + return nil, err + } + + r := &RepoFile{} + err = yaml.Unmarshal(b, r) + if err != nil { + return nil, err + } + + // File is either corrupt, or is from before v2.0.0-Alpha.5 + if r.APIVersion == "" { + m := map[string]string{} + if err = yaml.Unmarshal(b, &m); err != nil { + return nil, err + } + r := NewRepoFile() + for k, v := range m { + r.Add(&Entry{ + Name: k, + URL: v, + Cache: fmt.Sprintf("%s-index.yaml", k), + }) + } + return r, ErrRepoOutOfDate + } + + return r, nil +} + +// Add adds one or more repo entries to a repo file. +func (r *RepoFile) Add(re ...*Entry) { + r.Repositories = append(r.Repositories, re...) +} + +// Update attempts to replace one or more repo entries in a repo file. If an +// entry with the same name doesn't exist in the repo file it will add it. +func (r *RepoFile) Update(re ...*Entry) { + for _, target := range re { + found := false + for j, repo := range r.Repositories { + if repo.Name == target.Name { + r.Repositories[j] = target + found = true + break + } + } + if !found { + r.Add(target) + } + } +} + +// Has returns true if the given name is already a repository name. +func (r *RepoFile) Has(name string) bool { + _, ok := r.Get(name) + return ok +} + +// Get returns entry by the given name if it exists. +func (r *RepoFile) Get(name string) (*Entry, bool) { + for _, entry := range r.Repositories { + if entry.Name == name { + return entry, true + } + } + return nil, false +} + +// Remove removes the entry from the list of repositories. +func (r *RepoFile) Remove(name string) bool { + cp := []*Entry{} + found := false + for _, rf := range r.Repositories { + if rf.Name == name { + found = true + continue + } + cp = append(cp, rf) + } + r.Repositories = cp + return found +} + +// WriteFile writes a repositories file to the given path. +func (r *RepoFile) WriteFile(path string, perm os.FileMode) error { + data, err := yaml.Marshal(r) + if err != nil { + return err + } + return ioutil.WriteFile(path, data, perm) +} diff --git a/vendor/k8s.io/helm/pkg/resolver/resolver.go b/vendor/k8s.io/helm/pkg/resolver/resolver.go new file mode 100644 index 00000000..653606df --- /dev/null +++ b/vendor/k8s.io/helm/pkg/resolver/resolver.go @@ -0,0 +1,177 @@ +/* +Copyright The Helm Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Masterminds/semver" + + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/helm/helmpath" + "k8s.io/helm/pkg/provenance" + "k8s.io/helm/pkg/repo" +) + +// Resolver resolves dependencies from semantic version ranges to a particular version. +type Resolver struct { + chartpath string + helmhome helmpath.Home +} + +// New creates a new resolver for a given chart and a given helm home. +func New(chartpath string, helmhome helmpath.Home) *Resolver { + return &Resolver{ + chartpath: chartpath, + helmhome: helmhome, + } +} + +// Resolve resolves dependencies and returns a lock file with the resolution. +func (r *Resolver) Resolve(reqs *chartutil.Requirements, repoNames map[string]string, d string) (*chartutil.RequirementsLock, error) { + + // Now we clone the dependencies, locking as we go. + locked := make([]*chartutil.Dependency, len(reqs.Dependencies)) + missing := []string{} + for i, d := range reqs.Dependencies { + if d.Repository == "" { + // Local chart subfolder + if _, err := GetLocalPath(filepath.Join("charts", d.Name), r.chartpath); err != nil { + return nil, err + } + + locked[i] = &chartutil.Dependency{ + Name: d.Name, + Repository: "", + Version: d.Version, + } + continue + } + if strings.HasPrefix(d.Repository, "file://") { + + if _, err := GetLocalPath(d.Repository, r.chartpath); err != nil { + return nil, err + } + + locked[i] = &chartutil.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: d.Version, + } + continue + } + constraint, err := semver.NewConstraint(d.Version) + if err != nil { + return nil, fmt.Errorf("dependency %q has an invalid version/constraint format: %s", d.Name, err) + } + + // repo does not exist in cache but has url info + cacheRepoName := repoNames[d.Name] + if cacheRepoName == "" && d.Repository != "" { + locked[i] = &chartutil.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: d.Version, + } + continue + } + + repoIndex, err := repo.LoadIndexFile(r.helmhome.CacheIndex(cacheRepoName)) + if err != nil { + return nil, fmt.Errorf("no cached repo found. (try 'helm repo update'). %s", err) + } + + vs, ok := repoIndex.Entries[d.Name] + if !ok { + return nil, fmt.Errorf("%s chart not found in repo %s", d.Name, d.Repository) + } + + locked[i] = &chartutil.Dependency{ + Name: d.Name, + Repository: d.Repository, + } + found := false + // The version are already sorted and hence the first one to satisfy the constraint is used + for _, ver := range vs { + v, err := semver.NewVersion(ver.Version) + if err != nil || len(ver.URLs) == 0 { + // Not a legit entry. + continue + } + if constraint.Check(v) { + found = true + locked[i].Version = v.Original() + break + } + } + + if !found { + missing = append(missing, d.Name) + } + } + if len(missing) > 0 { + return nil, fmt.Errorf("Can't get a valid version for repositories %s. 
Try changing the version constraint in requirements.yaml", strings.Join(missing, ", ")) + } + return &chartutil.RequirementsLock{ + Generated: time.Now(), + Digest: d, + Dependencies: locked, + }, nil +} + +// HashReq generates a hash of the requirements. +// +// This should be used only to compare against another hash generated by this +// function. +func HashReq(req *chartutil.Requirements) (string, error) { + data, err := json.Marshal(req) + if err != nil { + return "", err + } + s, err := provenance.Digest(bytes.NewBuffer(data)) + return "sha256:" + s, err +} + +// GetLocalPath generates absolute local path when use +// "file://" in repository of requirements +func GetLocalPath(repo string, chartpath string) (string, error) { + var depPath string + var err error + p := strings.TrimPrefix(repo, "file://") + + // root path is absolute + if strings.HasPrefix(p, "/") { + if depPath, err = filepath.Abs(p); err != nil { + return "", err + } + } else { + depPath = filepath.Join(chartpath, p) + } + + if _, err = os.Stat(depPath); os.IsNotExist(err) { + return "", fmt.Errorf("directory %s not found", depPath) + } else if err != nil { + return "", err + } + + return depPath, nil +} diff --git a/vendor/k8s.io/helm/pkg/tlsutil/cfg.go b/vendor/k8s.io/helm/pkg/tlsutil/cfg.go new file mode 100644 index 00000000..6c2a829d --- /dev/null +++ b/vendor/k8s.io/helm/pkg/tlsutil/cfg.go @@ -0,0 +1,89 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "os" +) + +// Options represents configurable options used to create client and server TLS configurations. +type Options struct { + CaCertFile string + // If either the KeyFile or CertFile is empty, ClientConfig() will not load them, + // preventing Helm from authenticating to Tiller. They are required to be non-empty + // when calling ServerConfig, otherwise an error is returned. + KeyFile string + CertFile string + // Client-only options + InsecureSkipVerify bool + // Overrides the server name used to verify the hostname on the returned + // certificates from the server. + ServerName string + // Server-only options + ClientAuth tls.ClientAuthType +} + +// ClientConfig returns a TLS configuration for use by a Helm client. 
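+//
+// A sketch of typical use (all file paths are placeholders):
+//
+//	cfg, err := ClientConfig(Options{
+//		CertFile:   "/etc/helm/client.crt",
+//		KeyFile:    "/etc/helm/client.key",
+//		CaCertFile: "/etc/helm/ca.crt",
+//	})
+//
+// Note that the returned config dereferences cert unconditionally, so
+// CertFile and KeyFile should both be set when calling this function.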
+func ClientConfig(opts Options) (cfg *tls.Config, err error) { + var cert *tls.Certificate + var pool *x509.CertPool + + if opts.CertFile != "" || opts.KeyFile != "" { + if cert, err = CertFromFilePair(opts.CertFile, opts.KeyFile); err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("could not load x509 key pair (cert: %q, key: %q): %v", opts.CertFile, opts.KeyFile, err) + } + return nil, fmt.Errorf("could not read x509 key pair (cert: %q, key: %q): %v", opts.CertFile, opts.KeyFile, err) + } + } + if !opts.InsecureSkipVerify && opts.CaCertFile != "" { + if pool, err = CertPoolFromFile(opts.CaCertFile); err != nil { + return nil, err + } + } + cfg = &tls.Config{ + InsecureSkipVerify: opts.InsecureSkipVerify, + Certificates: []tls.Certificate{*cert}, + ServerName: opts.ServerName, + RootCAs: pool, + } + return cfg, nil +} + +// ServerConfig returns a TLS configuration for use by the Tiller server. +func ServerConfig(opts Options) (cfg *tls.Config, err error) { + var cert *tls.Certificate + var pool *x509.CertPool + + if cert, err = CertFromFilePair(opts.CertFile, opts.KeyFile); err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("could not load x509 key pair (cert: %q, key: %q): %v", opts.CertFile, opts.KeyFile, err) + } + return nil, fmt.Errorf("could not read x509 key pair (cert: %q, key: %q): %v", opts.CertFile, opts.KeyFile, err) + } + if opts.ClientAuth >= tls.VerifyClientCertIfGiven && opts.CaCertFile != "" { + if pool, err = CertPoolFromFile(opts.CaCertFile); err != nil { + return nil, err + } + } + + cfg = &tls.Config{MinVersion: tls.VersionTLS12, ClientAuth: opts.ClientAuth, Certificates: []tls.Certificate{*cert}, ClientCAs: pool} + return cfg, nil +} diff --git a/vendor/k8s.io/helm/pkg/tlsutil/tls.go b/vendor/k8s.io/helm/pkg/tlsutil/tls.go new file mode 100644 index 00000000..6b0052ac --- /dev/null +++ b/vendor/k8s.io/helm/pkg/tlsutil/tls.go @@ -0,0 +1,97 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "k8s.io/helm/pkg/urlutil" +) + +func newTLSConfigCommon(certFile, keyFile, caFile string) (*tls.Config, error) { + config := tls.Config{} + + if certFile != "" && keyFile != "" { + cert, err := CertFromFilePair(certFile, keyFile) + if err != nil { + return nil, err + } + config.Certificates = []tls.Certificate{*cert} + } + + if caFile != "" { + cp, err := CertPoolFromFile(caFile) + if err != nil { + return nil, err + } + config.RootCAs = cp + } + + return &config, nil +} + +// NewClientTLS returns tls.Config appropriate for client auth. +func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) { + return newTLSConfigCommon(certFile, keyFile, caFile) +} + +// NewTLSConfig returns tls.Config appropriate for client and/or server auth. 
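+//
+// For instance (URL and paths are placeholders):
+//
+//	cfg, err := NewTLSConfig("https://repo.example.com:443/charts",
+//		certFile, keyFile, caFile)
+//	// cfg.ServerName is then "repo.example.com", via ExtractHostname.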
+func NewTLSConfig(url, certFile, keyFile, caFile string) (*tls.Config, error) { + config, err := newTLSConfigCommon(certFile, keyFile, caFile) + if err != nil { + return nil, err + } + config.BuildNameToCertificate() + + serverName, err := urlutil.ExtractHostname(url) + if err != nil { + return nil, err + } + config.ServerName = serverName + + return config, nil +} + +// CertPoolFromFile returns an x509.CertPool containing the certificates +// in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not +// be parsed, or if the file does not contain any certificates +func CertPoolFromFile(filename string) (*x509.CertPool, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("can't read CA file: %v", filename) + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("failed to append certificates from file: %s", filename) + } + return cp, nil +} + +// CertFromFilePair returns an tls.Certificate containing the +// certificates public/private key pair from a pair of given PEM-encoded files. +// Returns an error if the file could not be read, a certificate could not +// be parsed, or if the file does not contain any certificates +func CertFromFilePair(certFile, keyFile string) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, fmt.Errorf("can't load key pair from cert %s and key %s: %s", certFile, keyFile, err) + } + return &cert, err +} diff --git a/vendor/k8s.io/helm/pkg/urlutil/urlutil.go b/vendor/k8s.io/helm/pkg/urlutil/urlutil.go new file mode 100644 index 00000000..96b691c9 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/urlutil/urlutil.go @@ -0,0 +1,87 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package urlutil + +import ( + "net/url" + "path" + "path/filepath" + "strings" +) + +// URLJoin joins a base URL to one or more path components. +// +// It's like filepath.Join for URLs. If the baseURL is pathish, this will still +// perform a join. +// +// If the URL is unparsable, this returns an error. +func URLJoin(baseURL string, paths ...string) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + // We want path instead of filepath because path always uses /. + all := []string{u.Path} + all = append(all, paths...) + u.Path = path.Join(all...) + return u.String(), nil +} + +// Equal normalizes two URLs and then compares for equality. 
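+//
+// For example, mirroring the normalization performed below:
+//
+//	Equal("https://example.com", "https://example.com/")         // true: empty path becomes "/"
+//	Equal("https://example.com/a/../b", "https://example.com/b") // true: paths are cleaned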
+func Equal(a, b string) bool { + au, err := url.Parse(a) + if err != nil { + a = filepath.Clean(a) + b = filepath.Clean(b) + // If urls are paths, return true only if they are an exact match + return a == b + } + bu, err := url.Parse(b) + if err != nil { + return false + } + + for _, u := range []*url.URL{au, bu} { + if u.Path == "" { + u.Path = "/" + } + u.Path = filepath.Clean(u.Path) + } + return au.String() == bu.String() +} + +// ExtractHostname returns hostname from URL +func ExtractHostname(addr string) (string, error) { + u, err := url.Parse(addr) + if err != nil { + return "", err + } + return stripPort(u.Host), nil +} + +// stripPort from Go 1.8 because Circle is still on 1.7 +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e318ef8..6c334823 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -36,6 +36,14 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/dsnet/compress v0.0.1 +## explicit; go 1.9 +github.com/dsnet/compress +github.com/dsnet/compress/bzip2 +github.com/dsnet/compress/bzip2/internal/sais +github.com/dsnet/compress/internal +github.com/dsnet/compress/internal/errors +github.com/dsnet/compress/internal/prefix # github.com/emicklei/go-restful/v3 v3.10.1 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -92,6 +100,8 @@ github.com/gardener/gardener/hack/api-reference/template github.com/gardener/gardener/imagevector github.com/gardener/gardener/pkg/api/core github.com/gardener/gardener/pkg/api/extensions +github.com/gardener/gardener/pkg/apis/authentication +github.com/gardener/gardener/pkg/apis/authentication/v1alpha1 github.com/gardener/gardener/pkg/apis/core github.com/gardener/gardener/pkg/apis/core/install github.com/gardener/gardener/pkg/apis/core/v1beta1 @@ -156,6 +166,8 @@ github.com/gardener/gardener/pkg/utils/secrets/manager github.com/gardener/gardener/pkg/utils/timewindow github.com/gardener/gardener/pkg/utils/validation/kubernetesversion github.com/gardener/gardener/pkg/utils/version +github.com/gardener/gardener/test/framework +github.com/gardener/gardener/test/utils/access github.com/gardener/gardener/third_party/gopkg.in/yaml.v2 # github.com/gardener/hvpa-controller/api v0.5.0 ## explicit; go 1.15 @@ -244,6 +256,9 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers +# github.com/golang/snappy v0.0.4 +## explicit +github.com/golang/snappy # github.com/google/gnostic v0.5.7-v3refs ## explicit; go 1.12 github.com/google/gnostic/compiler @@ -307,6 +322,9 @@ github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.2 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/mholt/archiver v3.1.1+incompatible +## explicit +github.com/mholt/archiver # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure @@ -329,6 +347,9 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/nwaples/rardecode v1.1.2 +## explicit +github.com/nwaples/rardecode # github.com/onsi/ginkgo/v2 v2.9.2 
## explicit; go 1.18 github.com/onsi/ginkgo/v2 @@ -365,6 +386,10 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types +# github.com/pierrec/lz4 v2.6.1+incompatible +## explicit +github.com/pierrec/lz4 +github.com/pierrec/lz4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -402,6 +427,15 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/ulikunitz/xz v0.5.10 +## explicit; go 1.12 +github.com/ulikunitz/xz +github.com/ulikunitz/xz/internal/hash +github.com/ulikunitz/xz/internal/xlog +github.com/ulikunitz/xz/lzma +# github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 +## explicit +github.com/xi2/xz # go.uber.org/atomic v1.9.0 ## explicit; go 1.13 go.uber.org/atomic @@ -424,12 +458,20 @@ go.uber.org/zap/zapcore # golang.org/x/crypto v0.6.0 ## explicit; go 1.17 golang.org/x/crypto/blowfish +golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/openpgp +golang.org/x/crypto/openpgp/armor +golang.org/x/crypto/openpgp/clearsign +golang.org/x/crypto/openpgp/elgamal +golang.org/x/crypto/openpgp/errors +golang.org/x/crypto/openpgp/packet +golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt golang.org/x/crypto/ssh @@ -841,9 +883,11 @@ k8s.io/client-go/tools/record k8s.io/client-go/tools/record/util k8s.io/client-go/tools/reference k8s.io/client-go/tools/remotecommand +k8s.io/client-go/tools/watch k8s.io/client-go/transport k8s.io/client-go/transport/spdy k8s.io/client-go/util/cert +k8s.io/client-go/util/certificate/csr k8s.io/client-go/util/connrotation k8s.io/client-go/util/exec k8s.io/client-go/util/flowcontrol @@ -915,16 +959,27 @@ k8s.io/gengo/parser k8s.io/gengo/types # k8s.io/helm v2.16.1+incompatible ## explicit +k8s.io/helm/internal/third_party/dep/fs k8s.io/helm/pkg/chartutil +k8s.io/helm/pkg/downloader k8s.io/helm/pkg/engine +k8s.io/helm/pkg/getter +k8s.io/helm/pkg/helm/environment +k8s.io/helm/pkg/helm/helmpath k8s.io/helm/pkg/ignore k8s.io/helm/pkg/manifest +k8s.io/helm/pkg/plugin k8s.io/helm/pkg/proto/hapi/chart k8s.io/helm/pkg/proto/hapi/release k8s.io/helm/pkg/proto/hapi/version +k8s.io/helm/pkg/provenance k8s.io/helm/pkg/releaseutil +k8s.io/helm/pkg/repo +k8s.io/helm/pkg/resolver k8s.io/helm/pkg/sympath k8s.io/helm/pkg/timeconv +k8s.io/helm/pkg/tlsutil +k8s.io/helm/pkg/urlutil k8s.io/helm/pkg/version # k8s.io/klog v1.0.0 ## explicit; go 1.12