diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000..37de250c3
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,24 @@
## Description

- **Feature / Bug Fix**: (Brief description of the feature or issue being addressed)

- **Related Links**:
  - [Issues]()
  - [Team thread]()
  - [Documents]()
  - [Email Subject]()

## Type of Change

- [ ] Bug fix
- [ ] New feature
- [ ] Documentation update required
- [ ] Code quality improvement
- [ ] Other (describe):

## How Has This Been Tested?

Thank you for your contribution to AzCopy!

diff --git a/ChangeLog.md b/ChangeLog.md
index fa7876767..1e29217ab 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,1095 +1,1125 @@

# Change Log

## Version 10.26.0

### Security fixes

1. Updated dependencies to address security vulnerabilities.

### New Features

1. AzCopy now supports distribution through package managers for Red Hat Enterprise Linux (RHEL), Ubuntu, Mariner, Debian, SUSE, Rocky and CentOS. ([#2728](https://github.com/Azure/azure-storage-azcopy/pull/2728))

### Dependency updates

1. Golang 1.22.4 -> 1.22.5
2. azidentity 1.6.0 -> 1.7.0

### Bug Fixes

1. Fixed an issue where AzCopy would fail to unmarshal the `_token_refresh_source` property correctly when performing copy jobs from OAuth-attached containers. ([#2710](https://github.com/Azure/azure-storage-azcopy/pull/2710))
2. Fixed a CI pipeline in Azure DevOps to automatically detect CVEs declared against our dependencies. ([#2705](https://github.com/Azure/azure-storage-azcopy/pull/2705))

## Version 10.25.1

### Security fixes

1. Updated Golang to 1.22.4 to address security vulnerabilities.

### Dependency updates

1. Golang 1.22.3 -> 1.22.4
2. azidentity 1.5.1 -> 1.6.0

### Bug Fixes

1. Fixed a regression in `list` where `--output-type=text` would not output any information.
2. Adjusted parsing of `AZCOPY_OAUTH_TOKEN_INFO` to support both enum names as strings and integers (for users that took a dependency on the accidental changes in 10.25).

## Version 10.25.0

### Security fixes

1. Updated Golang version to 1.22.3 to address security vulnerabilities.

### New Features

1. Workload Identity authentication is now available. (#2619)
2. `azcopy list` now supports a `--location` flag, to support ambiguous URIs (see the sketch at the end of this version's notes). (#2595)
3. `azcopy list` now properly supports `--output-type=json` for users in automated scenarios. (#2629)

### Bug Fixes

1. Fixed a bug where AzCopy was not reporting performance info in `--output-type=json`. (#2636)
2. Fixed a bug where AzCopy would crash when operating on extremely large (16.5+ TB) managed disks. (#2635)
3. Fixed a bug with hash-based sync where the directory structure would not be replicated when using `--local-hash-storage-mode=HiddenFiles` with `--hash-meta-dir`. (#2611)
4. Fixed a bug where attempting to use a non-S3/GCP/Azure storage URI would result in treating the URI as a local path. (#2652)

### Documentation changes

1. Updated inaccurate helptext and filled in missing helptext. (#2649)
2. Many important errors now include a link to relevant documentation to assist users in troubleshooting AzCopy. (#2647)
3. Ambiguous flags (such as `--cpk-by-value`) have improved clarity in documentation. (#2615)
4. A clearer error message is provided when failing a transfer due to authorization. (#2644)
5. A special error has been created when performing an Azure Files to Azure Blob Storage transfer, indicating the present lack of service-side support. (#2616)
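To illustrate the 10.25.0 `list` improvements above, here is a minimal sketch combining the new `--location` flag with JSON output. The emulator-style URL and the `Blob` location value are illustrative assumptions, not taken from these notes:

```bash
# Disambiguate an emulator-style URI (no *.blob.core.windows.net suffix to
# infer the service from) and emit machine-readable output for automation.
azcopy list "https://127.0.0.1:10000/devstoreaccount1/mycontainer" \
    --location Blob \
    --output-type json
```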
## Version 10.25.0-Preview-1

### Security fixes

1. Updated the version of Golang used to 1.21 to address security vulnerabilities.

## Version 10.24.0

### New Features

1. Print summary logs at lower log levels and add BytesTransferred to the output in the `jobs show` command. ([#1319](https://github.com/Azure/azure-storage-azcopy/issues/1319))
2. Added a flag `--put-blob-size-mb` to `copy`, `sync` and `bench` commands to specify the maximum size of a blob to be uploaded using PutBlob (see the sketch at the end of this version's notes). ([#2561](https://github.com/Azure/azure-storage-azcopy/pull/2561))
3. Added support for the latest PutBlob service limits. Block blob PutBlob size can now be set up to 5000 MB. ([#2569](https://github.com/Azure/azure-storage-azcopy/pull/2569))
4. Updated all SDK dependencies to their latest versions. ([#2599](https://github.com/Azure/azure-storage-azcopy/pull/2599))
5. Updated summary logs to use consistent wording across all commands. ([#2602](https://github.com/Azure/azure-storage-azcopy/pull/2602))

### Bug Fixes

1. Fixed an issue where AzCopy would fail to auto-login with the AZCOPY_AUTO_LOGIN_TYPE environment variable set to PSCRED on certain Linux and MacOS environments. ([#2491](https://github.com/Azure/azure-storage-azcopy/issues/2491)) ([#2555](https://github.com/Azure/azure-storage-azcopy/issues/2555))
2. Fixed a bug where the page blob download optimizer would behave incorrectly on highly fragmented blobs if the service times out. ([#2445](https://github.com/Azure/azure-storage-azcopy/issues/2445))
3. Ignore 404 errors on retried deletes. ([#2554](https://github.com/Azure/azure-storage-azcopy/pull/2554))
4. Fixed a bug where the `VersionID` property would not be honored on the `list` command. ([#2007](https://github.com/Azure/azure-storage-azcopy/issues/2007))
5. Fixed a bug where ADLS Gen2 paths with encoded special characters would fail to transfer. ([#2549](https://github.com/Azure/azure-storage-azcopy/issues/2549))
6. Fixed an issue where ACL copying would fail when specifying an ADLS Gen2 account with the blob endpoint. ([#2546](https://github.com/Azure/azure-storage-azcopy/issues/2546))
7. Fixed an issue where the snapshot ID would not be preserved when testing which authentication type to use for managed disks. ([#2547](https://github.com/Azure/azure-storage-azcopy/issues/2547))
8. Fixed an issue where `copy` would panic if a root directory is specified as the destination. ([#2036](https://github.com/Azure/azure-storage-azcopy/issues/2036))

### Documentation

1. Removed the azcopy login/logout deprecation notice. ([#2589](https://github.com/Azure/azure-storage-azcopy/pull/2589))
2. Added a warning for customers using Shared Key for Azure Datalake transfers to indicate that Shared Key authentication will be deprecated and removed in a future release. ([#2567](https://github.com/Azure/azure-storage-azcopy/pull/2567))
3. Updated the list help text to clearly indicate the supported services and authentication types. ([#2563](https://github.com/Azure/azure-storage-azcopy/pull/2563))

### Security fixes

1. Updated dependencies to address security vulnerabilities.
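A hedged illustration of the new `--put-blob-size-mb` flag described above; the account URL, SAS placeholder, and file path are example values only:

```bash
# Upload blobs up to 5000 MiB in a single PutBlob call instead of block
# commits. 5000 reflects the new service limit noted above; tune as needed.
azcopy copy "/data/disk.img" \
    "https://myaccount.blob.core.windows.net/backups/disk.img?<SAS>" \
    --put-blob-size-mb=5000
```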
## Version 10.23.0

### New Features

1. Added support to ignore the error and output a summary if a cancelled job has already completed, through the use of the --ignore-error-if-completed flag. ([#2519](https://github.com/Azure/azure-storage-azcopy/pull/2519))
2. Added support for high-throughput append blobs. Append blob block size can now be set to up to 100 MB. ([#2480](https://github.com/Azure/azure-storage-azcopy/pull/2480))
3. Added support to exclude containers when transferring from account to account, through the use of the --exclude-container flag (see the sketch after this list). ([#2504](https://github.com/Azure/azure-storage-azcopy/pull/2504))
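A minimal sketch of the account-to-account `--exclude-container` feature above. The account URLs and SAS placeholders are illustrative, and the semicolon-separated list format is an assumption not confirmed by these notes:

```bash
# Copy every container from one account to another, skipping two containers.
# The "a;b" list syntax is an assumed example format.
azcopy copy "https://srcaccount.blob.core.windows.net/?<SAS>" \
    "https://dstaccount.blob.core.windows.net/?<SAS>" \
    --recursive \
    --exclude-container="logs;scratch"
```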
### Bug Fixes

1. Fixed an issue where specifying AZCOPY_AUTO_LOGIN_TYPE in any form other than uppercase would be incorrectly parsed. ([#2499](https://github.com/Azure/azure-storage-azcopy/pull/2499))
2. Fixed an issue where a failure to rename a file from the temporary prefix to the file name would not be treated as a failed transfer. ([#2481](https://github.com/Azure/azure-storage-azcopy/pull/2481))
3. Fixed an issue where closing the log would panic for benchmark jobs. ([#2537](https://github.com/Azure/azure-storage-azcopy/issues/2537))
4. Fixed an issue where --preserve-posix-properties would not work on download. ([#2497](https://github.com/Azure/azure-storage-azcopy/issues/2497))
5. Fixed an issue where --decompress would not be honored on Linux. ([#2392](https://github.com/Azure/azure-storage-azcopy/issues/2392))
6. Fixed an issue where log files would miss the .log extension. ([#2529](https://github.com/Azure/azure-storage-azcopy/issues/2529))
7. Fixed an issue where AzCopy would fail to set metadata properties on a read-only directory when using the --force-if-read-only flag. ([#2515](https://github.com/Azure/azure-storage-azcopy/pull/2515))
8. Fixed an issue where the AzCopy log location on resumed jobs would be reported incorrectly. ([#2466](https://github.com/Azure/azure-storage-azcopy/issues/2466))
9. Fixed an issue with preserving SMB properties on Linux. ([#2530](https://github.com/Azure/azure-storage-azcopy/pull/2530))
10. Fixed an issue where long-running service-to-service copies using OAuth at the source would result in the token expiring too early. ([#2513](https://github.com/Azure/azure-storage-azcopy/pull/2513))
11. Fixed an issue where AzCopy would try to create folders that already existed, resulting in many unnecessary requests. ([#2511](https://github.com/Azure/azure-storage-azcopy/pull/2511))

### Documentation

1. Updated --include-directory-stub inline help to match public documentation. ([#2488](https://github.com/Azure/azure-storage-azcopy/pull/2488))

## Version 10.22.2

### Bug Fixes

1. Fixed an issue where AzCopy operations pointed at a snapshot or version object would operate on the base object instead.
2. Fixed an issue where AzCopy would download only the base blob when the --list-of-versions flag was used.

## Version 10.22.1

### Bug Fixes

1. Fixed a regression with Azurite support. ([#2485](https://github.com/Azure/azure-storage-azcopy/issues/2485))
2. Fixed an issue where AZCOPY_OAUTH_TOKEN_INFO would be refreshed too often. ([#2503](https://github.com/Azure/azure-storage-azcopy/pull/2503))
3. Fixed an issue where commands would lag for multiple seconds. ([#2482](https://github.com/Azure/azure-storage-azcopy/issues/2482))
4. Fixed an issue where `azcopy version` would crash. ([#2483](https://github.com/Azure/azure-storage-azcopy/issues/2483))

### Documentation

1. Updated documentation to include AZCLI and PSCRED auto-login types. ([#2494](https://github.com/Azure/azure-storage-azcopy/pull/2494))

### Security fixes

1. Updated dependencies to address security vulnerabilities.

## Version 10.22.0

### New Features

1. Migrated to the latest [azdatalake SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake).
2. Added support for OAuth when performing File -> File and Blob -> File copy/sync and File make/list/remove ([#2302](https://github.com/Azure/azure-storage-azcopy/issues/2302)).
3. Added support to set the tier on premium block blob accounts. ([#2337](https://github.com/Azure/azure-storage-azcopy/issues/2337))
4. Added support to cache the latest AzCopy version and check the remote version every 24 hours instead of on every run. ([#2426](https://github.com/Azure/azure-storage-azcopy/pull/2426))
5. Updated all SDK dependencies to their latest versions and the default service version to `2023-08-03` for all services. ([#2402](https://github.com/Azure/azure-storage-azcopy/pull/2402))
6. Added support to rotate AzCopy logs. ([#2213](https://github.com/Azure/azure-storage-azcopy/issues/2213))
7. Added support to authenticate with PowerShell and Azure CLI credentials (see the sketch at the end of this version's notes). ([#2433](https://github.com/Azure/azure-storage-azcopy/pull/2433))

### Bug Fixes

1. Fixed an issue where HTTP headers and access tier would sometimes be sent as empty headers.
2. Fixed an issue where AzCopy would panic when passing an un-parseable URL. ([#2404](https://github.com/Azure/azure-storage-azcopy/issues/2404))
3. Fixed an issue where Object ID would be set as Resource ID when using MSI. ([#2395](https://github.com/Azure/azure-storage-azcopy/issues/2395))
4. Fixed an issue where the percent-complete stat could round incorrectly. ([#1078](https://github.com/Azure/azure-storage-azcopy/issues/1078))
5. Fixed an issue where `no transfers were scheduled` would be logged as an error; it is now logged as a warning. ([#874](https://github.com/Azure/azure-storage-azcopy/issues/874))
6. Fixed an issue where non-canonicalized headers would not be printed in the log. ([#2454](https://github.com/Azure/azure-storage-azcopy/pull/2454))
7. Fixed an issue where the cold tier would not be recognized as an allowed tier. ([#2447](https://github.com/Azure/azure-storage-azcopy/issues/2447))
8. Fixed an issue where S2S append blob copies would fail with an `AppendPositionConditionNotMet` error on retry after first experiencing a service timeout error. ([#2430](https://github.com/Azure/azure-storage-azcopy/pull/2430))
9. Fixed an issue where AZCOPY_OAUTH_TOKEN_INFO would not be refreshed. ([#2434](https://github.com/Azure/azure-storage-azcopy/issues/2434))

### Documentation

1. Updated `--preserve-permissions` documentation to indicate the correct roles necessary to perform the operation. ([#2440](https://github.com/Azure/azure-storage-azcopy/pull/2440))
2. Updated the help message for `sync` and `copy` to include all ADLS Gen2 supported auth methods. ([#2440](https://github.com/Azure/azure-storage-azcopy/pull/2440))

### Security fixes

1. Updated dependencies to address security vulnerabilities.
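A hedged sketch of the Azure CLI credential support above, assuming the Azure CLI is installed and already logged in; the AZCLI value appears in these notes, while the URLs are placeholders:

```bash
# Reuse an existing Azure CLI session instead of running `azcopy login`.
az login
export AZCOPY_AUTO_LOGIN_TYPE=AZCLI
azcopy copy "/data" "https://myaccount.blob.core.windows.net/container" --recursive
```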
## Version 10.22.0-Preview

### New Features

1. Migrated to the latest [azdatalake SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake).

### Bug Fixes

1. Fixed an issue where HTTP headers and access tier would sometimes be sent as empty headers.
2. Fixed an issue where AzCopy would panic when passing an un-parseable URL. ([#2404](https://github.com/Azure/azure-storage-azcopy/issues/2404))

### Security fixes

1. Updated dependencies to address security vulnerabilities.

## Version 10.21.2

### Security fixes

1. Updated dependencies to address security vulnerabilities.

## Version 10.21.1

### Bug Fixes

1. Fixed an issue where validating the destination length would fail a job instead of logging the error if read permissions are not provided.

## Version 10.21.0

### New Features

1. Migrated to the latest [azblob SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob).
2. Migrated to the latest [azfile SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile).
3. Migrated from deprecated ADAL to MSAL through the latest [azidentity SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity).
4. Added support for sync with Azure Data Lake Storage Gen2. ([#2376](https://github.com/Azure/azure-storage-azcopy/pull/2376))

### Bug Fixes

1. Fixed an issue where ACL data would not copy when specifying `*.dfs.core.windows.net` endpoints ([#2347](https://github.com/Azure/azure-storage-azcopy/pull/2347)).
2. Fixed an issue where Sync would incorrectly log that _all_ files, even those that didn't get overwritten, would be overwritten. ([#2372](https://github.com/Azure/azure-storage-azcopy/pull/2372))

### Documentation

1. Updated `--dry-run` documentation to indicate that the effects of `--overwrite` are ignored. ([#2325](https://github.com/Azure/azure-storage-azcopy/pull/2325))

### Special notes

1. Due to the migration from ADAL to MSAL, the tenant ID must now be set when authorizing with single-tenant applications created after 10/15/2018.

## Version 10.21.0-Preview

### New Features

1. Migrated to the latest [azblob SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob).
2. Migrated to the latest [azfile SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile).
3. Migrated from deprecated ADAL to MSAL through the latest [azidentity SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity).
4. Deprecated support for object IDs in MSI. Client ID or Resource ID can be used as an alternative.

### Special notes

1. Due to the migration from ADAL to MSAL, the tenant ID must now be set when authorizing with single-tenant applications created after 10/15/2018.

## Version 10.20.1

### Bug Fixes

1. Fixed an issue where LMT data is not returned on the `list` command for Azure Files.

## Version 10.20.0

### New Features

1. Mac M1/ARM64 Support
2. Force small blobs to use PutBlob for any source.
3. Support to delete CPK-encrypted blobs.
4. Support to follow symlinks when `--preserve-smb-permissions` is enabled.
5. Support to return LMT data on the `list` command for Azure Files.

### Bug Fixes

1. Fixed an issue where the source trailing-dot header was passed when the source of an S2S copy is not the file service.
2. Gracefully handle File Share trailing-dot paths to Windows/Blob (which do not support trailing dots) by skipping such files.
3. Allow the trailing-dot option to be ignored instead of erroring out in situations where it does not apply.
4. Fixed issue [#2186](https://github.com/Azure/azure-storage-azcopy/issues/2186) where AzCopy would panic when using the `--include-before` and `--include-after` flags while removing file share resources.
5. Fixed issue [#2183](https://github.com/Azure/azure-storage-azcopy/issues/2183) where AzCopy would panic when providing Azure Files URLs with no SAS token.
6. Fixed a bug where AzCopy would automatically assume an HNS path to be a file if the path did not have a slash terminator.
7. Fixed an issue where `--skip-version-check` would not be honored for the `login`, `logout`, and `help` commands. [#2299](https://github.com/Azure/azure-storage-azcopy/issues/2299)
### Documentation

1. Added a log entry for LMTs when a mismatch is encountered.
2. Added documentation indicating that the `login` and `logout` commands will be deprecated in the future.

### Security fixes

1. Updated dependencies to address security vulnerabilities.

## Version 10.19.0-Preview

### New Features

***Mac M1/ARM64 Support***

## Version 10.19.0

### New Features

1. Support for the new Cold Tier feature for Azure Blobs (--block-blob-tier=Cold)
2. Support preserving a trailing dot ('.') in names of files and directories in Azure Files (default is `--trailing-dot=Enable`)
3. Alternate modes to preserve hash for hash-based sync (see the sketch after this list) ([#2214](https://github.com/Azure/azure-storage-azcopy/issues/2214)) (default is OS-dependent, either `--local-hash-storage-mode=XAttr` on MacOS/Linux or `--local-hash-storage-mode=AlternateDataStreams` on Windows)
    - OS-specific hashing modes are expected to be available on all filesystems the source would traverse. (`user_xattr` enabled on filesystems on Unix systems, `FILE_NAMED_STREAMS` flag expected on Windows volumes)
    - HiddenFiles provides an OS-agnostic method to store hash data; to prevent "dirtying" the source, also specify `--hash-meta-dir`, directing AzCopy to store and read hidden hash metadata files elsewhere.
4. Support the 'force-if-readonly' flag for Sync. (`false` by default)
5. Preserve POSIX properties while uploading or downloading from HNS-enabled accounts (`--preserve-posix-properties`, `false` by default.)
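A sketch combining the hash-storage modes above with hash-based sync (the `--compare-hash` and `--missing-hash-policy` flags introduced in 10.17.0 below); the `MD5` value, paths, and SAS placeholder are illustrative assumptions:

```bash
# Sync only files whose MD5 differs, storing hash metadata in a side
# directory instead of "dirtying" the source tree with hidden files.
azcopy sync "/data" "https://myaccount.blob.core.windows.net/container?<SAS>" \
    --compare-hash=MD5 \
    --missing-hash-policy=Generate \
    --local-hash-storage-mode=HiddenFiles \
    --hash-meta-dir="/var/azcopy/hashmeta"
```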
### Bug Fixes

1. Fixed a situation where large files would hang indefinitely with a low value for `--cap-mbps`.
2. Fixed issue [#2074](https://github.com/Azure/azure-storage-azcopy/issues/2074) where AzCopy would hang after cancelling.
3. Fixed issue [#1888](https://github.com/Azure/azure-storage-azcopy/issues/1888) where directories with empty names were incorrectly handled.
4. Fixed cancellation of HNS delete jobs. [#2117](https://github.com/Azure/azure-storage-azcopy/issues/2117)
5. Fixed an issue where large chunks could not be scheduled. [#2228](https://github.com/Azure/azure-storage-azcopy/issues/2228)
6. Fixed a segfault on MacOS. [#1790](https://github.com/Azure/azure-storage-azcopy/issues/1790)
7. Fixed a panic on an attempt to create the AzCopy dir. [#2191](https://github.com/Azure/azure-storage-azcopy/issues/2191)

## Version 10.18.1

### Bug fixes

1. Fixed a data race when utilizing hash-based sync. ([Issue 2146](https://github.com/Azure/azure-storage-azcopy/issues/2146))
2. Fixed the destination naming behavior for container-to-container transfers while using --preserve-permissions. ([Issue 2141](https://github.com/Azure/azure-storage-azcopy/issues/2141))
3. Temporarily disabled hostname lookup before enumeration. ([Issue 2144](https://github.com/Azure/azure-storage-azcopy/issues/2144))

### Documentation

1. Modified the `--from-to` flag documentation to be clearer. ([Issue 2153](https://github.com/Azure/azure-storage-azcopy/issues/2153))

## Version 10.18.0

### New features

1. Added support for `Content-MD5` in the `list` command. Users can now list the MD5 hashes of the blobs in the target container.
2. Added support to resume incomplete blobs. Users can now resume the upload of a blob that was interrupted midway.
3. Added support for download of POSIX properties.
4. Added support for persisting symlink data.

### Bug fixes

1. Fixed [Issue 2120](https://github.com/Azure/azure-storage-azcopy/pull/2120)
2. Fixed [Issue 2062](https://github.com/Azure/azure-storage-azcopy/pull/2062)
3. Fixed [Issue 2046](https://github.com/Azure/azure-storage-azcopy/pull/2048)
4. Fixed [Issue 1762](https://github.com/Azure/azure-storage-azcopy/pull/2125)

### Documentation

1. Added an example for `--include-pattern`.
2. Added documentation for `--compare-hash`.

### Security fixes

1. CPK-related headers are now sanitized from the logs.
2. Updated dependencies to address security vulnerabilities.

## Version 10.17.0

### New features

1. Added support for hash-based sync. AzCopy sync can now take two new flags, `--compare-hash` and `--missing-hash-policy=Generate`, with which users can transfer only those files whose MD5 hashes differ.

### Bug fixes

1. Fixed [issue 1994](https://github.com/Azure/azure-storage-azcopy/pull/1994): Error in calculation of block size
2. Fixed [issue 1957](https://github.com/Azure/azure-storage-azcopy/pull/1957): Repeated authentication token refresh
3. Fixed [issue 1870](https://github.com/Azure/azure-storage-azcopy/pull/1870): Fixed issue where CPK would not be injected on retries
4. Fixed [issue 1946](https://github.com/Azure/azure-storage-azcopy/issues/1946): Fixed metadata parsing
5. Fixed [issue 1931](https://github.com/Azure/azure-storage-azcopy/issues/1931)

## Version 10.16.2

### Bug Fixes

1. Fixed an issue where sync would always re-download files because we were comparing against the service LMT, not the SMB LMT.
2. Fixed a crash when copying objects service to service using a user delegation SAS token.
3. Fixed a crash when deleting folders that may have a raw path string.

## Version 10.16.1

### Documentation changes

1. `all` has always been an available status option for `jobs show`; it is now documented.

### Bug Fixes

1. Fixed a hard crash when persisting ACLs from remote filesystems on Windows.
2. Fixed a hard crash when deleting folders containing a `%` in the name from Azure Files.
3. Fixed a bug which made Managed Disks' data access authentication mode unusable with auto-login.

## Version 10.16.0

### New features

1. Added a time-based flag for remove to include files modified before/after a certain date/time.
2. Added the --output-level flag, which allows users to set output verbosity.
3. Added the --preserve-posix-properties flag, which allows users to persist the results of the statx(2)/stat(2) syscalls on upload.
4. Implemented the `setprops` command, which allows users to set specific properties of Blobs, BlobFS, and Files.
5. Implemented multi-auth for managed disks (SAS+OAuth) when the managed disk export account requests it.

### Bug fixes

1. Fixed [issue 1506](https://github.com/Azure/azure-storage-azcopy/issues/1506): Added an input watcher to resolve an issue where a job could not be resumed.
2. Fixed [issue 1794](https://github.com/Azure/azure-storage-azcopy/issues/1794): Moved log-level to root.go so log-level arguments do not get ignored.
3. Fixed [issue 1824](https://github.com/Azure/azure-storage-azcopy/issues/1824): Avoid creating .azcopy under HOME if the plan/log location is specified elsewhere.
4. Fixed [issue 1830](https://github.com/Azure/azure-storage-azcopy/issues/1830), [issue 1418](https://github.com/Azure/azure-storage-azcopy/issues/1418), and [issue 873](https://github.com/Azure/azure-storage-azcopy/issues/873): Improved the error message for when AzCopy cannot determine whether the source is a directory.
5. Fixed [issue 1777](https://github.com/Azure/azure-storage-azcopy/issues/1777): Fixed `jobs list` to handle the respective output type correctly.
6. Fixed a win64 alignment issue.

## Version 10.15.0

### New features

1. Added support for OAuth forwarding when performing Blob -> Blob copy.
2. Allow users to dynamically change the bandwidth cap via messages through STDIN.
3. GCS -> Blob is now GA.
4. Enable MinIO (S3) logs in DEBUG mode.
5. Upgraded Go version to 1.17.9.

### Bug fixes

1. Resolved alignment of `atomicSuccessfulBytesInActiveFiles`.
2. Fixed an issue where last-write-time was still getting persisted even when --preserve-smb-info was false.
3. Fixed an issue where concurrency was always AUTO for Azure Files despite an explicit override.
4. Removed the outdated load command following the deprecation of the cflsload package.

## Version 10.14.1

### Bug fixes

1. Fixed issue #1625 where a panic occurred during sync scanning.
2. Fixed a remove issue when the account has versioning enabled.

## Version 10.14.0

### New features

1. Feature to [permanently delete](https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob#remarks) soft-deleted snapshots/versions of blobs has been added (preview): `--permanent-delete=none/snapshots/version/snapshotsandversions` (see the sketch after this list).
2. Feature to preserve properties and ACLs when copying to the Azure file share root directory.
3. Pin all APIs to use the default service version `2020-04-08` and let users decide the service version via the `AZCOPY_DEFAULT_SERVICE_API_VERSION` environment variable. Previously, a few APIs did not respect the `AZCOPY_DEFAULT_SERVICE_API_VERSION` environment variable.
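A hedged sketch of the permanent-delete feature above; the URL and SAS are placeholders, and the value shown is one of the options listed in the feature note:

```bash
# Permanently delete the soft-deleted snapshots and versions of a blob (preview).
azcopy remove "https://myaccount.blob.core.windows.net/container/file.txt?<SAS>" \
    --permanent-delete=snapshotsandversions
```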
### Bug fixes

1. Fixed an issue in which AzCopy failed to copy to a classic blob container with `preserve blob access tier`.
2. Fixed [issue 1630](https://github.com/Azure/azure-storage-azcopy/issues/1630): AzCopy created extra empty directories at the destination while performing an S2S transfer from one ADLS Gen2 account to another ADLS Gen2 account.
3. Changed how AzCopy obtains and sets ACLs to ensure accuracy.
4. Clarified the error message for `azcopy sync` when the source or destination cannot be detected.
5. Report an error when client-provided key (CPK) encryption is applied to a DFS endpoint.
6. Fixed [issue 1596](https://github.com/Azure/azure-storage-azcopy/issues/1596): AzCopy failed to transfer files (with '/.' in their path) from AWS S3 to Azure Blob Storage.
7. Fixed [issue 1474](https://github.com/Azure/azure-storage-azcopy/issues/1474): AzCopy panicked when trying to re-create an already open plan file.
8. Improved handling of auth errors against a single file.
9. Fixed [issue 1640](https://github.com/Azure/azure-storage-azcopy/issues/1640): Recursive copy from a GCS bucket to an Azure container failed with a `FileIgnored` error when using `--exclude-path`.
10. Fixed [issue 1655](https://github.com/Azure/azure-storage-azcopy/issues/1655): AzCopy panicked when using the `--include-before` flag.
11. Fixed [issue 1609](https://github.com/Azure/azure-storage-azcopy/issues/1609): `blockid` converted to lower case in AzCopy logs.
12. Fixed [issue 1643](https://github.com/Azure/azure-storage-azcopy/issues/1643), [issue 1661](https://github.com/Azure/azure-storage-azcopy/issues/1661): Updated Golang version to `1.16.10` to fix security vulnerabilities in Golang packages.

## Version 10.13.0

### New features

1. Added Arc VM support for authorization via managed identity.
2. Widened the managed disk scenario to all `md-` accounts instead of just `md-impexp-` accounts.
3. The concurrency is now set to AUTO for Azure Files by default to avoid throttling.
4. Decreased the number of create-directory calls for Azure Files to avoid throttling.
5. Added the `--from-to` flag for sync.

### Bug fixes

1. Fixed the memory usage issue with generating the list of skipped/failed transfers in JSON output.
2. Fixed ADLS Gen2 ACL copying where intermediate folders were missed.
3. Fixed the S3 to Blob scenario using the login command.
4. Fixed dry-run for DFS endpoints.
5. Fixed an incorrect percentage-done shown while resuming a job.
6. Fixed login issues on ARM platforms.
7. Fixed an incorrect progress status for the sync command.
8. Fixed a concurrent map access problem for the folder creation tracker.
9. Fixed resuming with a public source.

## Version 10.12.2

### Bug fixes

1. Fixed deleting blobs that are of a different type than the specified copy.
2. Fixed --delete-destination on Windows downloads.

## Version 10.12.1

### Bug fixes

1. Fixed the problem of always receiving an overwrite prompt on Azure Files folders.

## Version 10.12.0

### New features

1. Added support for include and exclude regex flags, which allow pattern matching on the entire paths.
2. Added a dry-run mode for copy, remove, and sync. This feature allows the user to visualize the changes before committing them.
3. For SMB-aware locations, the preserve-smb-info flag is now true by default.
4. Improved how folder LMTs are obtained, to allow time-based filters for folders.
5. Added support for ACL copying between HNS-enabled accounts. The preserve-smb-permissions flag is now deprecated and has been renamed to preserve-permissions.

### Bug fixes

1. Allow `--from-to` to be set for the remove command.
2. Fixed the problem where the resume command did not honor AZCOPY_DEFAULT_SERVICE_API_VERSION.
3. Fixed the new version check.
4. Fixed a sync issue on Windows where paths are case-insensitive.
5. Added a prompt for invalid characters when importing from S3.
6. Fixed a bug where public S3 buckets could not be listed.
7. Sanitize SAS tokens in JSON output for skipped and failed transfers.
8. Improved folder property preservation across resumes.

## Version 10.11.0

### New features

1. Improved performance for copying small blobs (with size less than `256 MiB`) with [Put Blob from URL](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url).
1. Added mirror-mode support in the sync operation via the `mirror-mode` flag. The new mode disables last-modified-time based comparisons and overwrites the conflicting files and blobs at the destination if this flag is set to true.
1. Added the flag `disable-auto-decoding` to avoid automatic decoding of URL-encoded illegal characters when uploading from Windows. These illegal characters could have been encoded as a result of downloading them onto Windows, which does not support them.
1. Support custom MIME type mapping via the environment variable `AZCOPY_CONTENT_TYPE_MAP`.
1. Output a message on the CLI when AzCopy detects a proxy for each domain.
1. Interpret DFS endpoints as Blob endpoints automatically when performing service-to-service copy.

### Bug fixes

1. Tolerate enumeration errors for Azure Files so that the entire job does not fail when a directory is deleted or modified during scanning.
1. Log skipped transfers to the scanning log.
1. Fixed pipe upload by adding missing fields such as Metadata, Blob Index Tags, Client Provided Key, Blob Access Tier, etc.
1. Fixed the cleanup issue for the benchmark command.

## Version 10.10.0

### New features

1. Support sync for Local/Blob <-> Azure File.
1. Download to a temporary file path (.azDownload-[jobID]-[name]) before renaming to the original path.
1. Support CPK by name and CPK by value.
1. Offer a knob to disable application logging (Syslog/Windows Event Log).
1. Trust the zonal DNS suffix for OAuth by default.
1. Added the include-directory-stub flag for the copy command, to allow copying of blobs with metadata of `hdi_isfolder:true`.
1. Display more fields for the list command; please refer to the help message for examples.
1. Provide an environment variable to set the request try timeout, to allow faster retries.

### Bug fixes

1. Improved the job progress update mechanism for better scalability on larger jobs.
1. Time-limit the container creation step, to avoid hanging and improve UX.
1. Set SMB info/permissions again after file upload and copy, to fully preserve the integrity of the permission string and last-write-time.
1. Fixed a module import problem for V10.

## Version 10.9.0

### New features

1. Added preview support for importing from GCP Storage to Azure Block Blobs.
1. Added scanning logs, which have low output by default but can become verbose if desired, to help in debugging.
1. Support preservation of tags when copying blobs.
1. Added last-modified-time info to the list command.

### Bug fixes

1. Removed an unexpected conflict prompt for file share folders with special characters in the name, such as ";".

## Version 10.8.0

### New features

1. Added an option to [disable parallel blob listing](https://github.com/Azure/azure-storage-azcopy/pull/1263)
1. Added support for uploading [large files](https://github.com/Azure/azure-storage-azcopy/pull/1254/files) up to 4 TiB. Please refer to the [public documentation](https://docs.microsoft.com/en-us/rest/api/storageservices/create-file) for more information
1. Added support for the `include-before` flag. Refer to [this](https://github.com/Azure/azure-storage-azcopy/issues/1075) for more information

### Bug fixes

1. Fixed issue [#1246](https://github.com/Azure/azure-storage-azcopy/issues/1246), a security vulnerability in the x/text package
1. Fixed issue [share snapshot->share copy](https://github.com/Azure/azure-storage-azcopy/pull/1258) with SMB permissions

## Version 10.7.0

### New features

1. Added support for auto-login when performing data commands (copy/sync/list/make/remove). Please refer to our documentation for more info.
1. Added the ``blob-tags`` flag for setting [blob index tags](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-index-how-to?tabs=azure-portal) when performing the copy command (see the sketch after this list). Please note that we support setting blob tags only when tags are explicitly specified. Refer to the [public documentation](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks) to know more.
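A minimal sketch of setting blob index tags at copy time with the ``blob-tags`` flag above; the tag string format (URL-style `key=value` pairs joined by `&`) is an assumption for illustration, and the URLs are placeholders:

```bash
# Upload a file and attach two blob index tags to the destination blob.
# The "key=value&key=value" format shown is assumed, not confirmed here.
azcopy copy "/data/report.csv" \
    "https://myaccount.blob.core.windows.net/container/report.csv?<SAS>" \
    --blob-tags="project=azcopy&review=true"
```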
### Bug fixes

1. Fixed issue [#1139](https://github.com/Azure/azure-storage-azcopy/issues/1139) to preserve content-type in service-to-service transfers.
1. Fixed an issue to allow snapshot restoring.
1. Fixed an issue with setting the content-type of an empty file when performing the copy command.

### Improvements

1. Added support for setting the tier directly at the time of the [upload](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks) API call instead of performing a separate [set tier](https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier) API call.

## Version 10.6.1

### Bug fixes

1. Fixed issue [#971](https://github.com/Azure/azure-storage-azcopy/issues/971) with scanning directories on a public container
1. Fixed an issue with piping where source and destination were reversed
1. Allow piping to use OAuth login
1. Fixed an issue where transfers with the ``overwrite`` flag set to ``IfSourceNewer`` would work incorrectly
1. Fixed issue [#1139](https://github.com/Azure/azure-storage-azcopy/issues/1139), incorrect content type in Blob Storage
1. Fixed issue [#1192](https://github.com/Azure/azure-storage-azcopy/issues/1192), an intermittent panic when an AzCopy job is aborted
1. Fixed an issue with auto-detected content types for 0-length files

## Version 10.6.0

### New features

1. ``azcopy sync`` now supports the persistence of ACLs between supported resources (Azure Files) using the ``--preserve-smb-permissions`` flag.
1. ``azcopy sync`` now supports the persistence of SMB property info between supported resources (Azure Files) using the ``--preserve-smb-info`` flag. The information that can be preserved is Created Time, Last Write Time and Attributes (e.g. Read Only).
1. Added support for [higher block & blob sizes](https://docs.microsoft.com/en-us/rest/api/storageservices/put-block#remarks)
    - For service version ``2019-12-12`` or higher, the block size can now be less than or equal to ``4000 MiB``. The maximum size of a block blob therefore can be ``190.7 TiB (4000 MiB X 50,000 blocks)``
1. Added support for [Blob Versioning](https://docs.microsoft.com/en-us/azure/storage/blobs/versioning-overview)
    - Added the ``list-of-versions`` flag (specifies a file where each version id is listed on a separate line) to download/delete versions of a blob from Azure Storage.
    - Download/Delete a version of a blob by directly specifying its version id in the source blob URL.

### Bug fixes

1. Log the input command at ERROR level.

## Version 10.5.1

### New features

- Allow more accurate values for job status in `jobs` commands, e.g. completed with failed or skipped transfers.

### Bug fixes

- Fixed an issue with removing blobs with `hdi_isfolder=true` metadata when the list-of-files flag is used.
- Manually unfurl symbolic links to fix a long-file-path issue on UNC locations.

## Version 10.5.0

### New features

1. Improved scanning performance for most cases by adding support for parallel local and Blob enumeration.
1. Added download support for the benchmark command.
1. A new way to quickly copy only files changed after a certain date/time. The `copy` command now accepts
the parameter `--include-after`. It takes an ISO 8601-formatted date, and will copy only those files that were
changed on or after the given date/time. When processing large numbers of files, this is faster than `sync` or
`--overwrite=IfSourceNewer`, but it does require the user to specify the date to be used. E.g. `2020-08-19T15:04:00Z`
for a UTC time, `2020-08-19T15:04` for a time in the local timezone of the machine running AzCopy,
or `2020-08-19` for midnight (00:00), also in the local timezone. (See the sketch after this list.)
1. When detecting the content type for common static website files, use the commonly correct values instead of looking them up in the registry.
1. Allow the remove command to delete blob directory stubs which have the metadata `hdi_isfolder=true`.
1. The S3 to Blob feature now has GA support.
1. Added support for the load command on Linux, based on Microsoft Avere's CLFSLoad extension.
1. Each job now logs its start time precisely in the log file, using ISO 8601 format. This is useful if you want to
use that start date as the `--include-after` parameter to a later job on the same directory. Look for "ISO 8601 START TIME"
in the log.
1. Stop treating zero-item jobs as failures, to improve the user experience.
1. Improved the naming of files being generated in the benchmark command, by reversing the digits.
Doing so allows the names to not be an alphabetic series, which used to negatively impact the performance on the service side.
1. AzCopy can now detect when setting a blob tier would be impossible. If AzCopy cannot check the destination account type, a new transfer failure status will be set: `TierAvailabilityCheckFailure`
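A short sketch of the `--include-after` parameter described above; the timestamp, paths, and SAS placeholder are illustrative:

```bash
# Copy only files changed on or after the given UTC time (ISO 8601).
azcopy copy "/data" \
    "https://myaccount.blob.core.windows.net/container?<SAS>" \
    --recursive \
    --include-after "2020-08-19T15:04:00Z"
```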
### Bug fixes

1. Fixed the persistence of last-write-time (as part of SMB info when uploading) for Azure Files. It was using the creation time erroneously.
1. Fixed the SAS timestamp parsing issue.
1. Transfers to the File Service with a read-only SAS were failing because we tried listing properties for the parent directories.
The user experience is improved by ignoring this benign error and trying to create the parent directories directly.
1. Fixed an issue with mixed SAS and AD authentication in the sync command.
1. Fixed a file creation error on Linux when decompression is turned on.
1. Fixed an issue on Windows for files with extended charsets such as [%00 - %19, %0A-%0F, %1A-%1F].
1. Enabled recovering from unexpectedEOF errors.
1. Fixed an issue in which attribute filters did not work if the source path contained an asterisk.
1. Fixed an issue of an unexpected upload destination when uploading a whole drive in Windows (e.g. "D:\").

## Version 10.4.3

### Bug fixes

1. Fixed a bug where AzCopy errored if a filename ended with a slash character. (E.g. a backslash at the end of a Linux filename.)

## Version 10.4.2

### Bug fixes

1. Fixed a bug in the overwrite prompt for folders.

## Version 10.4.1

### New features

1. Added overwrite prompt support for folder property transfers.
1. Perform a proxy lookup when the source is S3.

### Bug fixes

1. When downloading from Azure Files to Windows with the `--preserve-smb-permissions` flag, sometimes
the resulting permissions were not correct. This was fixed by limiting the concurrent SetNamedSecurityInfo operations.
1. Added a check to avoid overwriting the file itself when performing copy operations.

## Version 10.4

### New features

1. `azcopy copy` now supports the persistence of ACLs between supported resources (Windows and Azure Files) using the `--persist-smb-permissions` flag.
1. `azcopy copy` now supports the persistence of SMB property info between supported resources (Windows and Azure Files)
using the `--persist-smb-info` flag. The information that can be preserved is Created Time, Last Write Time and Attributes (e.g. Read Only).
1. AzCopy can now transfer empty folders, and also transfer the properties of folders. This applies when both the source
and destination support real folders (Blob Storage does not, because it only supports virtual folders).
1. On Windows, AzCopy can now activate the special privileges `SeBackupPrivilege` and `SeRestorePrivilege`. Most admin-level
accounts have these privileges in a deactivated state, as do all members of the "Backup Operators" security group.
If you run AzCopy as one of those users
and supply the new flag `--backup`, AzCopy will activate the privileges. (Use an elevated command prompt, if running as Admin.)
At upload time, this allows AzCopy to read files
which you wouldn't otherwise have permission to see. At download time, it works with the `--preserve-smb-permissions` flag
to allow preservation of permissions where the Owner is not the user running AzCopy. The `--backup` flag will report a failure
if the privileges cannot be activated.
1. Status output from AzCopy `copy`, `sync`, `jobs list`, and `jobs status` now contains information about folders.
This includes new properties in the JSON output of copy, sync, list and jobs status commands, when `--output-type json` is used.
1. Empty folders are deleted when using `azcopy rm` on Azure Files.
1. Snapshots of Azure File Shares are supported, for read-only access, in `copy`, `sync` and `list`. To use, add a
`sharesnapshot` parameter at the end of the URL for your Azure Files source. Remember to separate it from the existing query
string parameters (i.e. the SAS token) with a `&`. E.g.
`https://<account>.file.core.windows.net/sharename?st=2020-03-03T20%3A53%3A48Z&se=2020-03-04T20%3A53%3A48Z&sp=rl&sv=2018-03-28&sr=s&sig=REDACTED&sharesnapshot=2020-03-03T20%3A24%3A13.0000000Z`
1. Benchmark mode is now supported for Azure Files and ADLS Gen 2 (in addition to the existing benchmark support for
Blob Storage).
1. A special performance optimization is introduced, but only for NON-recursive cases in this release. An `--include-pattern` that contains only `*` wildcards will be performance optimized when
querying blob storage without the recursive flag. The section before the first `*` will be used as a server-side prefix, to filter the search results more efficiently. E.g. `--include-pattern abc*` will be implemented
as a prefix search for "abc". In a more complex example, `--include-pattern abc*123` will be implemented as a prefix search for `abc`, followed by normal filtering for all matches of `abc*123`. To non-recursively process blobs
contained directly in a container or virtual directory, include `/*` at the end of the URL (before the query string). E.g. `http://account.blob.core.windows.net/container/*?`. (See the sketch after this list.)
1. The `--cap-mbps` parameter now parses floating-point numbers. This will allow you to limit your maximum throughput to a fraction of a megabit per second.
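A sketch of the non-recursive wildcard optimization above; the account, container, and directory names are placeholders:

```bash
# Non-recursively download blobs sitting directly in a virtual directory.
# The "abc*" pattern is served as a server-side prefix search for "abc".
azcopy copy "https://myaccount.blob.core.windows.net/container/dir/*?<SAS>" \
    "/local/dir" \
    --include-pattern "abc*"
```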
### Special notes

1. A more user-friendly error message is returned when an unknown source/destination combination is supplied.
1. AzCopy has upgraded to service revision `2019-02-02`. Users targeting local emulators, Azure Stack, or other private/special
instances of Azure Storage may need to intentionally downgrade their service revision using the environment variable
`AZCOPY_DEFAULT_SERVICE_API_VERSION`. Prior to this release, the default service revision was `2018-03-28`.
1. For Azure Files to Azure Files transfers, --persist-smb-permissions and --persist-smb-info are available on all OSes.
(But for uploads and downloads, those flags are only available on Windows.)
1. AzCopy now includes a list of trusted domain suffixes for Azure Active Directory (AAD) authentication.
After `azcopy login`, the resulting token will only be sent to locations that appear in the list. The list is:
`*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net`.
If necessary, you can add to the list with the command-line flag `--trusted-microsoft-suffixes`. For security,
you should only add Microsoft Azure domains.
1. When transferring over a million files, AzCopy will reduce its progress reporting frequency from every 2 seconds to every 2 minutes.

### Breaking changes

1. To accommodate interfacing with JavaScript programs (and other languages that have a similar issue with number precision),
all the numbers in the JSON output have been converted to strings (i.e. with quotes around them).
1. The TransferStatus value `SkippedFileAlreadyExists` has been renamed `SkippedEntityExists` and may now be used both
for when files are skipped and for when the setting of folder properties is skipped. This affects the input and
output of `azcopy jobs show` and the status values shown in the JSON output format from `copy` and `sync`.
1. The format and content of authentication information messages, in the JSON output format, e.g.
"Using OAuth token for authentication", has been changed.

### Bug fixes

1. AzCopy can now overwrite even Read-Only and Hidden files when downloading to Windows. (The read-only case requires the use of
the new `--force-if-read-only` flag.)
1. Fixed a nil dereference when a prefetching error occurs in an upload.
1. Fixed a nil dereference when attempting to close a log file while log-level is none.
1. AzCopy's scanning of Azure Files sources, for download or Service to Service transfers, is now much faster.
1. Sources and destinations that are identified by their IPv4 address can now be used. This enables usage with storage
emulators. Note that the `from-to` flag is typically needed when using such sources or destinations. E.g. `--from-to BlobLocal`
if downloading from a blob storage emulator to local disk.
1. Filenames containing the character `:` can now safely be downloaded on Windows and uploaded to Azure Files.
1. Objects with names containing `+` can now safely be used in imported S3 object names.
1. The `check-length` flag is now exposed in benchmark mode, so that length checking can be turned off for more speed
when benchmarking with small file sizes. (When using large file sizes, the overhead of the length check is
insignificant.)
1. The in-app documentation for Service Principal Authentication has been corrected to now include the application-id
parameter.
1. ALL filter types are now disallowed when running `azcopy rm` against ADLS Gen2 endpoints. Previously,
include/exclude patterns were disallowed, but exclude-path was not. That was incorrect. All should have been
disallowed because none (other than include-path) are respected.
1. Fixed the empty page range optimization when uploading Managed Disks. In an edge case, there was previously a risk of data corruption if the user uploaded two different images into the same Managed Disk resource one after the other.

## Version 10.3.4

### New features

1. Fixed a feature parity issue by adding support for the "ifSourceNewer" option on the `overwrite` flag. It serves as a replacement for the `/XO` flag in V8.

### Bug fixes

1. Fixed the `jobs clean` command on Windows, which was previously crashing when the `with-status` flag was used.

## Version 10.3.3

### New features

1. `azcopy list` is now supported on Azure Files and ADLS Gen 2, in addition to Blob Storage.
1. The `--exclude-path` flag is now supported in the `sync` command.
1. Added the new environment variable `AZCOPY_USER_AGENT_PREFIX` to allow a prefix to be added to the user agent strings.

### Bug fixes

1. Content properties (such as Content-Encoding and Cache-Control) are now included when syncing Blob -> Blob and Azure
Files -> Azure Files.
1. Custom metadata is now included when syncing Blob -> Blob and Azure Files -> Azure Files.
1. The `azcopy list` command no longer repeats parts of its output. (Previously it would sometimes repeat itself and show the same blob multiple times in the output.)
1. The `--aad-endpoint` parameter is now visible, instead of hidden. It allows use of Azure Active Directory
authentication in national clouds (e.g. Azure China).
1. On Windows, AzCopy now caches information about which proxy server should be used, instead of looking it up every
time. This significantly reduces CPU usage when transferring many small files. It also solves a rare bug where transfers got permanently "stuck" with
one uncompleted file.
1. When uploading to a write-only destination, there is now a clearer error message when the built-in file length check
fails. The message says how to fix the problem using `--check-length=false`.
1. Size checks on managed disk imports are now clearer, and all run at the start of the import process instead of the end.

## Version 10.3.2

### Bug fixes

1. Jobs could not be cancelled while scanning was still in progress.
1. Downloading large managed disks (8 TB and above) failed with errors.
1. Downloading large page blobs might make no progress for the first 15 or 20 minutes.
1. There was a rare error where the final output could under-report the total number of files in the job. That error has been fixed.
1. When using JSON output mode, the output from the rm command on ADLS Gen2 was inconsistent with the output from other commands.
1. After authentication errors, files in progress were not cleaned up (deleted) at the destination. If there was an
authentication failure during a job (e.g. a SAS token expired while in use) this could result in files being left
behind that had incomplete contents (even though their size looked correct).
1. The AUTO concurrency option, for automatically tuning concurrency as AzCopy runs, started working too late if scanning (aka enumeration) took a long time. This resulted in reduced throughput when using this setting.
1. It was not possible to access the root of Windows drives with lowercase drive letters. E.g. d:\
1. Service to Service transfers would fail when using an environment variable to specify OAuth authentication.
1. Certain errors parsing URLs were not reported clearly.
1. When downloading to NUL (/dev/null on Linux), files of zero length no longer trigger errors. (Downloads to NUL can be used in performance testing and bulk MD5 checking.)

## Version 10.3.1

### New features

1. Added a helpful deprecation notice for the legacy include/exclude flags.
1. Added back the request ID at log level INFO.
1. Added back the cancel-from-stdin option for partner integration.
1. Added a flag to define delete-snapshot options for the remove command.

### Bug fixes

1. Fixed a race condition in the shutdown of decompressingWriter.
1. Made progress reporting more accurate.

## Version 10.3.0

### Breaking changes

1. The `*` character is no longer supported as a wildcard in URLs, except for the two exceptions
noted below. It remains supported in local file paths.
    1. The first exception is that `/*` is still allowed at the very end of the "path" section of a
    URL. This is illustrated by the difference between these two source URLs:
    `https://account/container/virtualDir?SAS` and
    `https://account/container/virtualDir/*?SAS`. The former copies the virtual directory
    `virtualDir` by creating a folder of that name at the destination. The latter copies the
    _contents_ of `virtualDir` directly into the target without creating a folder named
    "virtualDir".
    1. The second exception is when you are transferring multiple _whole_ containers (or S3 buckets). You can
    use `*` as a wildcard in the container or bucket name.
1. The `--include` and `--exclude` parameters have been replaced by `--include-pattern` and
`--exclude-pattern` (for filenames) and `--include-path` and `--exclude-path` (for paths,
including directory and filenames).
The new parameters have behaviour that is better defined in complex situations (such as
recursion). The `*` wildcard is supported in the pattern parameters, but _not_ in the path ones.
1. There have been two breaking changes to the JSON output that is produced if you request
JSON-formatted output. The `sync` command's output in JSON has changed for consistency reasons,
and the final message type, for `copy` and `sync`, has changed its name from `Exit` to `EndOfJob`.
Tools using the JSON output format to integrate AzCopy should be aware.
1. If downloading to "null" on Windows, the target must now be named "NUL", according to standard
Windows conventions. "/dev/null" remains correct on Linux. (This feature can be used to test
throughput or check MD5s without saving the downloaded data.)
1. The file format of the (still undocumented) `--list-of-files` parameter is changed. (It remains
undocumented because, for simplicity, users are
encouraged to use the new `--include-pattern` and `--include-path` parameters instead.)

### New features

1. `sync` is supported from Blob Storage to Blob Storage, and from Azure Files to Azure Files.
1. `copy` is supported from Azure Files to Azure Files, and from Blob Storage to Azure Files.
1. Percent complete is displayed as each job runs.
1. VHD files are auto-detected as page blobs.
1. A new benchmark mode allows quick and easy performance benchmarking of your network connection to
Blob Storage. Run AzCopy with the parameters `bench --help` for details. This feature is in
Preview status.
1. The location for AzCopy's "plan" files can be specified with the environment variable
`AZCOPY_JOB_PLAN_LOCATION`. (If you move the plan files and also move the log files using the existing
`AZCOPY_LOG_LOCATION`, then AzCopy will not store anything under your home directory on Linux and
MacOS. On Windows AzCopy will keep just one small encrypted file under `c:\users\<username>\.azcopy`)
1. Log files and plan files can be cleaned up to save disk space, using AzCopy's new `jobs rm` and
`jobs clean` commands.
1. When listing jobs with `jobs show`, the status of each job is included in the output.
1. The `--overwrite` parameter now supports the value of "prompt" to prompt the user on a
file-by-file basis. (The old values of true and false are also supported.)
1. The environment variable `AZCOPY_CONCURRENCY_VALUE` can now be set to "AUTO". This is expected to be
useful for customers with small networks, or those running AzCopy on
moderately powered machines to transfer blobs between accounts. This feature is in preview status.
1. When uploading from Windows, files can be filtered by Windows-specific file attributes (such as
"Archive", "Hidden" etc.)
1. Memory usage can be controlled by setting the new environment variable `AZCOPY_BUFFER_GB`.
Decimal values are supported. Actual usage will be the value specified, plus some overhead. (See the sketch after this list.)
1. An extra integrity check has been added: the length of the
completed destination file is checked against that of the source.
1. When downloading, AzCopy can automatically decompress blobs (or Azure Files) that have a
`Content-Encoding` of `gzip` or `deflate`. To enable this behaviour, supply the `--decompress`
parameter.
1. The number of disk files accessed concurrently can be controlled with the new
`AZCOPY_CONCURRENT_FILES` environment variable. This is an advanced setting, which generally
should not be modified. It does not affect the number of HTTP connections, which is still
controlled by `AZCOPY_CONCURRENCY_VALUE`.
1. The values of key environment variables are listed at the start of the log file.
1. An official Windows 32-bit build is now released, in addition to the usual 64-bit builds for
Linux, Mac and Windows.
1. If you need to refer to a literal `*` in the name of a blob or Azure Files file, e.g. for a blob
named "*", escape the `*` using standard URL escaping. To do this, replace the `*` with the following
character sequence: %2A
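A hedged sketch of the memory and concurrency knobs above; the values are arbitrary examples, and the URLs are placeholders:

```bash
# Cap AzCopy's buffer memory at 1.5 GiB and let concurrency auto-tune.
export AZCOPY_BUFFER_GB=1.5
export AZCOPY_CONCURRENCY_VALUE=AUTO
azcopy copy "/data" "https://myaccount.blob.core.windows.net/container?<SAS>" --recursive
```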
Handling of such blobs will be improved in a future release. -1. Accessing root of drive (e.g. `d:\`) no longer causes an error. -1. On slow networks, there are no longer excessive log messages sent to the Event Log (Windows) and - SysLog (Linux). -1. If AzCopy can't check whether it's up to date, it will no longer hang. (Previously, it could hang - if its version check URL, https://aka.ms/azcopyv10-version-metadata, was unreachable due to - network routing restrictions.) -1. High concurrency values are supported (e.g. over 1000 connections). While these values are seldom - needed, they are occasionally useful - e.g. for service-to-service transfer of files around 1 MB - in size. -1. Files skipped due to "overwrite=false" are no longer logged as "failed". -1. Logging is more concise at the default log level. -1. Error message text, returned by Blob and File services, is now included in the log. -1. A log file is created for copy jobs even when there was nothing to copy. -1. In the log, UPLOAD SUCCESSFUL messages now include the name of the successful file. -1. Clear error messages are given to show that AzCopy does not currently support Customer-Provided - Encryption Keys. -1. On Windows, downloading a filename with characters not supported by the operating system will - result in those characters being URL-encoded to construct a Windows-compatible filename. The - encoding process is reversed if the file is uploaded. -1. Uploading a single file to ADLS Gen 2 works now. -1. The `remove` command no longer hangs when removing blobs that have snapshots. Instead it will fail to - delete them, and report the failures clearly. -1. Jobs downloading from ADLS Gen 2 that result in no scheduled transfers will no longer hang. - - -## Version 10.2.1 - -### Bug fix - -1. Fixed outputting error message for SPN login failures. - -## Version 10.2.0 - -### Bug fix - -1. Security: fixed signature redaction in logs to include all error types: the log entries for network failures and HTTP errors could include SAS tokens. In previous releases, the SAS tokens were not always redacted correctly and could be written to the AzCopy log file and also to the Windows Event Log or the Linux Syslog. Now, SAS tokens are correctly redacted when logging those errors. Note that errors returned by the Storage service itself - such as authentication errors and bad container names – were already redacted correctly. -1. Added error to using Azure Files without a SAS token (invalid auth configuration). -1. AzCopy v10 now outputs a sensible error & warning when attempting to authenticate a storage account business-to-business. -1. `--log-level=none` now drops no logs, and has a listing in `--help`. -1. Fixed bug where piping was not picking up the service version override, making it not work well against Azure Stack. -1. Fixed a timeout when uploading particularly large files to ADLSG2. -1. Fixed single wildcard match uploads. - -### New features - -1. Enabled copying from page/block/append blob to another blob of a different type. -1. AzCopy now grabs proxy details (sans authentication) from the Windows Registry using `mattn/go-ieproxy`. -1. Service Principal Authentication is now available under `azcopy login`-- check `azcopy env` for details on client secrets/cert passwords. -1. SAS tokens are supported on HNS (Hierarchical Namespace/Azure Data Lake Generation 2) Storage Accounts. -1. Added support for custom headers on ADLS Gen 2. -1. Added support for fractional block size for copy and sync. -1. 
Use different log output for skipped files (so they don't look like failures).
-1. Added a bandwidth cap (--cap-mbps) to limit AzCopy's network usage; check `azcopy cp -h` for details.
-1. Added ADLS Gen2 support for the rm command.
-
-## Version 10.1.2
-
-### Breaking change
-
-1. Jobs created with earlier releases cannot be resumed with this release. We recommend
-you update to this release only when you have no partially-completed jobs that you want to resume.
-
-### Bug fix
-
-1. Files with `Content-Encoding: gzip` are now downloaded in compressed form. Previous versions tried to save a
-   decompressed version of the file. But they incorrectly truncated it at the original _compressed_ length, so the
-   downloaded file was not complete.
-
-   By changing AzCopy to save the compressed version, that problem is solved, and Content-MD5 checks now work for such files. (It is
-   assumed that the Content-MD5 hash is the hash of the _compressed_ file.)
-
-### New features
-
-1. Headers for Content-Disposition, Content-Language and Cache-Control can now be set when uploading
-files to Blob Storage and to Azure Files. Run `azcopy copy --help` to see command line parameter
-information, including those needed to set the new headers.
-1. The on-screen job summary is output to the log file at the end of the job, so that the log will include those summary statistics.
-
-## Version 10.1.1
-
-### Bug fixes
-
-1. Fixed a typo in the local traverser (error handling in walk).
-1. Fixed a memory alignment issue for atomic functions on 32-bit systems.
-
-## Version 10.1.0 (GA)
-
-### Breaking changes
-
-1. The `--block-size` parameter has been replaced by `--block-size-mb`. The old parameter took a number of _bytes_; the
-   new one takes a number of Megabytes (MiB).
-1. The following command line parameters have been renamed, for clarity:
-   * `--output` is now `--output-type`
-   * `--md5-validation` is now called `--check-md5`
-   * `--s2s-source-change-validation` is now called `--s2s-detect-source-changed`
-   * `--s2s-invalid-metadata-handle` is now called `--s2s-handle-invalid-metadata`
-   * `--quota` (in the `make` command) is now called `--quota-gb`. Note that the values were always in GB; the new name
-     simply clarifies that fact.
-
-### New features
-
-1. AzCopy can now be configured to use older API versions. This enables (limited) support for Azure Stack.
-1. The listing command now shows file sizes.
-
-### Bug fixes
-
-1. AzCopy v10 now works correctly with ADLS Gen 2 folders that contain spaces in their names.
-1. When cancelling with CTRL-C, the status of in-progress transfers is now correctly recorded.
-1. For security, the Service-to-Service (S2S) feature will only work if both the source and destination connections are
-   HTTPS.
-1. Use of the `--overwrite` parameter is clarified in the in-application help.
-1. Fixed incorrect behavior with setting file descriptor limits on platforms including OS X and BSD.
-1. On Linux and OS X, log files are now created with the same file permissions as all other files created by AzCopy.
-1. ThirdPartyNotice.txt is updated.
-1. Load DLLs in a more secure manner compatible with Go's sysdll registration.
-1. Fixed support for relative paths and shorthands.
-1. Fixed a bug in pattern matching for blob download when recursion is off.
-
-## Version 10.0.9 (Release Candidate)
-
-### Breaking changes
-
-1. For creating MD5 hashes when uploading, version 10.x now has the OPPOSITE default to
-   AzCopy 8.x. Specifically, as of version 10.0.9, MD5 hashes are NOT created by default.
To create
-   Content-MD5 hashes when uploading, you must now specify `--put-md5` on the command line.
-
-### New features
-
-1. Can migrate data directly from Amazon Web Services (AWS). In this high-performance data path
-   the data is read directly from AWS by the Azure Storage service. It does not need to pass through
-   the machine running AzCopy. The copy happens synchronously, so you can see its exact progress.
-1. Can migrate data directly from Azure Files or Azure Blobs (any blob type) to Azure Blobs (any
-   blob type). In this high-performance data path the data is read directly from the source by the
-   Azure Storage service. It does not need to pass through the machine running AzCopy. The copy
-   happens synchronously, so you can see its exact progress.
-1. The sync command prompts with 4 options about deleting unneeded files from the target: Yes, No, All or
-   None. (Deletion only happens if the `--delete-destination` flag is specified.)
-1. Can download to /dev/null. This throws the data away - but is useful for testing raw network
-   performance unconstrained by disk, and also for validating MD5 hashes in bulk (when run in a cloud
-   VM in the same region as the Storage account).
-
-### Bug fixes
-
-1. Fixed a memory leak when downloading large files
-1. Fixed performance when downloading a single large file
-1. Fixed a bug with "too many open files" on Linux
-1. Fixed a memory leak when uploading sparse files (files with big blocks of zeros) to Page Blobs and
-   Azure Files.
-1. Fixed an issue where the application crashed after being throttled by the Azure Storage Service. (The
-   primary fix here is for Page Blobs, but a secondary part of the fix also helps with Block Blobs.)
-1. Fixed functionality and usability issues with the `remove` command
-1. Improved performance for short-duration jobs (e.g. those lasting less than a minute)
-1. Prevented an unnecessary error message that sometimes appeared when cancelling a job
-1. Various improvements to the online help and error messages.
-
-
-## Version 10.0.8:
-
-1. Rewrote the sync command to eliminate numerous bugs and improve usability (see wiki for details)
-1. Implemented various improvements to memory management
-1. Added MD5 validation support (available options: NoCheck, LogOnly, FailIfDifferent, FailIfDifferentOrMissing)
-1. Added last modified time checks for the source to guarantee transfer integrity
-1. Formalized outputs in JSON and elevated the output flag to the root level
-1. Eliminated outputs to STDERR (for new version notifications), which were causing problems for certain CI systems
-1. Improved the log format for Windows
-1. Optimized plan file sizes
-1. Improved command line parameter names as follows (to be consistent with the naming pattern of other parameters):
-   1. fromTo -> from-to
-   1. blobType -> blob-type
-   1. excludedBlobType -> excluded-blob-type
-   1. outputRaw (in "list" command) -> output
-   1. stdIn-enable (reserved for internal use) -> stdin-enable
+
+# Change Log
+
+## Version 10.28.0-Preview
+
+### Bug Fixes
+1. Fixed an issue where AzCopy would not persist tokens when logging in via Device Code. ([#2361](https://github.com/Azure/azure-storage-azcopy/issues/2361))
+
+## Version 10.27.0
+
+### New Features
+1. Added the ability to specify the concurrency of piped uploads via the AZCOPY_CONCURRENCY_VALUE environment variable (see the sketch after this list). ([#2821](https://github.com/Azure/azure-storage-azcopy/pull/2821))
+2. Updated the message returned when a transfer failure is related to tags permission issues. ([#2798](https://github.com/Azure/azure-storage-azcopy/issues/2798))
+3. Optimized the number of calls to create directory. ([#2828](https://github.com/Azure/azure-storage-azcopy/pull/2828))
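+
+A minimal sketch of the piped-upload usage above. The account, container, SAS token, concurrency value, and the `--from-to=PipeBlob` setting are illustrative assumptions, not taken from this entry:
+
+```sh
+# Sketch: stream a tarball into Blob Storage through a pipe, pinning the
+# number of concurrent requests via the environment variable described above.
+export AZCOPY_CONCURRENCY_VALUE=16   # hypothetical value; tune for your network
+tar -czf - ./data | azcopy copy "https://<account>.blob.core.windows.net/<container>/data.tar.gz?<SAS>" --from-to=PipeBlob
+```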
+
+### Dependency updates
+1. Golang 1.22.5 -> 1.23.1
+2. azidentity 1.7.0 -> 1.8.0
+3. azcore 1.13.0 -> 1.16.0
+
+### Bug Fixes
+1. Fixed an issue where piped downloads in Linux would append AzCopy version information if on an older version. ([#2774](https://github.com/Azure/azure-storage-azcopy/pull/2774))
+2. Fixed an issue where sync with --hash-meta-dir would cause original files to be hidden. ([#2758](https://github.com/Azure/azure-storage-azcopy/issues/2758))
+3. Fixed an issue where jobs list would result in a segmentation violation. ([#2714](https://github.com/Azure/azure-storage-azcopy/issues/2714))
+4. Fixed an issue where list for File would not allow OAuth. ([#2821](https://github.com/Azure/azure-storage-azcopy/pull/2821))
+5. Fixed an issue where folders would be handled improperly on FNS accounts. ([#2809](https://github.com/Azure/azure-storage-azcopy/pull/2809))
+6. Fixed an issue where --dry-run would sometimes panic due to closing a log file that was already closed. ([#2832](https://github.com/Azure/azure-storage-azcopy/pull/2832))
+7. Fixed an issue where sync --delete-destination-files was overwriting all destination files. ([#2818](https://github.com/Azure/azure-storage-azcopy/pull/2818))
+8. Fixed an issue where AzCopy would panic due to sending on an already closed channel. ([#2703](https://github.com/Azure/azure-storage-azcopy/issues/2703))
+
+### Documentation
+1. Updated the inline help text to say that sources and destinations cannot be modified during transfers. ([#2826](https://github.com/Azure/azure-storage-azcopy/pull/2826))
+
+## Version 10.26.0
+
+### Security fixes
+
+1. Updated dependencies to address security vulnerabilities.
+
+### New Features
+
+1. AzCopy now supports distribution through package managers for Red Hat Enterprise Linux (RHEL), Ubuntu, Mariner, Debian, SUSE, Rocky and CentOS. ([#2728](https://github.com/Azure/azure-storage-azcopy/pull/2728))
+
+### Dependency updates
+
+1. Golang 1.22.4 -> 1.22.5
+2. azidentity 1.6.0 -> 1.7.0
+
+### Bug Fixes
+
+1. Fixed an issue where AzCopy would fail to unmarshal the `_token_refresh_source` property correctly when performing copy jobs from OAuth-attached containers. ([#2710](https://github.com/Azure/azure-storage-azcopy/pull/2710))
+2. Fixed a CI pipeline in Azure DevOps to automatically detect CVEs declared against our dependencies. ([#2705](https://github.com/Azure/azure-storage-azcopy/pull/2705))
+
+## Version 10.25.1
+
+### Security fixes
+
+1. Updated Golang to 1.22.4 to address security vulnerabilities
+
+### Dependency updates
+
+1. Golang 1.22.3 -> 1.22.4
+2. azidentity 1.5.1 -> 1.6.0
+
+### Bug Fixes
+
+1. Fixed a regression in `list` where `--output-type=text` would not output any information
+2. Adjusted parsing of `AZCOPY_OAUTH_TOKEN_INFO` to support both enum names as a string and integers (for users that took a dependency upon the accidental changes in 10.25)
+
+## Version 10.25.0
+
+### Security fixes
+
+1. Updated Golang version to 1.22.3 to address security vulnerabilities
+
+### New Features
+
+1. Workload Identity authentication is now available (#2619)
+2. `azcopy list` now supports a `--location` flag, to support ambiguous URIs (#2595)
+3. `azcopy list` now properly supports `--output-type=json` for users in automated scenarios (see the sketch after this list). (#2629)
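+
+The two `list` additions above can be combined; a hedged sketch, in which the `--location Blob` value and the URL are illustrative assumptions:
+
+```sh
+# Sketch: force JSON output for scripting, and disambiguate the endpoint
+# type when the URI alone is ambiguous.
+azcopy list "https://<account>.blob.core.windows.net/<container>?<SAS>" --location Blob --output-type json
+```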
+
+### Bug Fixes
+
+1. Fixed a bug where AzCopy was not reporting performance info in `--output-type=json` (#2636)
+2. Fixed a bug where AzCopy would crash when operating on extremely large (16.5+TB) managed disks (#2635)
+3. Fixed a bug with hash-based sync where the directory structure would not be replicated when using `--local-hash-storage-mode=HiddenFiles` with `--hash-meta-dir` (#2611)
+4. Fixed a bug where attempting to use a non-S3/GCP/Azure storage URI would result in treating the URI as a local path (#2652)
+
+### Documentation changes
+
+1. Updated inaccurate helptext and filled in missing helptext (#2649)
+2. Many important errors now include a link to relevant documentation to assist users in troubleshooting AzCopy (#2647)
+3. Ambiguous flags (such as `--cpk-by-value`) have improved clarity in documentation (#2615)
+4. A clearer error message is provided when failing a transfer due to authorization. (#2644)
+5. A special error has been created when performing an Azure Files to Azure Blob Storage transfer, indicating the present lack of service-side support (#2616)
+
+## Version 10.25.0-Preview-1
+
+### Security fixes
+
+1. Updated the version of GoLang used to 1.21 to address security vulnerabilities.
+
+## Version 10.24.0
+
+### New Features
+
+1. Print summary logs at lower log levels and add BytesTransferred to the output in the `jobs show` command. ([#1319](https://github.com/Azure/azure-storage-azcopy/issues/1319))
+2. Added a flag `--put-blob-size-mb` to `copy`, `sync` and `bench` commands to specify the maximum size of a blob to be uploaded using PutBlob (see the sketch after this list). ([#2561](https://github.com/Azure/azure-storage-azcopy/pull/2561))
+3. Added support for the latest PutBlob service limits. Block blob PutBlob size can now be set up to 5000MB. ([#2569](https://github.com/Azure/azure-storage-azcopy/pull/2569))
+4. Updated all SDK dependencies to their latest version. ([#2599](https://github.com/Azure/azure-storage-azcopy/pull/2599))
+5. Updated summary logs to use consistent wording across all commands. ([#2602](https://github.com/Azure/azure-storage-azcopy/pull/2602))
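+
+A minimal sketch of the `--put-blob-size-mb` flag above, using the 5000MB limit this release enables (the file, URL and SAS are placeholders):
+
+```sh
+# Sketch: raise the PutBlob cutoff so blobs up to 5000 MB are uploaded with a
+# single PutBlob call instead of being split into blocks.
+azcopy copy ./disk.vhd "https://<account>.blob.core.windows.net/<container>/disk.vhd?<SAS>" --put-blob-size-mb 5000
+```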
+
+### Bug Fixes
+
+1. Fixed an issue where AzCopy would fail to auto login with the AZCOPY_AUTO_LOGIN_TYPE environment variable set to PSCRED on certain Linux and MacOS environments. ([#2491](https://github.com/Azure/azure-storage-azcopy/issues/2491))([#2555](https://github.com/Azure/azure-storage-azcopy/issues/2555))
+2. Fixed a bug where the page blob download optimizer would behave incorrectly on highly fragmented blobs if the service times out. ([#2445](https://github.com/Azure/azure-storage-azcopy/issues/2445))
+3. Ignore 404 errors on retried deletes. ([#2554](https://github.com/Azure/azure-storage-azcopy/pull/2554))
+4. Fixed a bug where the `VersionID` property would not be honored on the `list` command. ([#2007](https://github.com/Azure/azure-storage-azcopy/issues/2007))
+5. Fixed a bug where ADLS Gen2 paths with encoded special characters would fail to transfer. ([#2549](https://github.com/Azure/azure-storage-azcopy/issues/2549))
+6. Fixed an issue where ACL copying would fail when specifying an ADLS Gen2 account with the blob endpoint. ([#2546](https://github.com/Azure/azure-storage-azcopy/issues/2546))
+7. Fixed an issue where the snapshot ID would not be preserved when testing which authentication type to use for managed disks. ([#2547](https://github.com/Azure/azure-storage-azcopy/issues/2547))
+8. Fixed an issue where `copy` would panic if a root directory is specified as the destination. ([#2036](https://github.com/Azure/azure-storage-azcopy/issues/2036))
+
+### Documentation
+
+1. Removed the azcopy login/logout deprecation notice. ([#2589](https://github.com/Azure/azure-storage-azcopy/pull/2589))
+2. Added a warning for customers using Shared Key for Azure Datalake transfers to indicate that Shared Key authentication will be deprecated and removed in a future release. ([#2569](https://github.com/Azure/azure-storage-azcopy/pull/2567))
+3. Updated the list help text to clearly indicate the services and authentication types supported. ([#2563](https://github.com/Azure/azure-storage-azcopy/pull/2563))
+
+### Security fixes
+
+1. Updated dependencies to address security vulnerabilities.
+
+## Version 10.23.0
+
+### New Features
+
+1. Added support to ignore the error and output a summary if a cancelled job has already completed, through the use of the --ignore-error-if-completed flag. ([#2519](https://github.com/Azure/azure-storage-azcopy/pull/2519))
+2. Added support for high throughput append blob. Append blob block size can now be set to up to 100 MB. ([#2480](https://github.com/Azure/azure-storage-azcopy/pull/2480))
+3. Added support to exclude containers when transferring from account to account through the use of the --exclude-container flag. ([#2504](https://github.com/Azure/azure-storage-azcopy/pull/2504))
+
+### Bug Fixes
+
+1. Fixed an issue where specifying AZCOPY_AUTO_LOGIN_TYPE in any form other than uppercase would be incorrectly parsed. ([#2499](https://github.com/Azure/azure-storage-azcopy/pull/2499))
+2. Fixed an issue where a failure to rename a file from the temporary prefix to the file name would not be considered a failed transfer. ([#2481](https://github.com/Azure/azure-storage-azcopy/pull/2481))
+3. Fixed an issue where closing the log would panic for benchmark jobs. ([#2537](https://github.com/Azure/azure-storage-azcopy/issues/2537))
+4. Fixed an issue where --preserve-posix-properties would not work on download. ([#2497](https://github.com/Azure/azure-storage-azcopy/issues/2497))
+5. Fixed an issue where --decompress would not be honored in Linux. ([#2392](https://github.com/Azure/azure-storage-azcopy/issues/2392))
+6. Fixed an issue where log files would miss the .log extension. ([#2529](https://github.com/Azure/azure-storage-azcopy/issues/2529))
+7. Fixed an issue where AzCopy would fail to set metadata properties on a read-only directory when using the --force-if-read-only flag. ([#2515](https://github.com/Azure/azure-storage-azcopy/pull/2515))
+8. Fixed an issue where the AzCopy log location on resumed jobs would be reported incorrectly. ([#2466](https://github.com/Azure/azure-storage-azcopy/issues/2466))
+9. Fixed an issue with preserving SMB properties in Linux. ([#2530](https://github.com/Azure/azure-storage-azcopy/pull/2530))
+10. Fixed an issue where long-running service to service copies using OAuth at the source would result in the token expiring too early. ([#2513](https://github.com/Azure/azure-storage-azcopy/pull/2513))
+11. Fixed an issue where AzCopy would try to create folders that already existed, resulting in many unnecessary requests. ([#2511](https://github.com/Azure/azure-storage-azcopy/pull/2511))
+
+### Documentation
+
+1. Updated --include-directory-stub inline help to match public documentation. ([#2488](https://github.com/Azure/azure-storage-azcopy/pull/2488))
+
+
+## Version 10.22.2
+
+### Bug Fixes
+
+1. Fixed an issue where AzCopy operations pointed at a snapshot or version object would operate on the base object instead.
+2. Fixed an issue where AzCopy would download only the base blob when the --list-of-versions flag was used (see the sketch below).
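+
+A hedged sketch of the `--list-of-versions` usage the fix above concerns; the changelog elsewhere describes the flag as taking a file with one version ID per line (the file name, IDs, URL and SAS here are hypothetical):
+
+```sh
+# versions.txt holds one blob version ID per line, e.g.:
+#   2024-01-15T10:30:00.0000000Z
+azcopy copy "https://<account>.blob.core.windows.net/<container>/<blob>?<SAS>" ./restored/ --list-of-versions versions.txt
+```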
+ +## Version 10.22.1 + +### Bug Fixes + +1. Fixed a regression with Azurite support. ([#2485](https://github.com/Azure/azure-storage-azcopy/issues/2485)) +2. Fixed an issue where AZCOPY_OAUTH_TOKEN_INFO would be refreshed too often. ([#2503](https://github.com/Azure/azure-storage-azcopy/pull/2503)) +3. Fixed an issue where commands would lag for multiple seconds. ([#2482](https://github.com/Azure/azure-storage-azcopy/issues/2482)) +4. Fixed an issue where azcopy version would crash. ([#2483](https://github.com/Azure/azure-storage-azcopy/issues/2483)) + +### Documentation + +1. Updated documentation to include AZCLI and PSCRED auto login types. ([#2494](https://github.com/Azure/azure-storage-azcopy/pull/2494)) + +### Security fixes + +1. Updated dependencies to address security vulnerabilities. + +## Version 10.22.0 + +### New Features + +1. Migrated to the latest [azdatalake SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake). +2. Added support for OAuth when performing File -> File and Blob -> File copy/sync and File make/list/remove ([#2302](https://github.com/Azure/azure-storage-azcopy/issues/2302)). +3. Added support to set tier on premium block blob accounts. ([#2337](https://github.com/Azure/azure-storage-azcopy/issues/2337)) +4. Added support to cache latest AzCopy version and check the remote version every 24 hours instead of every run. ([#2426](https://github.com/Azure/azure-storage-azcopy/pull/2426)) +5. Updated all SDK dependencies to their latest version and the default service version to `2023-08-03` for all services. ([#2402](https://github.com/Azure/azure-storage-azcopy/pull/2402)) +6. Added support to rotate AzCopy logs. ([#2213](https://github.com/Azure/azure-storage-azcopy/issues/2213)) +7. Added support to authenticate with Powershell and Azure CLI credentials. ([#2433](https://github.com/Azure/azure-storage-azcopy/pull/2433)) + +### Bug Fixes + +1. Fixed an issue where http headers and access tier would sometimes be sent as empty headers. +2. Fixed an issue where AzCopy would panic when passing an un-parseable URL. ([#2404](https://github.com/Azure/azure-storage-azcopy/issues/2404)) +3. Fixed an issue where Object ID would be set as Resource ID when using MSI. ([#2395](https://github.com/Azure/azure-storage-azcopy/issues/2395)) +4. Fixed an issue where the percent complete stat could round incorrectly. ([#1078](https://github.com/Azure/azure-storage-azcopy/issues/1078)) +5. Fixed an issue where `no transfers were scheduled` would be logged as an error, it is now logged as a warning. ([#874](https://github.com/Azure/azure-storage-azcopy/issues/874)) +6. Fixed an issue where non canonicalized headers would not be printed in the log. ([#2454](https://github.com/Azure/azure-storage-azcopy/pull/2454)) +7. Fixed an issue where cold tier would not be recognized as an allowed tier. ([#2447](https://github.com/Azure/azure-storage-azcopy/issues/2447)) +8. Fixed an issue where s2s append blob copies would fail with `AppendPositionConditionNotMet` error on retry after first experiencing a service timeout error. ([#2430](https://github.com/Azure/azure-storage-azcopy/pull/2430)) +9. Fixed an issue where AZCOPY_OAUTH_TOKEN_INFO would not be refreshed. ([#2434](https://github.com/Azure/azure-storage-azcopy/issues/2434)) + +### Documentation + +1. Updated `--preserve-permissions` documentation to indicate the correct roles necessary to perform the operation. ([#2440](https://github.com/Azure/azure-storage-azcopy/pull/2440)) +2. 
Updated the help message for `sync` and `copy` to include all ADLS Gen2 supported auth methods. ([#2440](https://github.com/Azure/azure-storage-azcopy/pull/2440))
+
+### Security fixes
+
+1. Updated dependencies to address security vulnerabilities.
+
+## Version 10.22.0-Preview
+
+### New Features
+
+1. Migrated to the latest [azdatalake SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake).
+
+### Bug Fixes
+
+1. Fixed an issue where http headers and access tier would sometimes be sent as empty headers.
+2. Fixed an issue where AzCopy would panic when passing an un-parseable URL. ([#2404](https://github.com/Azure/azure-storage-azcopy/issues/2404))
+
+### Security fixes
+
+1. Updated dependencies to address security vulnerabilities.
+
+## Version 10.21.2
+
+### Security fixes
+
+1. Updated dependencies to address security vulnerabilities.
+
+## Version 10.21.1
+
+### Bug Fixes
+
+1. Fixed an issue where validating destination length would fail a job instead of logging the error if read permissions are not provided.
+
+## Version 10.21.0
+
+### New Features
+
+1. Migrated to the latest [azblob SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob).
+2. Migrated to the latest [azfile SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile).
+3. Migrated from deprecated ADAL to MSAL through the latest [azidentity SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity).
+4. Added support for sync with Azure Data Lake Storage Gen2. ([#2376](https://github.com/Azure/azure-storage-azcopy/pull/2376))
+
+### Bug Fixes
+
+1. Fixed an issue where ACL data would not copy when specifying `*.dfs.core.windows.net` endpoints ([#2347](https://github.com/Azure/azure-storage-azcopy/pull/2347)).
+2. Fixed an issue where Sync would incorrectly log that _all_ files, even those that didn't get overwritten, would be overwritten. ([#2372](https://github.com/Azure/azure-storage-azcopy/pull/2372))
+
+### Documentation
+
+1. Updated `--dry-run` documentation to indicate that the effects of `--overwrite` are ignored. ([#2325](https://github.com/Azure/azure-storage-azcopy/pull/2325))
+
+### Special notes
+
+1. Due to the migration from ADAL to MSAL, the tenant ID must now be set when authorizing with single tenant applications created after 10/15/2018.
+
+## Version 10.21.0-Preview
+
+### New Features
+
+1. Migrated to the latest [azblob SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob).
+2. Migrated to the latest [azfile SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile).
+3. Migrated from deprecated ADAL to MSAL through the latest [azidentity SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity).
+4. Deprecated support for object IDs in MSI. Client ID or Resource ID can be used as an alternative.
+
+### Special notes
+
+1. Due to the migration from ADAL to MSAL, the tenant ID must now be set when authorizing with single tenant applications created after 10/15/2018.
+
+## Version 10.20.1
+
+### Bug Fixes
+
+1. Fixed an issue where LMT data was not returned on the `list` command for Azure Files.
+
+## Version 10.20.0
+
+### New Features
+
+1. Mac M1/ARM64 support
+2. Force small blobs to use PutBlob for any source.
+3. Support to delete CPK encrypted blobs.
+4. Support to follow symlinks when `--preserve-smb-permissions` is enabled.
+5. Support to return LMT data on the `list` command for Azure Files (see the sketch after this list).
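+
+A hedged sketch of listing with last modified times; the `--properties` flag and its `LastModifiedTime` value are assumptions about the shipped CLI, not spelled out in this entry:
+
+```sh
+# Sketch: include last modified time in the listing output for a file share.
+azcopy list "https://<account>.file.core.windows.net/<share>?<SAS>" --properties "LastModifiedTime"
+```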
+
+### Bug Fixes
+
+1. Fixed an issue where the source trailing dot header was passed when the source of an S2S copy is not the file service.
+2. Gracefully handle File Share trailing dot paths to Windows/Blob (which do not support trailing dots) by skipping such files.
+3. Allow the trailing dot option to be ignored instead of erroring out in situations where it does not apply.
+4. Fixed issue [#2186](https://github.com/Azure/azure-storage-azcopy/issues/2186) where AzCopy would panic when using the `--include-before` and `--include-after` flags on remove file share resources.
+5. Fixed issue [#2183](https://github.com/Azure/azure-storage-azcopy/issues/2183) where AzCopy would panic when providing Azure Files URLs with no SAS token.
+6. Fixed a bug where AzCopy would automatically assume an HNS path to be a file if the path did not have a slash terminator.
+7. Fixed an issue where `--skip-version-check` would not be honored for the `login`, `logout`, and `help` commands. [#2299](https://github.com/Azure/azure-storage-azcopy/issues/2299)
+
+### Documentation
+
+1. Add a log for LMTs when a mismatch is encountered.
+2. Added documentation indicating that the `login` and `logout` commands will be deprecated in the future.
+
+### Security fixes
+
+1. Updated dependencies to address security vulnerabilities.
+
+## Version 10.19.0-Preview
+
+### New Features
+
+***Mac M1/ARM64 Support***
+
+## Version 10.19.0
+
+### New Features
+
+1. Support for the new Cold Tier feature for Azure Blobs (--block-blob-tier=Cold)
+2. Support preserving a trailing dot ('.') in names of files and directories in Azure Files (default is `--trailing-dot=Enable`)
+3. Alternate modes to preserve hash for hash-based sync ([#2214](https://github.com/Azure/azure-storage-azcopy/issues/2214)) (default is OS-dependent, either `--local-hash-storage-mode=XAttr` on MacOS/Linux or `--local-hash-storage-mode=AlternateDataStreams` on Windows); see the sketch after this release's bug-fix list.
+   - OS-specific hashing modes are expected to be available on all filesystems the source would traverse. (`user_xattr` enabled on filesystems on Unix systems, `FILE_NAMED_STREAMS` flag expected on Windows volumes)
+   - HiddenFiles provides an OS-agnostic method to store hash data; to prevent "dirtying" the source, also specify `--hash-meta-dir`, directing AzCopy to store & read hidden hash metadata files elsewhere.
+4. Support the 'force-if-readonly' flag for Sync. (`false` by default)
+5. Preserve POSIX properties while uploading or downloading from HNS enabled accounts (`--preserve-posix-properties`, `false` by default.)
+
+### Bug Fixes
+
+1. Fixed a situation where large files would hang infinitely with a low value for 'cap-mbps'
+2. Fixed issue [#2074](https://github.com/Azure/azure-storage-azcopy/issues/2074) where AzCopy would hang after cancelling
+3. Fixed issue [#1888](https://github.com/Azure/azure-storage-azcopy/issues/1888) where directories with empty names were incorrectly handled.
+4. Cancel HNS delete jobs [#2117](https://github.com/Azure/azure-storage-azcopy/issues/2117)
+5. Fixed an issue where large chunks could not be scheduled [#2228](https://github.com/Azure/azure-storage-azcopy/issues/2228)
+6. Fixed a segfault on MacOS [#1790](https://github.com/Azure/azure-storage-azcopy/issues/1790)
+7. Fixed a panic on an attempt to create the AzCopy dir [#2191](https://github.com/Azure/azure-storage-azcopy/issues/2191)
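+
+A hedged sketch of the HiddenFiles hash-storage mode described in this release's feature notes; the paths, destination URL, and the `MD5` value for `--compare-hash` are illustrative assumptions:
+
+```sh
+# Sketch: hash-based sync that stores hash metadata in a separate directory,
+# keeping the source tree itself untouched.
+azcopy sync ./src "https://<account>.blob.core.windows.net/<container>?<SAS>" \
+  --compare-hash MD5 --local-hash-storage-mode HiddenFiles --hash-meta-dir /tmp/azcopy-hashes
+```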
+
+## Version 10.18.1
+
+### Bug fixes
+
+1. Fixed a data race when utilizing hash-based sync. ([Issue 2146](https://github.com/Azure/azure-storage-azcopy/issues/2146))
+2. Fixed the destination naming behavior for container-to-container transfers while using --preserve-permissions ([Issue 2141](https://github.com/Azure/azure-storage-azcopy/issues/2141))
+3. Temporarily disabled hostname lookup before enumeration ([Issue 2144](https://github.com/Azure/azure-storage-azcopy/issues/2144))
+
+### Documentation
+
+1. Modified the `--from-to` flag documentation to be clearer ([Issue 2153](https://github.com/Azure/azure-storage-azcopy/issues/2153))
+
+## Version 10.18.0
+
+### New features
+
+1. Added support for `Content-MD5` in the `list` command. Users can now list the MD5 hashes of the blobs in the target container.
+2. Added support to resume incomplete blobs. Users can now resume the upload of a blob which was interrupted in the middle.
+3. Added support for download of POSIX properties.
+4. Added support for persisting symlink data.
+
+### Bug fixes
+
+1. Fixed [Issue 2120](https://github.com/Azure/azure-storage-azcopy/pull/2120)
+2. Fixed [Issue 2062](https://github.com/Azure/azure-storage-azcopy/pull/2062)
+3. Fixed [Issue 2046](https://github.com/Azure/azure-storage-azcopy/pull/2048)
+4. Fixed [Issue 1762](https://github.com/Azure/azure-storage-azcopy/pull/2125)
+
+### Documentation
+
+1. Added an example for `--include-pattern`.
+2. Added documentation for `--compare-hash`.
+
+### Security fixes
+
+1. CPK-related headers are now sanitized from the logs.
+2. Updated dependencies to address security vulnerabilities.
+
+## Version 10.17.0
+
+### New features
+
+1. Added support for hash-based sync. AzCopy sync can now take two new flags, `--compare-hash` and `--missing-hash-policy=Generate`, with which the user can transfer only those files which differ in their MD5 hash.
+
+### Bug fixes
+1. Fixed [issue 1994](https://github.com/Azure/azure-storage-azcopy/pull/1994): Error in calculation of block size
+2. Fixed [issue 1957](https://github.com/Azure/azure-storage-azcopy/pull/1957): Repeated authentication token refresh
+3. Fixed [issue 1870](https://github.com/Azure/azure-storage-azcopy/pull/1870): Fixed an issue where CPK would not be injected on retries
+4. Fixed [issue 1946](https://github.com/Azure/azure-storage-azcopy/issues/1946): Fixed metadata parsing
+5. Fixed [issue 1931](https://github.com/Azure/azure-storage-azcopy/issues/1931)
+
+## Version 10.16.2
+
+### Bug Fixes
+
+1. Fixed an issue where sync would always re-download files, as we were comparing against the service LMT, not the SMB LMT
+2. Fixed a crash when copying objects service to service using a user delegation SAS token
+3. Fixed a crash when deleting folders that may have a raw path string
+
+## Version 10.16.1
+
+### Documentation changes
+
+1. `all` was historically an available status option for `jobs show` but is now documented.
+
+### Bug Fixes
+
+1. Fixed a hard crash when persisting ACLs from remote filesystems on Windows.
+2. Fixed a hard crash when deleting folders containing a `%` in the name from Azure Files.
+3. Fixed a bug which made the Managed Disks data access authentication mode unusable with auto login.
+
+## Version 10.16.0
+
+### New features
+
+1. Added a time-based flag for remove to include files modified before/after a certain date/time.
+2. Added the --output-level flag, which allows users to set output verbosity.
+3. Added the --preserve-posix-properties flag, which allows users to persist the results of the statx(2)/stat(2) syscalls on upload.
+4. Implemented the setprops command, which allows users to set specific properties of Blobs, BlobFS, and Files (see the sketch after this list).
+5. Implemented multi-auth for managed disks (SAS+OAuth) when the managed disk export account requests it.
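+
+A hedged sketch of the setprops-style usage described above; the `set-properties` spelling and the `--block-blob-tier`/`--metadata` flags are assumptions about the shipped CLI, not spelled out in this entry:
+
+```sh
+# Sketch: change the access tier and metadata of an existing blob in place.
+azcopy set-properties "https://<account>.blob.core.windows.net/<container>/<blob>?<SAS>" \
+  --block-blob-tier Cool --metadata "reviewed=true"
+```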
+
+### Bug fixes
+1. Fixed [issue 1506](https://github.com/Azure/azure-storage-azcopy/issues/1506): Added an input watcher to resolve the issue, since the job could not be resumed.
+2. Fixed [issue 1794](https://github.com/Azure/azure-storage-azcopy/issues/1794): Moved log-level to root.go so log-level arguments do not get ignored.
+3. Fixed [issue 1824](https://github.com/Azure/azure-storage-azcopy/issues/1824): Avoid creating .azcopy under HOME if the plan/log location is specified elsewhere.
+4. Fixed [issue 1830](https://github.com/Azure/azure-storage-azcopy/issues/1830), [issue 1412](https://github.com/Azure/azure-storage-azcopy/issues/1418), and [issue 873](https://github.com/Azure/azure-storage-azcopy/issues/873): Improved the error message for when AzCopy cannot determine if the source is a directory.
+5. Fixed [issue 1777](https://github.com/Azure/azure-storage-azcopy/issues/1777): Fixed job list to handle the respective output-type correctly.
+6. Fixed a win64 alignment issue.
+
+## Version 10.15.0
+
+### New features
+
+1. Added support for OAuth forwarding when performing Blob -> Blob copy.
+2. Allow users to dynamically change the bandwidth cap via messages through STDIN.
+3. GCS -> Blob is now GA.
+4. Enable MinIO(S3) logs in DEBUG mode.
+5. Upgraded Go version to 1.17.9.
+
+### Bug fixes
+1. Resolved the alignment of atomicSuccessfulBytesInActiveFiles.
+2. Fixed an issue where last-write-time was still getting persisted even when --preserve-smb-info is false.
+3. Fixed an issue where concurrency was always AUTO for Azure Files despite an explicit override.
+4. Removed the outdated load command following the deprecation of the cflsload package.
+
+## Version 10.14.1
+
+### Bug fixes
+1. Fixed issue #1625 where a panic occurs during sync scanning.
+2. Fixed a remove issue when the account has versioning enabled.
+
+## Version 10.14.0
+
+### New features
+1. A feature to [permanently delete](https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob#remarks) soft-deleted
+   snapshots/versions of blobs has been added (preview): `--permanent-delete=none/snapshots/version/snapshotsandversions` (see the sketch after this list).
+2. A feature to preserve properties and ACLs when copying to the Azure file share root directory.
+3. Pin all APIs to use the default service version `2020-04-08` and let users decide the service version via the
+   `AZCOPY_DEFAULT_SERVICE_API_VERSION` environment variable. Previously, a few APIs were not respecting the `AZCOPY_DEFAULT_SERVICE_API_VERSION` environment variable.
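+
+A minimal sketch of the permanent-delete preview above (the URL and SAS are placeholders, and permanent delete must be enabled on the account):
+
+```sh
+# Sketch: permanently delete the soft-deleted snapshots of a single blob.
+azcopy remove "https://<account>.blob.core.windows.net/<container>/<blob>?<SAS>" --permanent-delete snapshots
+```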
+
+### Bug fixes
+1. Fixed an issue in which AzCopy failed to copy to a classic blob container with `preserve blob access tier`.
+2. Fixed [issue 1630](https://github.com/Azure/azure-storage-azcopy/issues/1630): AzCopy created extra empty
+   directories at the destination while performing an S2S transfer from one ADLS Gen2 account to another ADLS Gen2 account.
+3. Changed how AzCopy obtains and sets ACLs to ensure accuracy.
+4. Clarified the error message for `azcopy sync` when the source or destination cannot be detected.
+5. Report an error when client-provided key (CPK) encryption is applied to a DFS endpoint.
+6. Fixed [issue 1596](https://github.com/Azure/azure-storage-azcopy/issues/1596): AzCopy failed to transfer files
+   (with '/.' in their path) from AWS S3 to Azure Blob Storage.
+7. Fixed [issue 1474](https://github.com/Azure/azure-storage-azcopy/issues/1474): AzCopy panicked when trying to re-create an already open plan file.
+8. Improved the handling of auth errors against a single file.
+9. Fixed [issue 1640](https://github.com/Azure/azure-storage-azcopy/issues/1640): Recursive copy from a GCS bucket to an Azure container failed
+   with a `FileIgnored` error when using `--exclude-path`.
+10. Fixed [issue 1655](https://github.com/Azure/azure-storage-azcopy/issues/1655): AzCopy panicked when using the `--include-before` flag.
+11. Fixed [issue 1609](https://github.com/Azure/azure-storage-azcopy/issues/1609): `blockid` converted to lower case in AzCopy logs.
+12. Fixed [issue 1643](https://github.com/Azure/azure-storage-azcopy/issues/1643), [issue 1661](https://github.com/Azure/azure-storage-azcopy/issues/1661): Updated Golang version to `1.16.10` to fix security vulnerabilities in Golang packages.
+
+## Version 10.13.0
+
+### New features
+1. Added Arc VM support for authorization via managed identity.
+2. Widened the managed disk scenario to all md- accounts instead of just md-impexp- accounts.
+3. The concurrency is now set to AUTO for Azure Files by default to avoid throttling.
+4. Decreased the number of create directory calls for Azure Files to avoid throttling.
+5. Added the from-to flag for sync.
+
+### Bug fixes
+1. Fixed the memory usage issue with generating the list of skipped/failed transfers in JSON output.
+2. Fixed ADLS Gen2 ACL copying where intermediate folders were missed.
+3. Fixed the S3 to Blob scenario using the login command.
+4. Fixed dry-run for dfs endpoints.
+5. Fixed incorrect percentage-done shown while resuming a job.
+6. Fixed login issues on the ARM platforms.
+7. Fixed incorrect progress status for the sync command.
+8. Fixed a concurrent map access problem for the folder creation tracker.
+9. Fixed resuming with a public source.
+
+## Version 10.12.2
+
+### Bug fixes
+1. Fixed deleting blobs that are of a different type than the specified copy
+2. Fixed --delete-destination on Windows download
+
+## Version 10.12.1
+
+### Bug fixes
+1. Fixed the problem of always receiving an overwrite prompt on Azure Files folders.
+
+## Version 10.12.0
+
+### New features
+1. Added support for include and exclude regex flags, which allow pattern matching on the entire paths.
+2. Added a dry-run mode for copy, remove, and sync. This feature allows the user to visualize the changes before committing them.
+3. For SMB-aware locations, the preserve-smb-info flag is now true by default.
+4. Improved how folder LMTs are obtained, to allow time-based filters for folders.
+5. Added support for ACL copying between HNS enabled accounts. The preserve-smb-permissions flag is now deprecated and has been renamed to preserve-permissions.
+
+### Bug fixes
+1. Allow from-to to be set for the remove command.
+2. Fixed the problem where the resume command did not honor AZCOPY_DEFAULT_SERVICE_API_VERSION.
+3. Fixed the new version check.
+4. Fixed a sync issue on Windows where paths are case-insensitive.
+5. Added a prompt for invalid characters when importing from S3.
+6. Fixed a bug where public S3 buckets could not be listed.
+7. Sanitize SAS tokens in JSON output for skipped and failed transfers.
+8. Improved folder property preservation across resumes.
+
+## Version 10.11.0
+
+### New features
+1. Improved performance for copying small blobs (with size less than `256MiB`) with [Put Blob from URL](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url).
+1. Added mirror mode support in the sync operation via the `mirror-mode` flag (see the sketch after this list). The new mode disables last-modified-time based comparisons and overwrites the conflicting files and blobs at the destination if this flag is set to true.
+1. Added the flag `disable-auto-decoding` to avoid automatic decoding of URL-encoded illegal characters when uploading from Windows. These illegal characters could have been encoded as a result of downloading them onto Windows, which does not support them.
+1. Support custom MIME type mapping via the environment variable `AZCOPY_CONTENT_TYPE_MAP`.
+1. Output a message on the CLI when AzCopy detects a proxy for each domain.
+1. Interpret DFS endpoints as Blob endpoints automatically when performing service-to-service copy.
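+
+A sketch of the mirror-mode behaviour described above, assuming the flag is boolean as the wording suggests (paths, URL and SAS are placeholders):
+
+```sh
+# Sketch: make the destination mirror the source, overwriting conflicting
+# blobs regardless of their last-modified times.
+azcopy sync ./src "https://<account>.blob.core.windows.net/<container>?<SAS>" --mirror-mode=true
+```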
+
+### Bug fixes
+1. Tolerate enumeration errors for Azure Files and do not fail the entire job when a directory is deleted/modified during scanning.
+1. Log skipped transfers to the scanning log.
+1. Fixed pipe upload by adding missing fields such as Metadata, Blob Index Tags, Client Provided Key, Blob Access Tier, etc.
+1. Fixed an issue with clean up for the benchmark command.
+
+## Version 10.10.0
+
+### New features
+1. Support sync for Local/Blob <-> Azure File.
+1. Download to a temporary file path (.azDownload-[jobID]-[name]) before renaming to the original path.
+1. Support CPK by name and CPK by value.
+1. Offer a knob to disable application logging (Syslog/Windows Event Log).
+1. Trust the zonal DNS suffix for OAuth by default.
+1. Added the include-directory-stub flag for the copy command, to allow copying of blobs with metadata of `hdi_isfolder:true`.
+1. Display more fields for the list command; please refer to the help message for examples.
+1. Provide an environment variable to set the request try timeout, to allow faster retries.
+
+### Bug fixes
+1. Improved the job progress updating mechanism to improve scalability for larger jobs.
+1. Time limit the container creation step, to avoid hanging and improve UX.
+1. Set SMB info/permission again after file upload and copy, to fully preserve the integrity of the permission string and last-write-time.
+1. Fixed a module import problem for V10.
+
+## Version 10.9.0
+
+### New features
+1. Added preview support for importing from GCP Storage to Azure Block Blobs.
+1. Added scanning logs which have low output by default but can become verbose if desired, to help in debugging.
+1. Support preservation of tags when copying blobs.
+1. Added last modified time info to the list command.
+
+### Bug fixes
+1. Removed an unexpected conflict prompt for file share folders with special characters in the name, such as ";".
+
+## Version 10.8.0
+
+### New features
+1. Added an option to [disable parallel blob listing](https://github.com/Azure/azure-storage-azcopy/pull/1263)
+1. Added support for uploading [large files](https://github.com/Azure/azure-storage-azcopy/pull/1254/files) up to 4TiB. Please refer to the [public documentation](https://docs.microsoft.com/en-us/rest/api/storageservices/create-file) for more information
+1. Added support for the `include-before` flag. Refer to [this](https://github.com/Azure/azure-storage-azcopy/issues/1075) for more information
+
+### Bug fixes
+
+1. Fixed issue [#1246](https://github.com/Azure/azure-storage-azcopy/issues/1246) of a security vulnerability in the x/text package
+1. Fixed an issue with [share snapshot->share copy](https://github.com/Azure/azure-storage-azcopy/pull/1258) with SMB permissions
+
+## Version 10.7.0
+
+### New features
+1. Added support for auto-login when performing data commands (copy/sync/list/make/remove). Please refer to our documentation for more info.
+1. Added the ``blob-tags`` flag for setting [blob index tags](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-index-how-to?tabs=azure-portal) when performing the copy command (see the sketch after this list). Please note that we support setting blob tags only when tags are explicitly specified. Refer to the [public documentation](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks) to learn more.
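+
+A hedged sketch of the `blob-tags` flag above; the `key=value&key=value` tag syntax, file name, URL and SAS are illustrative assumptions:
+
+```sh
+# Sketch: upload a file while stamping blob index tags on it.
+azcopy copy ./report.csv "https://<account>.blob.core.windows.net/<container>/report.csv?<SAS>" \
+  --blob-tags "project=alpha&year=2021"
+```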
+
+### Bug fixes
+
+1. Fixed issue [#1139](https://github.com/Azure/azure-storage-azcopy/issues/1139) to preserve content-type in service-to-service transfer.
+1. Fixed an issue to allow snapshot restoring.
+1. Fixed an issue with setting the content-type of an empty file when performing the copy command.
+
+### Improvements
+1. Added support for setting the tier directly at the time of the [upload](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks) API call instead of performing a separate [set tier](https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier) API call.
+
+## Version 10.6.1
+
+### Bug fixes
+
+1. Fixed issue [#971](https://github.com/Azure/azure-storage-azcopy/issues/971) with scanning directories on a public container
+1. Fixed an issue with piping where source and destinations were reversed
+1. Allow piping to use OAuth login
+1. Fixed an issue where transfers with the ``overwrite`` flag set to ``IfSourceNewer`` would work incorrectly
+1. Fixed issue [#1139](https://github.com/Azure/azure-storage-azcopy/issues/1139), incorrect content type in BlobStorage
+1. Fixed issue [#1192](https://github.com/Azure/azure-storage-azcopy/issues/1192), an intermittent panic when an AzCopy job is aborted
+1. Fixed an issue with auto-detected content types for 0-length files
+
+## Version 10.6.0
+
+### New features
+
+1. ``azcopy sync`` now supports the persistence of ACLs between supported resources (Azure Files) using the ``--preserve-smb-permissions`` flag.
+1. ``azcopy sync`` now supports the persistence of SMB property info between supported resources (Azure Files) using the ``--preserve-smb-info`` flag. The information that can be preserved is Created Time, Last Write Time and Attributes (e.g. Read Only).
+1. Added support for [higher block & blob size](https://docs.microsoft.com/en-us/rest/api/storageservices/put-block#remarks)
+   - For service version ``2019-12-12`` or higher, the block size can now be less than or equal to ``4000 MiB``. The maximum size of a block blob therefore can be ``190.7 TiB (4000 MiB X 50,000 blocks)``
+1. Added support for [Blob Versioning](https://docs.microsoft.com/en-us/azure/storage/blobs/versioning-overview)
+   - Added the ``list-of-versions`` flag (specifies a file where each version id is listed on a separate line) to download/delete versions of a blob from Azure Storage.
+   - Download/Delete a version of a blob by directly specifying its version id in the source blob URL.
+
+### Bug fixes
+
+1. Log the input command at ERROR level.
+
+## Version 10.5.1
+
+### New features
+
+- Allow more accurate values for job status in `jobs` commands, e.g. completed with failed or skipped transfers.
+
+### Bug fixes
+
+- Fixed an issue with removing blobs with hdi_isfolder=true metadata when the list-of-files flag is used.
+- Manually unfurl symbolic links to fix a long file path issue on UNC locations.
+
+
+## Version 10.5.0
+
+### New features
+
+1. Improved scanning performance for most cases by adding support for parallel local and Blob enumeration.
+1. Added download support for the benchmark command.
+1. A new way to quickly copy only files changed after a certain date/time.
The `copy` command now accepts
+the parameter `--include-after`. It takes an ISO 8601-formatted date, and will copy only those files that were
+changed on or after the given date/time. When processing large numbers of files, this is faster than `sync` or
+`--overwrite=IfSourceNewer`. But it does require the user to specify the date to be used. E.g. `2020-08-19T15:04:00Z`
+for a UTC time, `2020-08-19T15:04` for a time in the local timezone of the machine running AzCopy,
+or `2020-08-19` for midnight (00:00), also in the local timezone.
+1. When detecting the content type for common static website files, use the commonly correct values instead of looking them up in the registry.
+1. Allow the remove command to delete blob directory stubs which have metadata hdi_isfolder=true.
+1. The S3 to Blob feature now has GA support.
+1. Added support for the load command on Linux, based on Microsoft Avere's CLFSLoad extension.
+1. Each job now logs its start time precisely in the log file, using ISO 8601 format. This is useful if you want to
+use that start date as the `--include-after` parameter to a later job on the same directory. Look for "ISO 8601 START TIME"
+in the log.
+1. Stop treating a zero-item job as a failure, to improve the user experience.
+1. Improved the naming of files being generated in the benchmark command, by reversing the digits.
+Doing so allows the names to not be an alphabetic series, which used to negatively impact performance on the service side.
+1. AzCopy can now detect when setting a blob tier would be impossible. If AzCopy cannot check the destination account type, a new transfer failure status will be set: `TierAvailabilityCheckFailure`
+
+### Bug fixes
+
+1. Fixed the persistence of last-write-time (as part of SMB info when uploading) for Azure Files. It was using the creation time erroneously.
+1. Fixed the SAS timestamp parsing issue.
+1. Transfers to the File Service with a read-only SAS were failing because AzCopy tried listing properties for the parent directories.
+The user experience is improved by ignoring this benign error and creating the parent directories directly.
+1. Fixed an issue with mixed SAS and AD authentication in the sync command.
+1. Fixed a file creation error on Linux when decompression is turned on.
+1. Fixed an issue on Windows for files with extended charsets such as [%00 - %19, %0A-%0F, %1A-%1F].
+1. Enabled recovering from unexpectedEOF errors.
+1. Fixed an issue in which attribute filters did not work if the source path contained an asterisk.
+1. Fixed an issue of an unexpected upload destination when uploading a whole drive in Windows (e.g. "D:\").
+
+
+## Version 10.4.3
+
+### Bug fixes
+
+1. Fixed a bug where AzCopy errored if a filename ended with a slash character. (E.g. a backslash at the end of a Linux filename.)
+
+## Version 10.4.2
+
+### Bug fixes
+
+1. Fixed a bug in the overwrite prompt for folders.
+
+## Version 10.4.1
+
+### New features
+
+1. Added overwrite prompt support for folder property transfers.
+1. Perform a proxy lookup when the source is S3.
+
+### Bug fixes
+
+1. When downloading from Azure Files to Windows with the `--preserve-smb-permissions` flag, sometimes
+the resulting permissions were not correct. This was fixed by limiting the concurrent SetNamedSecurityInfo operations.
+1. Added a check to avoid overwriting the file itself when performing copy operations.
+
+## Version 10.4
+
+### New features
+
+1. `azcopy copy` now supports the persistence of ACLs between supported resources (Windows and Azure Files) using the `--persist-smb-permissions` flag.
+1.
`azcopy copy` now supports the persistence of SMB property info between supported resources (Windows and Azure Files) +using the `--persist-smb-info` flag. The information that can be preserved is Created Time, Last Write Time and Attributes (e.g. Read Only). +1. AzCopy can now transfer empty folders, and also transfer the properties of folders. This applies when both the source +and destination support real folders (Blob Storage does not, because it only supports virtual folders). +1. On Windows, AzCopy can now activate the special privileges `SeBackupPrivilege` and `SeRestorePrivilege`. Most admin-level +accounts have these privileges in a deactivated state, as do all members of the "Backup Operators" security group. +If you run AzCopy as one of those users +and supply the new flag `--backup`, AzCopy will activate the privileges. (Use an elevated command prompt, if running as Admin). +At upload time, this allows AzCopy to read files +which you wouldn't otherwise have permission to see. At download time, it works with the `--preserve-smb-permissions` flag +to allow preservation of permissions where the Owner is not the user running AzCopy. The `--backup` flag will report a failure +if the privileges cannot be activated. +1. Status output from AzCopy `copy`, `sync`, `jobs list`, and `jobs status` now contains information about folders. + This includes new properties in the JSON output of copy, sync, list and jobs status commands, when `--output-type + json` is used. +1. Empty folders are deleted when using `azcopy rm` on Azure Files. +1. Snapshots of Azure File Shares are supported, for read-only access, in `copy`,`sync` and `list`. To use, add a + `sharesnapshot` parameter at end of URL for your Azure Files source. Remember to separate it from the existing query + string parameters (i.e. the SAS token) with a `&`. E.g. + `https://.file.core.windows.net/sharename?st=2020-03-03T20%3A53%3A48Z&se=2020-03-04T20%3A53%3A48Z&sp=rl&sv=2018-03-28&sr=s&sig=REDACTED&sharesnapshot=2020-03-03T20%3A24%3A13.0000000Z` +1. Benchmark mode is now supported for Azure Files and ADLS Gen 2 (in addition to the existing benchmark support for + Blob Storage). +1. A special performance optimization is introduced, but only for NON-recursive cases in this release. An `--include-pattern` that contains only `*` wildcards will be performance optimized when + querying blob storage without the recursive flag. The section before the first `*` will be used as a server-side prefix, to filter the search results more efficiently. E.g. `--include-pattern abc*` will be implemented +as a prefix search for "abc". In a more complex example, `--include-pattern abc*123`, will be implemented as a prefix search for `abc`, followed by normal filtering for all matches of `abc*123`. To non-recursively process blobs +contained directly in a container or virtual directory include `/*` at the end of the URL (before the query string). E.g. `http://account.blob.core.windows.net/container/*?`. +1. The `--cap-mbps` parameter now parses floating-point numbers. This will allow you to limit your maximum throughput to a fraction of a megabit per second. + +### Special notes + +1. A more user-friendly error message is returned when an unknown source/destination combination is supplied +1. AzCopy has upgraded to service revision `2019-02-02`. 
Users targeting local emulators, Azure Stack, or other private/special
+   instances of Azure Storage may need to intentionally downgrade their service revision using the environment variable
+   `AZCOPY_DEFAULT_SERVICE_API_VERSION`. Prior to this release, the default service revision was `2018-03-28`.
+1. For Azure Files to Azure Files transfers, --persist-smb-permissions and --persist-smb-info are available on all OS's.
+(But for uploads and downloads, those flags are only available on Windows.)
+1. AzCopy now includes a list of trusted domain suffixes for Azure Active Directory (AAD) authentication.
+   After `azcopy login`, the resulting token will only be sent to locations that appear in the list. The list is:
+   `*.core.windows.net;*.core.chinacloudapi.cn;*.core.cloudapi.de;*.core.usgovcloudapi.net`.
+   If necessary, you can add to the list with the command-line flag: `--trusted-microsoft-suffixes`. For security,
+   you should only add Microsoft Azure domains.
+1. When transferring over a million files, AzCopy will reduce its progress reporting frequency from every 2 seconds to every 2 minutes.
+
+### Breaking changes
+
+1. To accommodate interfacing with JavaScript programs (and other languages that have similar issues with number precision),
+   all the numbers in the JSON output have been converted to strings (i.e. with quotes around them).
+1. The TransferStatus value `SkippedFileAlreadyExists` has been renamed `SkippedEntityExists` and may now be used both
+   for when files are skipped and for when the setting of folder properties is skipped. This affects the input and
+   output of `azcopy jobs show` and the status values shown in the JSON output format from `copy` and `sync`.
+1. The format and content of authentication information messages, in the JSON output format, e.g.
+   "Using OAuth token for authentication", has been changed.
+
+### Bug fixes
+
+1. AzCopy can now overwrite even Read-Only and Hidden files when downloading to Windows. (The read-only case requires the use of
+   the new `--force-if-read-only` flag.)
+1. Fixed a nil dereference when a prefetching error occurs in an upload
+1. Fixed a nil dereference when attempting to close a log file while log-level is none
+1. AzCopy's scanning of Azure Files sources, for download or Service to Service transfers, is now much faster.
+1. Sources and destinations that are identified by their IPv4 address can now be used. This enables usage with storage
+   emulators. Note that the `from-to` flag is typically needed when using such sources or destinations. E.g. `--from-to
+   BlobLocal` if downloading from a blob storage emulator to local disk.
+1. Filenames containing the character `:` can now safely be downloaded on Windows and uploaded to Azure Files
+1. Objects with names containing `+` can now safely be used in imported S3 object names
+1. The `check-length` flag is now exposed in benchmark mode, so that length checking can be turned off for more speed,
+   when benchmarking with small file sizes. (When using large file sizes, the overhead of the length check is
+   insignificant.)
+1. The in-app documentation for Service Principal Authentication has been corrected to include the application-id
+   parameter.
+1. ALL filter types are now disallowed when running `azcopy rm` against ADLS Gen2 endpoints. Previously
+include/exclude patterns were disallowed, but exclude-path was not. That was incorrect. All should have been
+disallowed because none (other than include-path) are respected.
+1.
+
+## Version 10.3.4
+
+### New features
+
+1. Fixed a feature parity issue by adding support for the "ifSourceNewer" option on the `overwrite` flag. It serves as a replacement for the '/XO' flag in V8.
+
+### Bug fixes
+
+1. Fixed the `jobs clean` command on Windows, which was previously crashing when the `with-status` flag was used.
+
+## Version 10.3.3
+
+### New features
+
+1. `azcopy list` is now supported on Azure Files and ADLS Gen 2, in addition to Blob Storage.
+1. The `--exclude-path` flag is now supported in the `sync` command.
+1. Added new environment variable `AZCOPY_USER_AGENT_PREFIX` to allow a prefix to be added to the user agent string.
+
+### Bug fixes
+
+1. Content properties (such as Content-Encoding and Cache-Control) are now included when syncing Blob -> Blob and Azure
+   Files -> Azure Files.
+1. Custom metadata is now included when syncing Blob -> Blob and Azure Files -> Azure Files.
+1. The `azcopy list` command no longer repeats parts of its output. (Previously it would sometimes repeat itself and show the same blob multiple times in the output.)
+1. The `--aad-endpoint` parameter is now visible, instead of hidden. It allows use of Azure Active Directory
+   authentication in national clouds (e.g. Azure China).
+1. On Windows, AzCopy now caches information about which proxy server should be used, instead of looking it up every
+   time. This significantly reduces CPU
+   usage when transferring many small files. It also solves a rare bug when transfers got permanently "stuck" with
+   one uncompleted file.
+1. When uploading to a write-only destination, there is now a clearer error message when the built-in file length check
+   fails. The message says how to fix the problem using `--check-length=false`.
+1. Size checks on managed disk imports are now clearer, and all run at the start of the import process instead of the end.
+
+## Version 10.3.2
+
+### Bug fixes
+
+1. Jobs could not be cancelled while scanning was still in progress.
+1. Downloading large managed disks (8 TB and above) failed with errors.
+1. Downloading large page blobs might make no progress for the first 15 or 20 minutes.
+1. There was a rare error where the final output could under-report the total number of files in the job. That error has been fixed.
+1. When using JSON output mode, the output from the rm command on ADLS Gen2 was inconsistent with the output from other commands.
+1. After authentication errors, files in progress were not cleaned up (deleted) at the destination. If there was an
+   authentication failure during a job (e.g. a SAS token expired while in use) this could result in files being left
+   behind that had incomplete contents (even though their size looked correct).
+1. The AUTO concurrency option, for automatically tuning concurrency as AzCopy runs, started working too late if scanning (aka enumeration) took a long time. This resulted in reduced throughput when using this setting.
+1. It was not possible to access the root of Windows drives with lowercase drive letters. E.g. `d:\`
+1. Service to Service transfers would fail when using an environment variable to specify OAuth authentication.
+1. Certain errors parsing URLs were not reported clearly.
+1. When downloading to NUL (/dev/null on Linux), files of zero length no longer trigger errors. (Downloads to NUL can be used in performance testing and bulk MD5 checking.)
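+
+For context on the last fix: the Go equivalent of a NUL//dev/null sink is `io.Discard`, and a zero-length body should complete without error (illustrative only, not AzCopy's code):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+func main() {
+	body := strings.NewReader("")       // stands in for a zero-length download body
+	n, err := io.Copy(io.Discard, body) // discard the bytes, keep only the count
+	fmt.Println(n, err)                 // 0 <nil>
+}
+```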
+
+## Version 10.3.1
+
+### New features
+
+1. Added helpful deprecation notice for legacy include/exclude flags.
+1. Added back request ID at log level INFO.
+1. Added back cancel-from-stdin option for partner integration.
+1. Added flag to define delete snapshot options for the remove command.
+
+### Bug fixes
+
+1. Fixed race condition in shutdown of decompressingWriter.
+1. Made progress reporting more accurate.
+
+## Version 10.3.0
+
+### Breaking changes
+
+1. The `*` character is no longer supported as a wildcard in URLs, except for the two exceptions
+   noted below. It remains supported in local file paths.
+   1. The first exception is that `/*` is still allowed at the very end of the "path" section of a
+      URL. This is illustrated by the difference between these two source URLs:
+      `https://account/container/virtualDir?SAS` and
+      `https://account/container/virtualDir/*?SAS`. The former copies the virtual directory
+      `virtualDir` by creating a folder of that name at the destination. The latter copies the
+      _contents_ of `virtualDir` directly into the target without creating a folder named
+      "virtualDir". (See the sketch after this list.)
+   1. The second exception is when you are transferring multiple _whole_ containers (or S3 buckets). You can
+      use * as a wildcard in the container or bucket name.
+1. The `--include` and `--exclude` parameters have been replaced by `--include-pattern` and
+   `--exclude-pattern` (for filenames) and `--include-path` and `--exclude-path` (for paths,
+   including directory and filenames).
+   The new parameters have behaviour that is better defined in complex situations (such as
+   recursion). The `*` wildcard is supported in the pattern parameters, but _not_ in the path ones.
+1. There have been two breaking changes to the JSON output that is produced if you request
+   JSON-formatted output. The `sync` command's output in JSON has changed for consistency reasons,
+   and the final message type for `copy` and `sync` has changed its name from `Exit` to `EndOfJob`.
+   Tools using the JSON output format to integrate AzCopy should be aware.
+1. If downloading to "null" on Windows the target must now be named "NUL", according to standard
+   Windows conventions. "/dev/null" remains correct on Linux. (This feature can be used to test
+   throughput or check MD5s without saving the downloaded data.)
+1. The file format of the (still undocumented) `--list-of-files` parameter is changed. (It remains
+   undocumented because, for simplicity, users are
+   encouraged to use the new `--include-pattern` and `--include-path` parameters instead.)
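+
+A hypothetical sketch of the one remaining URL wildcard rule (not AzCopy's actual code; `copyContentsOnly` is an invented helper): a trailing `/*` means "copy the directory's contents", while its absence means "copy the directory itself".
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// copyContentsOnly strips a trailing "/*" and reports whether the caller
+// asked for the directory's contents rather than the directory itself.
+func copyContentsOnly(urlPath string) (cleanPath string, contentsOnly bool) {
+	if strings.HasSuffix(urlPath, "/*") {
+		return strings.TrimSuffix(urlPath, "/*"), true
+	}
+	return urlPath, false
+}
+
+func main() {
+	fmt.Println(copyContentsOnly("/container/virtualDir/*")) // /container/virtualDir true
+	fmt.Println(copyContentsOnly("/container/virtualDir"))   // /container/virtualDir false
+}
+```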
+
+### New features
+
+1. `sync` is supported from Blob Storage to Blob Storage, and from Azure Files to Azure Files.
+1. `copy` is supported from Azure Files to Azure Files, and from Blob Storage to Azure Files.
+1. Percent complete is displayed as each job runs.
+1. VHD files are auto-detected as page blobs.
+1. A new benchmark mode allows quick and easy performance benchmarking of your network connection to
+   Blob Storage. Run AzCopy with the parameters `bench --help` for details. This feature is in
+   Preview status.
+1. The location for AzCopy's "plan" files can be specified with the environment variable
+   `AZCOPY_JOB_PLAN_LOCATION`. (If you move the plan files and also move the log files using the existing
+   `AZCOPY_LOG_LOCATION`, then AzCopy will not store anything under your home directory on Linux and
+   MacOS. On Windows AzCopy will keep just one small encrypted file under `c:\users\<username>\.azcopy`.)
+1. Log files and plan files can be cleaned up to save disk space, using AzCopy's new `jobs rm` and
+   `jobs clean` commands.
+1. When listing jobs with `jobs show`, the status of each job is included in the output.
+1. The `--overwrite` parameter now supports the value of "prompt" to prompt the user on a
+   file-by-file basis. (The old values of true and false are also supported.)
+1. The environment variable `AZCOPY_CONCURRENCY_VALUE` can now be set to "AUTO". This is expected to be
+   useful for customers with small networks, for those running AzCopy on
+   moderately-powered machines, and for transfers of blobs between accounts. This feature is in preview status.
+1. When uploading from Windows, files can be filtered by Windows-specific file attributes (such as
+   "Archive", "Hidden" etc.)
+1. Memory usage can be controlled by setting the new environment variable `AZCOPY_BUFFER_GB`.
+   Decimal values are supported. Actual usage will be the value specified, plus some overhead.
+1. An extra integrity check has been added: the length of the
+   completed destination file is checked against that of the source.
+1. When downloading, AzCopy can automatically decompress blobs (or Azure Files) that have a
+   `Content-Encoding` of `gzip` or `deflate`. To enable this behaviour, supply the `--decompress`
+   parameter.
+1. The number of disk files accessed concurrently can be controlled with the new
+   `AZCOPY_CONCURRENT_FILES` environment variable. This is an advanced setting, which generally
+   should not be modified. It does not affect the number of HTTP connections, which is still
+   controlled by `AZCOPY_CONCURRENCY_VALUE`.
+1. The values of key environment variables are listed at the start of the log file.
+1. An official Windows 32-bit build is now released, in addition to the usual 64-bit builds for
+   Linux, Mac and Windows.
+1. If you need to refer to a literal `*` in the name of a blob or Azure Files file, e.g. for a blob
+   named "\*", escape the `*` using standard URL escaping. To do this, replace the `*` with the following
+   character sequence: %2A (see the sketch after this list).
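+
+A tiny sketch of that escaping rule (illustrative; `escapeLiteralStar` is a hypothetical helper, not an AzCopy function):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// escapeLiteralStar percent-encodes a literal '*' in a blob name so it
+// is not interpreted as a wildcard when placed in a URL.
+func escapeLiteralStar(blobName string) string {
+	return strings.ReplaceAll(blobName, "*", "%2A")
+}
+
+func main() {
+	fmt.Println(escapeLiteralStar("report-*.csv")) // report-%2A.csv
+}
+```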
+
+### Bug fixes
+
+1. When an AzCopy job is cancelled with CTRL-C, any partially-updated files are now deleted from
+   the destination. Previous releases of AzCopy v10 would just immediately exit, leaving destination files
+   potentially containing an unknown mix of old and new data. E.g. if uploading a new version of a file
+   over top of an old version, cancellation could result in the file being left with some parts
+   containing old data, and some containing new data. This issue affected downloads to local disk and
+   uploads to Azure Files, ADLS Gen 2, page blobs and append blobs. The bug did not affect transfers to block
+   blobs.
+1. If a transfer to a brand-new block blob is cancelled before it completes, the uncommitted blocks are now cleaned up
+   immediately. Previous versions would leave them, for automatic garbage collection to delete 7 days later.
+1. Long pathnames (over 260 characters) are now supported everywhere on Windows, including on UNC
+   shares.
+1. Safety is improved in the rare cases where two source files correspond to just one destination file. This can happen
+   when transferring to a case-insensitive destination, when the new `--decompress` flag removes an extension but
+   there's already a file without the extension, and in very rare cases related to escaping of filenames with illegal
+   characters. The bug fix ensures that the single resulting file contains data from only _one_ of the source files.
+1. When supplying a `--content-type` on the command line it's no longer necessary to also specify
+   `--no-guess-mime-type`.
+1. There is now no hard-coded limit on the number of files that can be processed by the `sync`
+   command. The number that can be processed (without paging of memory to disk) depends only on the
+   amount of RAM available.
+1. Transfer of sparse page blobs has been improved, so that for many sparse page blobs only the
+   populated pages will be transferred. The one exception is blobs which have had a very high number
+   of updates, but which still have significant sparse sections. Those blobs may not be
+   transferred optimally in this release. Handling of such blobs will be improved in a future release.
+1. Accessing the root of a drive (e.g. `d:\`) no longer causes an error.
+1. On slow networks, there are no longer excessive log messages sent to the Event Log (Windows) and
+   SysLog (Linux).
+1. If AzCopy can't check whether it's up to date, it will no longer hang. (Previously, it could hang
+   if its version check URL, https://aka.ms/azcopyv10-version-metadata, was unreachable due to
+   network routing restrictions.)
+1. High concurrency values are supported (e.g. over 1000 connections). While these values are seldom
+   needed, they are occasionally useful - e.g. for service-to-service transfer of files around 1 MB
+   in size.
+1. Files skipped due to "overwrite=false" are no longer logged as "failed".
+1. Logging is more concise at the default log level.
+1. Error message text, returned by Blob and File services, is now included in the log.
+1. A log file is created for copy jobs even when there was nothing to copy.
+1. In the log, UPLOAD SUCCESSFUL messages now include the name of the successful file.
+1. Clear error messages are given to show that AzCopy does not currently support Customer-Provided
+   Encryption Keys.
+1. On Windows, downloading a filename with characters not supported by the operating system will
+   result in those characters being URL-encoded to construct a Windows-compatible filename. The
+   encoding process is reversed if the file is uploaded.
+1. Uploading a single file to ADLS Gen 2 works now.
+1. The `remove` command no longer hangs when removing blobs that have snapshots. Instead it will fail to
+   delete them, and report the failures clearly.
+1. Jobs downloading from ADLS Gen 2 that result in no scheduled transfers will no longer hang.
+
+
+## Version 10.2.1
+
+### Bug fix
+
+1. Fixed outputting of the error message for SPN login failures.
+
+## Version 10.2.0
+
+### Bug fixes
+
+1. Security: fixed signature redaction in logs to include all error types: the log entries for network failures and HTTP errors could include SAS tokens. In previous releases, the SAS tokens were not always redacted correctly and could be written to the AzCopy log file and also to the Windows Event Log or the Linux Syslog. Now, SAS tokens are correctly redacted when logging those errors. (See the sketch after this list.) Note that errors returned by the Storage service itself – such as authentication errors and bad container names – were already redacted correctly.
+1. An error is now raised when using Azure Files without a SAS token (invalid auth configuration).
+1. AzCopy v10 now outputs a sensible error & warning when attempting to authenticate a storage account business-to-business.
+1. `--log-level=none` now drops no logs, and has a listing in `--help`.
+1. Fixed bug where piping was not picking up the service version override, making it not work well against Azure Stack.
+1. Fixed a timeout when uploading particularly large files to ADLS Gen 2.
+1. Fixed single wildcard match uploads.
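+
+A hedged sketch of the redaction described in the first fix (the real logic lives in AzCopy's logging layer; this regex-based helper is only an illustration):
+
+```go
+package main
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// sigPattern matches the value of the SAS "sig" query parameter.
+var sigPattern = regexp.MustCompile(`(?i)(sig=)[^&]+`)
+
+// redactSAS blanks out the SAS signature before a URL is logged.
+func redactSAS(raw string) string {
+	return sigPattern.ReplaceAllString(raw, "${1}REDACTED")
+}
+
+func main() {
+	fmt.Println(redactSAS("https://acct.blob.core.windows.net/c/b?sv=2018-03-28&sig=abc123"))
+	// https://acct.blob.core.windows.net/c/b?sv=2018-03-28&sig=REDACTED
+}
+```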
+
+### New features
+
+1. Enabled copying from page/block/append blob to another blob of a different type.
+1. AzCopy now grabs proxy details (sans authentication) from the Windows Registry using `mattn/go-ieproxy`.
+1. Service Principal Authentication is now available under `azcopy login`; check `azcopy env` for details on client secrets/cert passwords.
+1. SAS tokens are supported on HNS (Hierarchical Namespace/Azure Data Lake Generation 2) Storage Accounts.
+1. Added support for custom headers on ADLS Gen 2.
+1. Added support for fractional block size for copy and sync.
+1. Use different log output for skipped files (so they don't look like failures).
+1. Added bandwidth cap (--cap-mbps) to limit AzCopy's network usage; check `azcopy cp -h` for details.
+1. Added ADLS Gen2 support for the rm command.
+
+## Version 10.1.2
+
+### Breaking change
+
+1. Jobs created with earlier releases cannot be resumed with this release. We recommend
+you update to this release only when you have no partially-completed jobs that you want to resume.
+
+### Bug fix
+
+1. Files with `Content-Encoding: gzip` are now downloaded in compressed form. Previous versions tried to save a
+   decompressed version of the file. But they incorrectly truncated it at the original _compressed_ length, so the
+   downloaded file was not complete.
+
+   By changing AzCopy to save the compressed version, that problem is solved, and Content-MD5 checks now work for such files. (It is
+   assumed that the Content-MD5 hash is the hash of the _compressed_ file.)
+
+### New features
+
+1. Headers for Content-Disposition, Content-Language and Cache-Control can now be set when uploading
+files to Blob Storage and to Azure Files. Run `azcopy copy --help` to see command line parameter
+information, including those needed to set the new headers.
+1. On-screen job summary is output to the log file at end of job, so that the log will include those summary statistics.
+
+## Version 10.1.1
+
+### Bug fixes
+
+1. Fixed typo in local traverser (error handling in walk).
+1. Fixed memory alignment issue for atomic functions on 32 bit system.
+
+## Version 10.1.0 (GA)
+
+### Breaking changes
+
+1. The `--block-size` parameter has been replaced by `--block-size-mb`. The old parameter took a number of _bytes_; the
+   new one takes a number of Megabytes (MiB).
+1. The following command line parameters have been renamed, for clarity:
+   * `--output` is now `--output-type`
+   * `--md5-validation` is now called `--check-md5`
+   * `--s2s-source-change-validation` is now called `--s2s-detect-source-changed`
+   * `--s2s-invalid-metadata-handle` is now called `--s2s-handle-invalid-metadata`
+   * `--quota` (in the `make` command) is now called `--quota-gb`. Note that the values were always in GB; the new name
+     simply clarifies that fact.
+
+### New features
+
+1. AzCopy is now able to be configured to use older API versions. This enables (limited) support for Azure Stack.
+1. The listing command now shows file sizes.
+
+### Bug fixes
+
+1. AzCopy v10 now works correctly with ADLS Gen 2 folders that contain spaces in their names.
+1. When cancelling with CTRL-C, the status of in-progress transfers is now correctly recorded.
+1. For security, the Service-to-Service (S2S) feature will only work if both the source and destination connections are
+   HTTPS.
+1. Use of the `--overwrite` parameter is clarified in the in-application help.
+1. Fixed incorrect behavior with setting file descriptor limits on platforms including OS X and BSD.
+1. On Linux and OS X, log files are now created with the same file permissions as all other files created by AzCopy.
+1. ThirdPartyNotice.txt is updated.
+1. Load DLL in a more secure manner compatible with Go's sysdll registration.
+1. Fixed support for relative paths and shorthands.
+1. Fixed bug in pattern matching for blob download when recursive is off.
+
+## Version 10.0.9 (Release Candidate)
+
+### Breaking changes
+
+1. For creating MD5 hashes when uploading, version 10.x now has the OPPOSITE default to
+   AzCopy 8.x. Specifically, as of version 10.0.9, MD5 hashes are NOT created by default. To create
+   Content-MD5 hashes when uploading, you must now specify `--put-md5` on the command line.
+
+### New features
+
+1. Can migrate data directly from Amazon Web Services (AWS). In this high-performance data path
+   the data is read directly from AWS by the Azure Storage service. It does not need to pass through
+   the machine running AzCopy. The copy happens synchronously, so you can see its exact progress.
+1. Can migrate data directly from Azure Files or Azure Blobs (any blob type) to Azure Blobs (any
+   blob type). In this high-performance data path the data is read directly from the source by the
+   Azure Storage service. It does not need to pass through the machine running AzCopy. The copy
+   happens synchronously, so you can see its exact progress.
+1. Sync command prompts with 4 options about deleting unneeded files from the target: Yes, No, All or
+   None. (Deletion only happens if the `--delete-destination` flag is specified).
+1. Can download to /dev/null. This throws the data away - but is useful for testing raw network
+   performance unconstrained by disk, and also for validating MD5 hashes in bulk (when run in a cloud
+   VM in the same region as the Storage account).
+
+### Bug fixes
+
+1. Fixed memory leak when downloading large files.
+1. Fixed performance when downloading a single large file.
+1. Fixed bug with "too many open files" on Linux.
+1. Fixed memory leak when uploading sparse files (files with big blocks of zeros) to Page Blobs and
+   Azure Files.
+1. Fixed issue where application crashed after being throttled by Azure Storage Service. (The
+   primary fix here is for Page Blobs, but a secondary part of the fix also helps with Block Blobs.)
+1. Fixed functionality and usability issues with the `remove` command.
+1. Improved performance for short-duration jobs (e.g. those lasting less than a minute).
+1. Prevented an unnecessary error message that sometimes appeared when cancelling a job.
+1. Various improvements to the online help and error messages.
+
+
+## Version 10.0.8
+
+1. Rewrote sync command to eliminate numerous bugs and improve usability (see wiki for details)
+1. Implemented various improvements to memory management
+1. Added MD5 validation support (available options: NoCheck, LogOnly, FailIfDifferent, FailIfDifferentOrMissing; see the sketch below)
+1. Added last modified time checks for source to guarantee transfer integrity
+1. Formalized outputs in JSON and elevated the output flag to the root level
+1. Eliminated outputs to STDERR (for new version notifications), which were causing problems for certain CI systems
+1. Improved log format for Windows
+1. Optimized plan file sizes
+1. Improved command line parameter names as follows (to be consistent with naming pattern of other parameters):
+   1. fromTo -> from-to
+   1. blobType -> blob-type
+   1. excludedBlobType -> excluded-blob-type
+   1. outputRaw (in "list" command) -> output
+   1. stdIn-enable (reserved for internal use) -> stdin-enable
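+
+A sketch of the MD5 validation decision table (option names from the entry above; the control flow is illustrative, not AzCopy's actual implementation):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+)
+
+type md5Option int
+
+const (
+	NoCheck md5Option = iota
+	LogOnly
+	FailIfDifferent
+	FailIfDifferentOrMissing
+)
+
+// validate applies the chosen option to a stored and a freshly computed hash.
+func validate(opt md5Option, stored, computed []byte) error {
+	switch {
+	case opt == NoCheck:
+		return nil
+	case len(stored) == 0: // no Content-MD5 on the source
+		if opt == FailIfDifferentOrMissing {
+			return fmt.Errorf("no Content-MD5 stored on the source")
+		}
+		return nil
+	case !bytes.Equal(stored, computed):
+		if opt == LogOnly {
+			fmt.Println("warning: MD5 mismatch") // logged, not fatal
+			return nil
+		}
+		return fmt.Errorf("MD5 mismatch")
+	}
+	return nil
+}
+
+func main() {
+	fmt.Println(validate(FailIfDifferent, []byte("a"), []byte("b"))) // MD5 mismatch
+}
+```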
diff --git a/README.md b/README.md
index a14b45492..352d5893f 100644
--- a/README.md
+++ b/README.md
@@ -64,19 +64,19 @@ The general format of the AzCopy commands is: `azcopy [command] [arguments] --[f
 * `copy` - Copies source data to a destination location. The supported directions are:
     - Local File System <-> Azure Blob (SAS or OAuth authentication)
-    - Local File System <-> Azure Files (Share/directory SAS authentication)
+    - Local File System <-> Azure Files (Share/directory SAS or OAuth authentication)
     - Local File System <-> Azure Data Lake Storage (ADLS Gen2) (SAS, OAuth, or SharedKey authentication)
-    - Azure Blob (SAS or public) -> Azure Blob (SAS or OAuth authentication)
-    - Azure Blob (SAS or public) -> Azure Files (SAS)
-    - Azure Files (SAS) -> Azure Files (SAS)
-    - Azure Files (SAS) -> Azure Blob (SAS or OAuth authentication)
+    - Azure Blob (SAS, OAuth or public authentication) -> Azure Blob (SAS or OAuth authentication)
+    - Azure Blob (SAS, OAuth or public authentication) -> Azure Files (SAS or OAuth authentication)
+    - Azure Files (SAS or OAuth authentication) -> Azure Files (SAS or OAuth authentication)
+    - Azure Files (SAS or OAuth authentication) -> Azure Blob (SAS or OAuth authentication)
     - AWS S3 (Access Key) -> Azure Block Blob (SAS or OAuth authentication)
     - Google Cloud Storage (Service Account Key) -> Azure Block Blob (SAS or OAuth authentication) [Preview]
 * `sync` - Replicate source to the destination location. The supported directions are:
     - Local File System <-> Azure Blob (SAS or OAuth authentication)
-    - Local File System <-> Azure Files (Share/directory SAS authentication)
-    - Azure Blob (SAS or public) -> Azure Files (SAS)
+    - Local File System <-> Azure Files (Share/directory SAS or OAuth authentication)
+    - Azure Blob (SAS, OAuth or public authentication) -> Azure Files (SAS or OAuth authentication)
 * `login` - Log in to Azure Active Directory (AD) to access Azure Storage resources.
@@ -151,4 +151,4 @@ provided by the bot. You will only need to do this once across all repos using o
 This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
 For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
-contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
\ No newline at end of file
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 36eca2819..e9d62dbcd 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -114,364 +114,15 @@ jobs: verbosity: 'Verbose' alertWarningLevel: 'High' - - job: E2E_Test - timeoutInMinutes: 360 - # Creating strategies for GOOS: Windows Server 2019 /macOS X Mojave 10.15/Ubuntu 20.04 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-latest' - build_name: 'azcopy_linux_amd64' - display_name: "Linux" - Windows: - imageName: 'windows-latest' - build_name: 'azcopy_windows_amd64.exe' - display_name: "Windows" - type: 'windows' - MacOS: - imageName: 'macos-latest' - build_name: 'azcopy_darwin_amd64' - display_name: "MacOS" - pool: - vmImage: $(imageName) - - steps: - - task: PowerShell@2 - inputs: - targetType: 'inline' - script: 'Install-Module -Name Az.Accounts -Scope CurrentUser -Repository PSGallery -AllowClobber -Force' - pwsh: 'true' - displayName: 'Install Powershell Az Module' - - task: GoTool@0 - inputs: - version: $(AZCOPY_GOLANG_VERSION_COVERAGE) - - script: | - go install github.com/jstemmer/go-junit-report@v0.9.1 - go install github.com/axw/gocov/gocov@v1.1.0 - go install github.com/AlekSi/gocov-xml@v1.0.0 - go install github.com/matm/gocov-html@v0.0.0-20200509184451-71874e2e203b - displayName: 'Installing dependencies' - - bash: | - echo "##vso[task.setvariable variable=CGO_ENABLED]0" - displayName: 'Set CGO_ENABLED for Windows' - condition: eq(variables.type, 'windows') - - bash: | - npm install -g azurite - mkdir azurite - azurite --silent --location azurite --debug azurite\debug.log & - displayName: 'Install and Run Azurite' - # Running E2E Tests on AMD64 - - task: AzureCLI@2 - inputs: - azureSubscription: azcopytestworkloadidentity - addSpnToEnvironment: true - scriptType: pscore - scriptLocation: inlineScript - inlineScript: | - # Create coverage directory - if (-Not (Test-Path -Path "./coverage")) { - New-Item -Path "./coverage" -ItemType Directory - } - - # Print "Building executable" - Write-Output "Building executable" - - # Set platform-specific environment variables and tags - $tags = "" - $suffix = "" - $build_name = "" - $display_name = "" - if ($IsWindows) { - $env:GOOS = "windows" - $env:GOARCH = "amd64" - $suffix = ".exe" - $build_name = "azcopy_windows_amd64.exe" - $display_name = "Windows" - } elseif ($IsLinux) { - $env:GOOS = "linux" - $env:GOARCH = "amd64" - $tags = "netgo" - $build_name = "azcopy_linux_amd64" - $display_name = "Linux" - } elseif ($IsMacOS) { - $env:GOOS = "darwin" - $env:GOARCH = "amd64" - $env:CGO_ENABLED = "1" - $build_name = "azcopy_darwin_amd64" - $display_name = "MacOS" - } else { - Write-Error "Unsupported operating system" - exit 1 - } - - # Build the Go program - if ($tags -ne "") { - go build -cover -tags $tags -o $build_name - } else { - go build -cover -o $build_name - } - - # Print "Running tests" - Write-Output "Running tests" - - # Run tests and pipe output to test.txt - go test -timeout=2h -v -tags olde2etest ./e2etest | Tee-Object -FilePath test.txt - - # Save the exit code from the previous command - $exitCode = $LASTEXITCODE - - # Print the contents of test.txt - # Get-Content test.txt - - # Print "Generating junit report" - Write-Output "Generating junit report" - - # Pipe info in test.txt to go-junit-report and save output to report.xml - Get-Content test.txt | & "$(go env GOPATH)/bin/go-junit-report" > "${display_name}_report.xml" - - # Print "Formatting coverage directory to legacy txt format" - Write-Output "Formatting coverage directory to legacy txt 
format" - - # Format coverage data to text format - go tool covdata textfmt -i=coverage -o "${display_name}_coverage.txt" - - # Print "Formatting coverage to json format" - Write-Output "Formatting coverage to json format" - - # Convert coverage.txt to coverage.json - & "$(go env GOPATH)/bin/gocov$suffix" convert "${display_name}_coverage.txt" > "${display_name}_coverage.json" - - # Print "Formatting coverage to xml format" - Write-Output "Formatting coverage to xml format" - - # Convert coverage.json to coverage.xml - Get-Content "${display_name}_coverage.json" | & "$(go env GOPATH)/bin/gocov-xml$suffix" > "${display_name}_coverage.xml" - - # Return the exit code from step 5 - exit $exitCode - env: - AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) - AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) - AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS) - AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS) - AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME) - AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY) - AZCOPY_E2E_LOG_OUTPUT: '$(System.DefaultWorkingDirectory)/logs' - AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG) - AZCOPY_E2E_OAUTH_MANAGED_DISK_SNAPSHOT_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_SNAPSHOT_CONFIG) - AZCOPY_E2E_STD_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_CONFIG) - AZCOPY_E2E_STD_MANAGED_DISK_SNAPSHOT_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_SNAPSHOT_CONFIG) - CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY) - CPK_ENCRYPTION_KEY_SHA256: $(CPK_ENCRYPTION_KEY_SHA256) - AZCOPY_E2E_EXECUTABLE_PATH: $(System.DefaultWorkingDirectory)/$(build_name) - GOCOVERDIR: '$(System.DefaultWorkingDirectory)/coverage' - NEW_E2E_SUBSCRIPTION_ID: $(AZCOPY_NEW_E2E_SUBSCRIPTION_ID) - NEW_E2E_AZCOPY_PATH: $(System.DefaultWorkingDirectory)/$(build_name) - NEW_E2E_ENVIRONMENT: "AzurePipeline" - displayName: 'E2E Test $(display_name) - AMD64 with Workload Identity' - - - task: PublishBuildArtifacts@1 - displayName: 'Publish logs' - condition: succeededOrFailed() - inputs: - pathToPublish: '$(System.DefaultWorkingDirectory)/logs' - artifactName: logs - - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testRunner: JUnit - testResultsFiles: $(System.DefaultWorkingDirectory)/**/$(display_name)_report.xml - testRunTitle: 'Go on $(display_name)' - - - task: PublishCodeCoverageResults@1 - condition: succeededOrFailed() - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: $(System.DefaultWorkingDirectory)/**/$(display_name)_coverage.xml - additionalCodeCoverageFiles: $(System.DefaultWorkingDirectory)/**/$(display_name)_coverage.html - - - job: New_E2E_Framework - timeoutInMinutes: 360 - # Creating strategies for GOOS: Windows Server 2019 /macOS X Mojave 10.15/Ubuntu 20.04 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-latest' - build_name: 'azcopy_linux_amd64' - display_name: "Linux" - Windows: - imageName: 'windows-latest' - build_name: 'azcopy_windows_amd64.exe' - display_name: "Windows" - type: 'windows' - MacOS: - imageName: 'macos-latest' - build_name: 'azcopy_darwin_amd64' - display_name: "MacOS" - pool: - vmImage: $(imageName) - - steps: - - task: PowerShell@2 - inputs: - targetType: 'inline' - script: 'Install-Module -Name Az.Accounts -Scope CurrentUser -Repository PSGallery -AllowClobber -Force' - pwsh: 'true' - displayName: 'Install Powershell Az Module' - - task: GoTool@0 - inputs: - version: $(AZCOPY_GOLANG_VERSION_COVERAGE) - - script: | - go install 
github.com/jstemmer/go-junit-report@v0.9.1 - go install github.com/axw/gocov/gocov@v1.1.0 - go install github.com/AlekSi/gocov-xml@v1.0.0 - go install github.com/matm/gocov-html@v0.0.0-20200509184451-71874e2e203b - displayName: 'Installing dependencies' - - bash: | - echo "##vso[task.setvariable variable=CGO_ENABLED]0" - displayName: 'Set CGO_ENABLED for Windows' - condition: eq(variables.type, 'windows') - - bash: | - npm install -g azurite - mkdir azurite - azurite --silent --location azurite --debug azurite\debug.log & - displayName: 'Install and Run Azurite' - # Running E2E Tests on AMD64 - - task: AzureCLI@2 - inputs: - azureSubscription: azcopytestworkloadidentity - addSpnToEnvironment: true - scriptType: pscore - scriptLocation: inlineScript - inlineScript: | - # Create coverage directory - if (-Not (Test-Path -Path "./coverage")) { - New-Item -Path "./coverage" -ItemType Directory - } - - # Create log directory - if (-Not (Test-Path -Path "${env:AZCOPY_E2E_LOG_OUTPUT}")) { - New-Item -Path "${env:AZCOPY_E2E_LOG_OUTPUT}" -ItemType Directory - } - - # Print "Building executable" - Write-Output "Building executable" - - # Set platform-specific environment variables and tags - $tags = "" - $suffix = "" - $build_name = "" - $display_name = "" - if ($IsWindows) { - $env:GOOS = "windows" - $env:GOARCH = "amd64" - $suffix = ".exe" - $build_name = "azcopy_windows_amd64.exe" - $display_name = "Windows" - } elseif ($IsLinux) { - $env:GOOS = "linux" - $env:GOARCH = "amd64" - $tags = "netgo" - $build_name = "azcopy_linux_amd64" - $display_name = "Linux" - } elseif ($IsMacOS) { - $env:GOOS = "darwin" - $env:GOARCH = "amd64" - $env:CGO_ENABLED = "1" - $build_name = "azcopy_darwin_amd64" - $display_name = "MacOS" - } else { - Write-Error "Unsupported operating system" - exit 1 - } - - # Build the Go program - if ($tags -ne "") { - go build -cover -tags $tags -o $build_name - } else { - go build -cover -o $build_name - } - - # Print "Running tests" - Write-Output "Running tests" - - # Run tests and pipe output to test.txt - go test -timeout=2h -v -run "TestNewE2E/.*" ./e2etest | Tee-Object -FilePath test.txt - - # Save the exit code from the previous command - $exitCode = $LASTEXITCODE - - # Print the contents of test.txt - # Get-Content test.txt - - # Print "Generating junit report" - Write-Output "Generating junit report" - - # Pipe info in test.txt to go-junit-report and save output to report.xml - Get-Content test.txt | & "$(go env GOPATH)/bin/go-junit-report" > "${display_name}_report.xml" - - # Print "Formatting coverage directory to legacy txt format" - Write-Output "Formatting coverage directory to legacy txt format" - - # Format coverage data to text format - go tool covdata textfmt -i=coverage -o "${display_name}_coverage.txt" - - # Print "Formatting coverage to json format" - Write-Output "Formatting coverage to json format" - - # Convert coverage.txt to coverage.json - & "$(go env GOPATH)/bin/gocov$suffix" convert "${display_name}_coverage.txt" > "${display_name}_coverage.json" - - # Print "Formatting coverage to xml format" - Write-Output "Formatting coverage to xml format" - - # Convert coverage.json to coverage.xml - Get-Content "${display_name}_coverage.json" | & "$(go env GOPATH)/bin/gocov-xml$suffix" > "${display_name}_coverage.xml" - - # Return the exit code from step 5 - exit $exitCode - env: - AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) - AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) - AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS) - 
AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS) - AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME) - AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY) - AZCOPY_E2E_LOG_OUTPUT: '$(System.DefaultWorkingDirectory)/logs' - AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG) - AZCOPY_E2E_OAUTH_MANAGED_DISK_SNAPSHOT_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_SNAPSHOT_CONFIG) - AZCOPY_E2E_STD_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_CONFIG) - AZCOPY_E2E_STD_MANAGED_DISK_SNAPSHOT_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_SNAPSHOT_CONFIG) - CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY) - CPK_ENCRYPTION_KEY_SHA256: $(CPK_ENCRYPTION_KEY_SHA256) - AZCOPY_E2E_EXECUTABLE_PATH: $(System.DefaultWorkingDirectory)/$(build_name) - GOCOVERDIR: '$(System.DefaultWorkingDirectory)/coverage' - NEW_E2E_SUBSCRIPTION_ID: $(AZCOPY_NEW_E2E_SUBSCRIPTION_ID) - NEW_E2E_AZCOPY_PATH: $(System.DefaultWorkingDirectory)/$(build_name) - NEW_E2E_ENVIRONMENT: "AzurePipeline" - displayName: 'E2E Test $(display_name) - AMD64 with Workload Identity' - - - task: PublishBuildArtifacts@1 - displayName: 'Publish logs' - condition: succeededOrFailed() - inputs: - pathToPublish: '$(System.DefaultWorkingDirectory)/logs' - artifactName: logs - - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testRunner: JUnit - testResultsFiles: $(System.DefaultWorkingDirectory)/**/$(display_name)_report.xml - testRunTitle: 'Go on $(display_name)' - - - task: PublishCodeCoverageResults@1 - condition: succeededOrFailed() - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: $(System.DefaultWorkingDirectory)/**/$(display_name)_coverage.xml - additionalCodeCoverageFiles: $(System.DefaultWorkingDirectory)/**/$(display_name)_coverage.html + - template: azurePipelineTemplates/run-e2e.yml + parameters: + name: 'Old_E2E' + test_cli_param: '-tags olde2etest' + + - template: azurePipelineTemplates/run-e2e.yml + parameters: + name: 'New_E2E' + test_cli_param: '-run "TestNewE2E/.*"' - job: Test_On_Ubuntu variables: diff --git a/azurePipelineTemplates/run-e2e.yml b/azurePipelineTemplates/run-e2e.yml new file mode 100644 index 000000000..8d5d6ec2e --- /dev/null +++ b/azurePipelineTemplates/run-e2e.yml @@ -0,0 +1,188 @@ +parameters: + - name: name + type: string + - name: test_cli_param + type: string + +jobs: + - job: ${{ parameters.name }} + timeoutInMinutes: 360 + # Creating strategies for GOOS: Windows Server 2019 /macOS X Mojave 10.15/Ubuntu 20.04 + strategy: + matrix: + Ubuntu-20: + imageName: 'ubuntu-latest' + build_name: 'azcopy_linux_amd64' + display_name: "Linux" + Windows: + imageName: 'windows-latest' + build_name: 'azcopy_windows_amd64.exe' + display_name: "Windows" + type: 'windows' + MacOS: + imageName: 'macos-latest' + build_name: 'azcopy_darwin_amd64' + display_name: "MacOS" + pool: + vmImage: $(imageName) + + steps: + - task: PowerShell@2 + inputs: + targetType: 'inline' + script: 'Install-Module -Name Az.Accounts -Scope CurrentUser -Repository PSGallery -AllowClobber -Force' + pwsh: 'true' + displayName: 'Install Powershell Az Module' + - task: GoTool@0 + inputs: + version: $(AZCOPY_GOLANG_VERSION_COVERAGE) + - script: | + go install github.com/jstemmer/go-junit-report@v0.9.1 + go install github.com/axw/gocov/gocov@v1.1.0 + go install github.com/AlekSi/gocov-xml@v1.0.0 + go install github.com/matm/gocov-html@v0.0.0-20200509184451-71874e2e203b + displayName: 'Installing dependencies' + - bash: | + echo "##vso[task.setvariable 
variable=CGO_ENABLED]0" + displayName: 'Set CGO_ENABLED for Windows' + condition: eq(variables.type, 'windows') + - bash: | + npm install -g azurite + mkdir azurite + azurite --silent --location azurite --debug azurite\debug.log & + displayName: 'Install and Run Azurite' + # Running E2E Tests on AMD64 + - task: AzureCLI@2 + inputs: + azureSubscription: azcopytestworkloadidentity + addSpnToEnvironment: true + scriptType: pscore + scriptLocation: inlineScript + inlineScript: | + # Create coverage directory + if (-Not (Test-Path -Path "./coverage")) { + New-Item -Path "./coverage" -ItemType Directory + } + + # Create log directory + if (-Not (Test-Path -Path "${env:AZCOPY_E2E_LOG_OUTPUT}")) { + New-Item -Path "${env:AZCOPY_E2E_LOG_OUTPUT}" -ItemType Directory + } + + # Print "Building executable" + Write-Output "Building executable" + + # Set platform-specific environment variables and tags + $tags = "" + $suffix = "" + $build_name = "" + $display_name = "" + if ($IsWindows) { + $env:GOOS = "windows" + $env:GOARCH = "amd64" + $suffix = ".exe" + $build_name = "azcopy_windows_amd64.exe" + $display_name = "Windows" + } elseif ($IsLinux) { + $env:GOOS = "linux" + $env:GOARCH = "amd64" + $tags = "netgo" + $build_name = "azcopy_linux_amd64" + $display_name = "Linux" + } elseif ($IsMacOS) { + $env:GOOS = "darwin" + $env:GOARCH = "amd64" + $env:CGO_ENABLED = "1" + $build_name = "azcopy_darwin_amd64" + $display_name = "MacOS" + } else { + Write-Error "Unsupported operating system" + exit 1 + } + + # Build the Go program + if ($tags -ne "") { + go build -cover -tags $tags -o $build_name + } else { + go build -cover -o $build_name + } + + # Print "Running tests" + Write-Output "Running tests" + + # Run tests and pipe output to test.txt + go test -timeout=2h -v ${{ parameters.test_cli_param }} ./e2etest | Tee-Object -FilePath test.txt + + # Save the exit code from the previous command + $exitCode = $LASTEXITCODE + + # Print the contents of test.txt + # Get-Content test.txt + + # Print "Generating junit report" + Write-Output "Generating junit report" + + # Pipe info in test.txt to go-junit-report and save output to report.xml + Get-Content test.txt | & "$(go env GOPATH)/bin/go-junit-report" > "${display_name}_report.xml" + + # Print "Formatting coverage directory to legacy txt format" + Write-Output "Formatting coverage directory to legacy txt format" + + # Format coverage data to text format + go tool covdata textfmt -i=coverage -o "${display_name}_coverage.txt" + + # Print "Formatting coverage to json format" + Write-Output "Formatting coverage to json format" + + # Convert coverage.txt to coverage.json + & "$(go env GOPATH)/bin/gocov$suffix" convert "${display_name}_coverage.txt" > "${display_name}_coverage.json" + + # Print "Formatting coverage to xml format" + Write-Output "Formatting coverage to xml format" + + # Convert coverage.json to coverage.xml + Get-Content "${display_name}_coverage.json" | & "$(go env GOPATH)/bin/gocov-xml$suffix" > "${display_name}_coverage.xml" + + # Return the exit code from step 5 + exit $exitCode + env: + AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) + AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) + AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS) + AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS) + AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME) + AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY) + AZCOPY_E2E_LOG_OUTPUT: '$(System.DefaultWorkingDirectory)/logs' + AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG: 
$(AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG) + AZCOPY_E2E_OAUTH_MANAGED_DISK_SNAPSHOT_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_SNAPSHOT_CONFIG) + AZCOPY_E2E_STD_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_CONFIG) + AZCOPY_E2E_STD_MANAGED_DISK_SNAPSHOT_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_SNAPSHOT_CONFIG) + CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY) + CPK_ENCRYPTION_KEY_SHA256: $(CPK_ENCRYPTION_KEY_SHA256) + AZCOPY_E2E_EXECUTABLE_PATH: $(System.DefaultWorkingDirectory)/$(build_name) + GOCOVERDIR: '$(System.DefaultWorkingDirectory)/coverage' + NEW_E2E_SUBSCRIPTION_ID: $(AZCOPY_NEW_E2E_SUBSCRIPTION_ID) + NEW_E2E_AZCOPY_PATH: $(System.DefaultWorkingDirectory)/$(build_name) + NEW_E2E_ENVIRONMENT: "AzurePipeline" + displayName: 'E2E Test $(display_name) - AMD64 with Workload Identity' + + - task: PublishBuildArtifacts@1 + displayName: 'Publish logs' + condition: succeededOrFailed() + inputs: + pathToPublish: '$(System.DefaultWorkingDirectory)/logs' + artifactName: logs + + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testRunner: JUnit + testResultsFiles: $(System.DefaultWorkingDirectory)/**/$(display_name)_report.xml + testRunTitle: 'Go on $(display_name)' + + - task: PublishCodeCoverageResults@1 + condition: succeededOrFailed() + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(System.DefaultWorkingDirectory)/**/$(display_name)_coverage.xml + additionalCodeCoverageFiles: $(System.DefaultWorkingDirectory)/**/$(display_name)_coverage.html \ No newline at end of file diff --git a/cmd/copy.go b/cmd/copy.go index 54c53e72e..c7a87d54d 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -1831,7 +1831,7 @@ func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot if jobDone { exitCode := cca.getSuccessExitCode() - if summary.TransfersFailed > 0 { + if summary.TransfersFailed > 0 || summary.JobStatus == common.EJobStatus.Cancelled() || summary.JobStatus == common.EJobStatus.Cancelling() { exitCode = common.EExitCode.Error() } @@ -2120,7 +2120,7 @@ func init() { cpCmd.PersistentFlags().BoolVar(&raw.s2sSourceChangeValidation, "s2s-detect-source-changed", false, "False by default. Detect if the source file/blob changes while it is being read. This parameter only applies to service to service copies, because the corresponding check is permanently enabled for uploads and downloads.") cpCmd.PersistentFlags().StringVar(&raw.s2sInvalidMetadataHandleOption, "s2s-handle-invalid-metadata", common.DefaultInvalidMetadataHandleOption.String(), "Specifies how invalid metadata keys are handled. Available options: ExcludeIfInvalid, FailIfInvalid, RenameIfInvalid (default 'ExcludeIfInvalid').") cpCmd.PersistentFlags().StringVar(&raw.listOfVersionIDs, "list-of-versions", "", "Specifies a path to a text file where each version id is listed on a separate line. Ensure that the source must point to a single blob and all the version ids specified in the file using this flag must belong to the source blob only. AzCopy will download the specified versions in the destination folder provided.") - cpCmd.PersistentFlags().StringVar(&raw.blobTags, "blob-tags", "", "Set tags on blobs to categorize data in your storage account. Multiple blob tags should be separated by ';', i.e. 'foo=bar;some=thing'.") + cpCmd.PersistentFlags().StringVar(&raw.blobTags, "blob-tags", "", "Set tags on blobs to categorize data in your storage account. Multiple blob tags should be separated by '&', i.e. 
'foo=bar&some=thing'.") cpCmd.PersistentFlags().BoolVar(&raw.s2sPreserveBlobTags, "s2s-preserve-blob-tags", false, "False by default. Preserve blob tags during service to service transfer from one blob storage to another.") cpCmd.PersistentFlags().BoolVar(&raw.includeDirectoryStubs, "include-directory-stub", false, "False by default to ignore directory stubs. Directory stubs are blobs with metadata 'hdi_isfolder:true'. Setting value to true will preserve directory stubs during transfers. Including this flag with no value defaults to true (e.g, azcopy copy --include-directory-stub is the same as azcopy copy --include-directory-stub=true).") cpCmd.PersistentFlags().BoolVar(&raw.disableAutoDecoding, "disable-auto-decoding", false, "False by default to enable automatic decoding of illegal chars on Windows. Can be set to true to disable automatic decoding.") diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go index 4e05212e1..73c537a34 100755 --- a/cmd/copyEnumeratorInit.go +++ b/cmd/copyEnumeratorInit.go @@ -279,39 +279,24 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde if cca.dryrunMode && shouldSendToSte { glcm.Dryrun(func(format common.OutputFormat) string { - if format == common.EOutputFormat.Json() { - jsonOutput, err := json.Marshal(transfer) - common.PanicIfErr(err) - return string(jsonOutput) - } else { - if cca.FromTo.From() == common.ELocation.Local() { - // formatting from local source - dryrunValue := fmt.Sprintf("DRYRUN: copy %v", common.ToShortPath(cca.Source.Value)) - if runtime.GOOS == "windows" { - dryrunValue += strings.ReplaceAll(srcRelPath, "/", "\\") - } else { // linux and mac - dryrunValue += srcRelPath - } - dryrunValue += fmt.Sprintf(" to %v%v", strings.Trim(cca.Destination.Value, "/"), dstRelPath) - return dryrunValue - } else if cca.FromTo.To() == common.ELocation.Local() { - // formatting to local source - dryrunValue := fmt.Sprintf("DRYRUN: copy %v%v to %v", - strings.Trim(cca.Source.Value, "/"), srcRelPath, - common.ToShortPath(cca.Destination.Value)) - if runtime.GOOS == "windows" { - dryrunValue += strings.ReplaceAll(dstRelPath, "/", "\\") - } else { // linux and mac - dryrunValue += dstRelPath - } - return dryrunValue - } else { - return fmt.Sprintf("DRYRUN: copy %v%v to %v%v", - cca.Source.Value, - srcRelPath, - cca.Destination.Value, - dstRelPath) + src := common.GenerateFullPath(cca.Source.Value, srcRelPath) + dst := common.GenerateFullPath(cca.Destination.Value, dstRelPath) + + switch format { + case common.EOutputFormat.Json(): + tx := DryrunTransfer{ + EntityType: transfer.EntityType, + BlobType: common.FromBlobType(transfer.BlobType), + FromTo: cca.FromTo, + Source: src, + Destination: dst, } + + buf, _ := json.Marshal(tx) + return string(buf) + default: + return fmt.Sprintf("DRYRUN: copy %v to %v", + src, dst) } }) return nil @@ -585,6 +570,10 @@ func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool return "" // ignore path encode rules } + if object.relativePath == "\x00" { // Short circuit, our relative path is requesting root/ + return "\x00" + } + // source is a EXACT path to the file if object.isSingleSourceFile() { // If we're finding an object from the source, it returns "" if it's already got it. 
diff --git a/cmd/helpMessages.go b/cmd/helpMessages.go index 7b2986fa6..3c0bb203c 100644 --- a/cmd/helpMessages.go +++ b/cmd/helpMessages.go @@ -21,15 +21,14 @@ const copyCmdShortDescription = "Copies source data to a destination location" const copyCmdLongDescription = ` Copies source data to a destination location. The supported directions are: - local <-> Azure Blob (SAS or OAuth authentication) - - local <-> Azure Files (Share/directory SAS authentication) + - local <-> Azure Files (Share/directory SAS or OAuth authentication) - local <-> ADLS Gen 2 (SAS, OAuth, or SharedKey authentication) - - Azure Blob (SAS or public) -> Azure Blob (SAS or OAuth authentication) - - ADLS Gen 2 (SAS or public) -> ADLS Gen 2 (SAS or OAuth authentication) + - Azure Blob (SAS, OAuth or public authentication) -> Azure Blob (SAS or OAuth authentication) - ADLS Gen2 (SAS or OAuth authentication) <-> ADLS Gen2 (SAS or OAuth authentication) - ADLS Gen2 (SAS or OAuth authentication) <-> Azure Blob (SAS or OAuth authentication) - - Azure Blob (SAS or public) -> Azure Files (SAS) - - Azure Files (SAS) -> Azure Files (SAS) - - Azure Files (SAS) -> Azure Blob (SAS or OAuth authentication) + - Azure Blob (SAS, OAuth or public) -> Azure Files (SAS or OAuth authentication) + - Azure Files (SAS or OAuth authentication) -> Azure Files (SAS or OAuth authentication) + - Azure Files (SAS or OAuth authentication) -> Azure Blob (SAS or OAuth authentication) - AWS S3 (Access Key) -> Azure Block Blob (SAS or OAuth authentication) - Google Cloud Storage (Service Account Key) -> Azure Block Blob (SAS or OAuth authentication) diff --git a/cmd/list.go b/cmd/list.go index 418d6013c..2cd7492c7 100755 --- a/cmd/list.go +++ b/cmd/list.go @@ -481,7 +481,7 @@ func getPath(containerName, relativePath string, level LocationLevel, entityType builder.WriteString(containerName + "/") } builder.WriteString(relativePath) - if entityType == common.EEntityType.Folder() { + if entityType == common.EEntityType.Folder() && !strings.HasSuffix(relativePath, "/") { builder.WriteString("/") } return builder.String() diff --git a/cmd/removeEnumerator.go b/cmd/removeEnumerator.go index 7ca0f70c3..e7ce7213a 100755 --- a/cmd/removeEnumerator.go +++ b/cmd/removeEnumerator.go @@ -22,6 +22,7 @@ package cmd import ( "context" + "encoding/json" "errors" "fmt" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake" @@ -229,8 +230,22 @@ func removeBfsResources(cca *CookedCopyCmdArgs) (err error) { func dryrunRemoveSingleDFSResource(ctx context.Context, dsc *service.Client, datalakeURLParts azdatalake.URLParts, recursive bool) error { //deleting a filesystem if datalakeURLParts.PathName == "" { - glcm.Dryrun(func(_ common.OutputFormat) string { - return fmt.Sprintf("DRYRUN: remove filesystem %s", datalakeURLParts.FileSystemName) + glcm.Dryrun(func(of common.OutputFormat) string { + switch of { + case of.Text(): + return fmt.Sprintf("DRYRUN: remove %s", dsc.NewFileSystemClient(datalakeURLParts.FileSystemName).DFSURL()) + case of.Json(): + tx := DryrunTransfer{ + EntityType: common.EEntityType.Folder(), + FromTo: common.EFromTo.BlobFSTrash(), + Source: dsc.NewFileSystemClient(datalakeURLParts.FileSystemName).DFSURL(), + } + + buf, _ := json.Marshal(tx) + return string(buf) + default: + panic("unsupported output format " + of.String()) + } }) return nil } @@ -246,8 +261,22 @@ func dryrunRemoveSingleDFSResource(ctx context.Context, dsc *service.Client, dat // then we should short-circuit and simply remove that file resourceType := 
common.IffNotNil(props.ResourceType, "") if strings.EqualFold(resourceType, "file") { - glcm.Dryrun(func(_ common.OutputFormat) string { - return fmt.Sprintf("DRYRUN: remove file %s", datalakeURLParts.PathName) + glcm.Dryrun(func(of common.OutputFormat) string { + switch of { + case of.Text(): + return fmt.Sprintf("DRYRUN: remove %s", directoryClient.DFSURL()) + case of.Json(): + tx := DryrunTransfer{ + EntityType: common.EEntityType.File(), + FromTo: common.EFromTo.BlobFSTrash(), + Source: directoryClient.DFSURL(), + } + + buf, _ := json.Marshal(tx) + return string(buf) + default: + panic("unsupported output format " + of.String()) + } }) return nil } @@ -267,8 +296,24 @@ func dryrunRemoveSingleDFSResource(ctx context.Context, dsc *service.Client, dat entityType = "file" } - glcm.Dryrun(func(_ common.OutputFormat) string { - return fmt.Sprintf("DRYRUN: remove %s %s", entityType, *v.Name) + glcm.Dryrun(func(of common.OutputFormat) string { + uri := dsc.NewFileSystemClient(datalakeURLParts.FileSystemName).NewFileClient(*v.Name).DFSURL() + + switch of { + case of.Text(): + return fmt.Sprintf("DRYRUN: remove %s", uri) + case of.Json(): + tx := DryrunTransfer{ + EntityType: common.Iff(entityType == "directory", common.EEntityType.Folder(), common.EEntityType.File()), + FromTo: common.EFromTo.BlobFSTrash(), + Source: uri, + } + + buf, _ := json.Marshal(tx) + return string(buf) + default: + panic("unsupported output format " + of.String()) + } }) } } diff --git a/cmd/sync.go b/cmd/sync.go index 1ab15a5c7..1a88d8d27 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -60,6 +60,7 @@ type rawSyncCmdArgs struct { compareHash string localHashStorageMode string + includeDirectoryStubs bool // Includes hdi_isfolder objects in the sync even w/o preservePermissions. preservePermissions bool preserveSMBPermissions bool // deprecated and synonymous with preservePermissions preserveOwner bool @@ -163,7 +164,17 @@ func (raw *rawSyncCmdArgs) cook() (cookedSyncCmdArgs, error) { jobsAdmin.JobsAdmin.LogToJobLog(LocalToFileShareWarnMsg, common.LogWarning) } if raw.dryrun { - glcm.Dryrun(func(_ common.OutputFormat) string { + glcm.Dryrun(func(of common.OutputFormat) string { + if of == common.EOutputFormat.Json() { + var out struct { + Warn string `json:"warn"` + } + + out.Warn = LocalToFileShareWarnMsg + buf, _ := json.Marshal(out) + return string(buf) + } + return fmt.Sprintf("DRYRUN: warn %s", LocalToFileShareWarnMsg) }) } @@ -368,6 +379,8 @@ func (raw *rawSyncCmdArgs) cook() (cookedSyncCmdArgs, error) { cooked.deleteDestinationFileIfNecessary = raw.deleteDestinationFileIfNecessary + cooked.includeDirectoryStubs = raw.includeDirectoryStubs + return cooked, nil } @@ -417,6 +430,7 @@ type cookedSyncCmdArgs struct { putBlobSize int64 forceIfReadOnly bool backupMode bool + includeDirectoryStubs bool // commandString hold the user given command which is logged to the Job log file commandString string @@ -621,7 +635,7 @@ func (cca *cookedSyncCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot if jobDone { exitCode := common.EExitCode.Success() - if summary.TransfersFailed > 0 { + if summary.TransfersFailed > 0 || summary.JobStatus == common.EJobStatus.Cancelled() || summary.JobStatus == common.EJobStatus.Cancelling() { exitCode = common.EExitCode.Error() } @@ -789,6 +803,7 @@ func init() { rootCmd.AddCommand(syncCmd) syncCmd.PersistentFlags().BoolVar(&raw.recursive, "recursive", true, "True by default, look into sub-directories recursively when syncing between directories. 
(default true).") syncCmd.PersistentFlags().StringVar(&raw.fromTo, "from-to", "", "Optionally specifies the source destination combination. For Example: LocalBlob, BlobLocal, LocalFile, FileLocal, BlobFile, FileBlob, etc.") + syncCmd.PersistentFlags().BoolVar(&raw.includeDirectoryStubs, "include-directory-stub", false, "False by default, includes blobs with the hdi_isfolder metadata in the transfer.") // TODO: enable for copy with IfSourceNewer // smb info/permissions can be persisted in the scenario of File -> File diff --git a/cmd/syncComparator.go b/cmd/syncComparator.go index 64e76b6b7..b99bfe733 100644 --- a/cmd/syncComparator.go +++ b/cmd/syncComparator.go @@ -65,13 +65,12 @@ type syncDestinationComparator struct { comparisonHashType common.SyncHashType - preferSMBTime bool - disableComparison bool - deleteDestinationFileSync bool + preferSMBTime bool + disableComparison bool } -func newSyncDestinationComparator(i *objectIndexer, copyScheduler, cleaner objectProcessor, comparisonHashType common.SyncHashType, preferSMBTime, disableComparison bool, deleteDestinationFile bool) *syncDestinationComparator { - return &syncDestinationComparator{sourceIndex: i, copyTransferScheduler: copyScheduler, destinationCleaner: cleaner, preferSMBTime: preferSMBTime, disableComparison: disableComparison, comparisonHashType: comparisonHashType, deleteDestinationFileSync: deleteDestinationFile} +func newSyncDestinationComparator(i *objectIndexer, copyScheduler, cleaner objectProcessor, comparisonHashType common.SyncHashType, preferSMBTime, disableComparison bool) *syncDestinationComparator { + return &syncDestinationComparator{sourceIndex: i, copyTransferScheduler: copyScheduler, destinationCleaner: cleaner, preferSMBTime: preferSMBTime, disableComparison: disableComparison, comparisonHashType: comparisonHashType} } // it will only schedule transfers for destination objects that are present in the indexer but stale compared to the entry in the map @@ -90,11 +89,6 @@ func (f *syncDestinationComparator) processIfNecessary(destinationObject StoredO if present { defer delete(f.sourceIndex.indexMap, destinationObject.relativePath) - if f.deleteDestinationFileSync { // when delete-destination-file flag is turned on via sync command, we want to overwrite the file at destination - syncComparatorLog(sourceObjectInMap.relativePath, syncStatusOverwritten, syncOverwriteReasonDeleteDestinationFile, false) - return f.copyTransferScheduler(sourceObjectInMap) - } - if f.disableComparison { syncComparatorLog(sourceObjectInMap.relativePath, syncStatusOverwritten, syncOverwriteReasonNewerHash, false) return f.copyTransferScheduler(sourceObjectInMap) @@ -148,13 +142,12 @@ type syncSourceComparator struct { comparisonHashType common.SyncHashType - preferSMBTime bool - disableComparison bool - deleteDestinationFileSync bool + preferSMBTime bool + disableComparison bool } -func newSyncSourceComparator(i *objectIndexer, copyScheduler objectProcessor, comparisonHashType common.SyncHashType, preferSMBTime, disableComparison bool, deleteDestNew bool) *syncSourceComparator { - return &syncSourceComparator{destinationIndex: i, copyTransferScheduler: copyScheduler, preferSMBTime: preferSMBTime, disableComparison: disableComparison, comparisonHashType: comparisonHashType, deleteDestinationFileSync: deleteDestNew} +func newSyncSourceComparator(i *objectIndexer, copyScheduler objectProcessor, comparisonHashType common.SyncHashType, preferSMBTime, disableComparison bool) *syncSourceComparator { + return 
&syncSourceComparator{destinationIndex: i, copyTransferScheduler: copyScheduler, preferSMBTime: preferSMBTime, disableComparison: disableComparison, comparisonHashType: comparisonHashType} } // it will only transfer source items that are: @@ -174,10 +167,6 @@ func (f *syncSourceComparator) processIfNecessary(sourceObject StoredObject) err if present { defer delete(f.destinationIndex.indexMap, relPath) - if f.deleteDestinationFileSync { // when delete-destination-file flag is turned on via sync command, we want to overwrite the file at destination - syncComparatorLog(sourceObject.relativePath, syncStatusOverwritten, syncOverwriteReasonDeleteDestinationFile, false) - return f.copyTransferScheduler(sourceObject) - } // if destination is stale, schedule source for transfer if f.disableComparison { syncComparatorLog(sourceObject.relativePath, syncStatusOverwritten, syncOverwriteReasonNewerHash, false) diff --git a/cmd/syncEnumerator.go b/cmd/syncEnumerator.go index 8000d5850..71ae2e8bf 100644 --- a/cmd/syncEnumerator.go +++ b/cmd/syncEnumerator.go @@ -55,7 +55,7 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s } } - includeDirStubs := cca.fromTo.From().SupportsHnsACLs() && cca.fromTo.To().SupportsHnsACLs() && cca.preservePermissions.IsTruthy() + includeDirStubs := (cca.fromTo.From().SupportsHnsACLs() && cca.fromTo.To().SupportsHnsACLs() && cca.preservePermissions.IsTruthy()) || cca.includeDirectoryStubs // TODO: enable symlink support in a future release after evaluating the implications // TODO: Consider passing an errorChannel so that enumeration errors during sync can be conveyed to the caller. @@ -129,7 +129,7 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s } // decide our folder transfer strategy - fpo, folderMessage := NewFolderPropertyOption(cca.fromTo, cca.recursive, true, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), false, strings.EqualFold(cca.destination.Value, common.Dev_Null), false) // sync always acts like stripTopDir=true + fpo, folderMessage := NewFolderPropertyOption(cca.fromTo, cca.recursive, true, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), false, strings.EqualFold(cca.destination.Value, common.Dev_Null), cca.includeDirectoryStubs) // sync always acts like stripTopDir=true if !cca.dryrunMode { glcm.Info(folderMessage) } @@ -246,7 +246,7 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s // we ALREADY have available a complete map of everything that exists locally // so as soon as we see a remote destination object we can know whether it exists in the local source - comparator = newSyncDestinationComparator(indexer, transferScheduler.scheduleCopyTransfer, destCleanerFunc, cca.compareHash, cca.preserveSMBInfo, cca.mirrorMode, cca.deleteDestinationFileIfNecessary).processIfNecessary + comparator = newSyncDestinationComparator(indexer, transferScheduler.scheduleCopyTransfer, destCleanerFunc, cca.compareHash, cca.preserveSMBInfo, cca.mirrorMode).processIfNecessary finalize = func() error { // schedule every local file that doesn't exist at the destination err = indexer.traverse(transferScheduler.scheduleCopyTransfer, filters) @@ -270,7 +270,7 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s indexer.isDestinationCaseInsensitive = IsDestinationCaseInsensitive(cca.fromTo) // in all other cases (download and S2S), the destination is scanned/indexed first // then the source is scanned and filtered based on 
what the destination contains - comparator = newSyncSourceComparator(indexer, transferScheduler.scheduleCopyTransfer, cca.compareHash, cca.preserveSMBInfo, cca.mirrorMode, cca.deleteDestinationFileIfNecessary).processIfNecessary + comparator = newSyncSourceComparator(indexer, transferScheduler.scheduleCopyTransfer, cca.compareHash, cca.preserveSMBInfo, cca.mirrorMode).processIfNecessary finalize = func() error { // remove the extra files at the destination that were not present at the source diff --git a/cmd/syncProcessor.go b/cmd/syncProcessor.go index 2603157ec..22599937b 100644 --- a/cmd/syncProcessor.go +++ b/cmd/syncProcessor.go @@ -27,7 +27,6 @@ import ( "net/url" "os" "path" - "runtime" "strings" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" @@ -75,24 +74,6 @@ type interactiveDeleteProcessor struct { dryrunMode bool } -func newDeleteTransfer(object StoredObject) (newDeleteTransfer common.CopyTransfer) { - return common.CopyTransfer{ - Source: object.relativePath, - EntityType: object.entityType, - LastModifiedTime: object.lastModifiedTime, - SourceSize: object.size, - ContentType: object.contentType, - ContentEncoding: object.contentEncoding, - ContentDisposition: object.contentDisposition, - ContentLanguage: object.contentLanguage, - CacheControl: object.cacheControl, - Metadata: object.Metadata, - BlobType: object.blobType, - BlobVersionID: object.blobVersionID, - BlobTags: object.blobTags, - } -} - func (d *interactiveDeleteProcessor) removeImmediately(object StoredObject) (err error) { if d.shouldPromptUser { d.shouldDelete, d.shouldPromptUser = d.promptForConfirmation(object) // note down the user's decision @@ -105,22 +86,24 @@ func (d *interactiveDeleteProcessor) removeImmediately(object StoredObject) (err if d.dryrunMode { glcm.Dryrun(func(format common.OutputFormat) string { if format == common.EOutputFormat.Json() { - jsonOutput, err := json.Marshal(newDeleteTransfer(object)) + deleteTarget := common.ELocation.Local() + if d.objectTypeToDisplay != LocalFileObjectType { + _ = deleteTarget.Parse(d.objectTypeToDisplay) + } + + tx := DryrunTransfer{ + Source: common.GenerateFullPath(d.objectLocationToDisplay, object.relativePath), + BlobType: common.FromBlobType(object.blobType), + EntityType: object.entityType, + FromTo: common.FromToValue(deleteTarget, common.ELocation.Unknown()), + } + + jsonOutput, err := json.Marshal(tx) common.PanicIfErr(err) return string(jsonOutput) } else { // remove for sync - if d.objectTypeToDisplay == "local file" { // removing from local src - dryrunValue := fmt.Sprintf("DRYRUN: remove %v", common.ToShortPath(d.objectLocationToDisplay)) - if runtime.GOOS == "windows" { - dryrunValue += "\\" + strings.ReplaceAll(object.relativePath, "/", "\\") - } else { // linux and mac - dryrunValue += "/" + object.relativePath - } - return dryrunValue - } - return fmt.Sprintf("DRYRUN: remove %v/%v", - d.objectLocationToDisplay, - object.relativePath) + return fmt.Sprintf("DRYRUN: remove %v", + common.GenerateFullPath(d.objectLocationToDisplay, object.relativePath)) } }) return nil @@ -189,9 +172,11 @@ func newInteractiveDeleteProcessor(deleter objectProcessor, deleteDestination co } } +const LocalFileObjectType = "local file" + func newSyncLocalDeleteProcessor(cca *cookedSyncCmdArgs, fpo common.FolderPropertyOption) *interactiveDeleteProcessor { localDeleter := localFileDeleter{rootPath: cca.destination.ValueLocal(), fpo: fpo, folderManager: common.NewFolderDeletionManager(context.Background(), fpo, azcopyScanningLogger)} - return 
newInteractiveDeleteProcessor(localDeleter.deleteFile, cca.deleteDestination, "local file", cca.destination, cca.incrementDeletionCount, cca.dryrunMode) + return newInteractiveDeleteProcessor(localDeleter.deleteFile, cca.deleteDestination, LocalFileObjectType, cca.destination, cca.incrementDeletionCount, cca.dryrunMode) } type localFileDeleter struct { @@ -293,6 +278,18 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { b.clientOptions.PerCallPolicies = append([]policy.Policy{common.NewRecursivePolicy()}, b.clientOptions.PerCallPolicies...) } */ + objectPath := path.Join(b.rootPath, object.relativePath) + if object.relativePath == "\x00" && b.targetLocation != common.ELocation.Blob() { + return nil // Do nothing, we don't want to accidentally delete the root. + } else if object.relativePath == "\x00" { // this is acceptable on blob, though. Dir stubs are a thing, and they aren't necessary for normal function. + objectPath = b.rootPath + } + + if strings.HasSuffix(object.relativePath, "/") && !strings.HasSuffix(objectPath, "/") && b.targetLocation == common.ELocation.Blob() { + // If we were targeting a directory, we still need to be; path.Join breaks that. + // We also want to code defensively around this, and make sure we are not producing folder// or trying to put a weird URI into an endpoint that can't handle it. + objectPath += "/" + } sc := b.remoteClient if object.entityType == common.EEntityType.File() { @@ -309,7 +306,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { switch b.targetLocation { case common.ELocation.Blob(): bsc, _ := sc.BlobServiceClient() - var blobClient *blob.Client = bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath, object.relativePath)) + var blobClient *blob.Client = bsc.NewContainerClient(b.containerName).NewBlobClient(objectPath) objURL, err = b.getObjectURL(blobClient.URL()) if err != nil { @@ -321,7 +318,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { _, err = blobClient.Delete(b.ctx, nil) case common.ELocation.File(): fsc, _ := sc.FileServiceClient() - fileClient := fsc.NewShareClient(b.containerName).NewRootDirectoryClient().NewFileClient(path.Join(b.rootPath, object.relativePath)) + fileClient := fsc.NewShareClient(b.containerName).NewRootDirectoryClient().NewFileClient(objectPath) objURL, err = b.getObjectURL(fileClient.URL()) if err != nil { @@ -335,7 +332,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { }, fileClient, b.forceIfReadOnly) case common.ELocation.BlobFS(): dsc, _ := sc.DatalakeServiceClient() - fileClient := dsc.NewFileSystemClient(b.containerName).NewFileClient(path.Join(b.rootPath, object.relativePath)) + fileClient := dsc.NewFileSystemClient(b.containerName).NewFileClient(objectPath) objURL, err = b.getObjectURL(fileClient.DFSURL()) if err != nil { @@ -371,7 +368,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { switch b.targetLocation { case common.ELocation.Blob(): bsc, _ := sc.BlobServiceClient() - blobClient := bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath, object.relativePath)) + blobClient := bsc.NewContainerClient(b.containerName).NewBlobClient(objectPath) // HNS endpoint doesn't like delete snapshots on a directory objURL, err = b.getObjectURL(blobClient.URL()) if err != nil { @@ -384,7 +381,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { } case common.ELocation.File(): fsc, _ := sc.FileServiceClient() - dirClient :=
fsc.NewShareClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath, object.relativePath)) + dirClient := fsc.NewShareClient(b.containerName).NewDirectoryClient(objectPath) objURL, err = b.getObjectURL(dirClient.URL()) if err != nil { return err @@ -398,7 +395,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error { } case common.ELocation.BlobFS(): dsc, _ := sc.DatalakeServiceClient() - directoryClient := dsc.NewFileSystemClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath, object.relativePath)) + directoryClient := dsc.NewFileSystemClient(b.containerName).NewDirectoryClient(objectPath) objURL, err = b.getObjectURL(directoryClient.DFSURL()) if err != nil { return err diff --git a/cmd/zc_processor.go b/cmd/zc_processor.go index 7497e3d92..4d9ae1fda 100644 --- a/cmd/zc_processor.go +++ b/cmd/zc_processor.go @@ -25,7 +25,6 @@ import ( "fmt" "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" "net/url" - "runtime" "strings" "github.com/pkg/errors" @@ -64,18 +63,85 @@ func newCopyTransferProcessor(copyJobTemplate *common.CopyJobPartOrderRequest, n } } +type DryrunTransfer struct { + EntityType common.EntityType + BlobType common.BlobType + FromTo common.FromTo + Source string + Destination string +} + +func (d *DryrunTransfer) UnmarshalJSON(bytes []byte) error { + var surrogate struct { + EntityType string + BlobType string + FromTo string + Source string + Destination string + } + + err := json.Unmarshal(bytes, &surrogate) + if err != nil { + return fmt.Errorf("failed to parse dryrun transfer: %w", err) + } + + err = d.FromTo.Parse(surrogate.FromTo) + if err != nil { + return fmt.Errorf("failed to parse fromto: %w", err) + } + + err = d.EntityType.Parse(surrogate.EntityType) + if err != nil { + return fmt.Errorf("failed to parse entity type: %w", err) + } + + err = d.BlobType.Parse(surrogate.BlobType) + if err != nil { + return fmt.Errorf("failed to parse blob type: %w", err) + } + + d.Source = surrogate.Source + d.Destination = surrogate.Destination + + return nil +} + +func (d DryrunTransfer) MarshalJSON() ([]byte, error) { + surrogate := struct { + EntityType string + BlobType string + FromTo string + Source string + Destination string + }{ + d.EntityType.String(), + d.BlobType.String(), + d.FromTo.String(), + d.Source, + d.Destination, + } + + return json.Marshal(surrogate) +} + func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) (err error) { // Escape paths on destinations where the characters are invalid // And re-encode them where the characters are valid. - srcRelativePath := pathEncodeRules(storedObject.relativePath, s.copyJobTemplate.FromTo, false, true) - dstRelativePath := pathEncodeRules(storedObject.relativePath, s.copyJobTemplate.FromTo, false, false) - if srcRelativePath != "" { - srcRelativePath = "/" + srcRelativePath - } - if dstRelativePath != "" { - dstRelativePath = "/" + dstRelativePath + var srcRelativePath, dstRelativePath string + if storedObject.relativePath == "\x00" { // Short circuit when we're talking about root/, because the STE is funky about this.
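// For reference, the DryrunTransfer surrogate above serializes its enum fields as
// strings; a minimal sketch, with all values assumed for illustration:
//
//	tx := DryrunTransfer{
//		EntityType: common.EEntityType.File(),
//		BlobType:   common.EBlobType.BlockBlob(),
//		FromTo:     common.EFromTo.BlobTrash(),
//		Source:     "https://myacct.blob.core.windows.net/ctr/a.txt",
//	}
//	buf, _ := json.Marshal(tx)
//	// {"EntityType":"File","BlobType":"BlockBlob","FromTo":"BlobTrash",
//	//  "Source":"https://myacct.blob.core.windows.net/ctr/a.txt","Destination":""}
//
// UnmarshalJSON reverses the mapping through each enum's Parse method, which is what
// the dry-run tests further down rely on when they decode this output.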
+ srcRelativePath, dstRelativePath = storedObject.relativePath, storedObject.relativePath + } else { + srcRelativePath = pathEncodeRules(storedObject.relativePath, s.copyJobTemplate.FromTo, false, true) + dstRelativePath = pathEncodeRules(storedObject.relativePath, s.copyJobTemplate.FromTo, false, false) + if srcRelativePath != "" { + srcRelativePath = "/" + srcRelativePath + } + if dstRelativePath != "" { + dstRelativePath = "/" + dstRelativePath + } } + copyTransfer, shouldSendToSte := storedObject.ToNewCopyTransfer(false, srcRelativePath, dstRelativePath, s.preserveAccessTier, s.folderPropertiesOption, s.symlinkHandlingType) if s.copyJobTemplate.FromTo.To() == common.ELocation.None() { @@ -100,55 +166,51 @@ func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) if s.dryrunMode { glcm.Dryrun(func(format common.OutputFormat) string { + prettySrcRelativePath, prettyDstRelativePath := srcRelativePath, dstRelativePath + + fromTo := s.copyJobTemplate.FromTo + if fromTo.From().IsRemote() { + prettySrcRelativePath, err = url.PathUnescape(prettySrcRelativePath) + if err != nil { + prettySrcRelativePath = srcRelativePath // Fall back, because it's better than failing. + } + } + + if fromTo.To().IsRemote() { + prettyDstRelativePath, err = url.PathUnescape(prettyDstRelativePath) + if err != nil { + prettyDstRelativePath = dstRelativePath // Fall back, because it's better than failing. + } + } + if format == common.EOutputFormat.Json() { - jsonOutput, err := json.Marshal(copyTransfer) + tx := DryrunTransfer{ + BlobType: common.FromBlobType(storedObject.blobType), + EntityType: storedObject.entityType, + FromTo: s.copyJobTemplate.FromTo, + Source: common.GenerateFullPath(s.copyJobTemplate.SourceRoot.Value, prettySrcRelativePath), + } + + if fromTo.To() != common.ELocation.None() && fromTo.To() != common.ELocation.Unknown() { + tx.Destination = common.GenerateFullPath(s.copyJobTemplate.DestinationRoot.Value, prettyDstRelativePath) + } + + jsonOutput, err := json.Marshal(tx) common.PanicIfErr(err) return string(jsonOutput) } else { - prettySrcRelativePath, err := url.QueryUnescape(srcRelativePath) - common.PanicIfErr(err) - prettyDstRelativePath, err := url.QueryUnescape(dstRelativePath) - common.PanicIfErr(err) - // if remove then To() will equal to common.ELocation.Unknown() if s.copyJobTemplate.FromTo.To() == common.ELocation.Unknown() { // remove - return fmt.Sprintf("DRYRUN: remove %v/%v", - s.copyJobTemplate.SourceRoot.Value, - prettySrcRelativePath) + return fmt.Sprintf("DRYRUN: remove %v", + common.GenerateFullPath(s.copyJobTemplate.SourceRoot.Value, prettySrcRelativePath)) } if s.copyJobTemplate.FromTo.To() == common.ELocation.None() { // set-properties - return fmt.Sprintf("DRYRUN: set-properties %v/%v", - s.copyJobTemplate.SourceRoot.Value, - prettySrcRelativePath) + return fmt.Sprintf("DRYRUN: set-properties %v", + common.GenerateFullPath(s.copyJobTemplate.SourceRoot.Value, prettySrcRelativePath)) } else { // copy for sync - if s.copyJobTemplate.FromTo.From() == common.ELocation.Local() { - // formatting from local source - dryrunValue := fmt.Sprintf("DRYRUN: copy %v", common.ToShortPath(s.copyJobTemplate.SourceRoot.Value)) - if runtime.GOOS == "windows" { - dryrunValue += "\\" + strings.ReplaceAll(prettySrcRelativePath, "/", "\\") - } else { // linux and mac - dryrunValue += "/" + prettySrcRelativePath - } - dryrunValue += fmt.Sprintf(" to %v/%v", strings.Trim(s.copyJobTemplate.DestinationRoot.Value, "/"), prettyDstRelativePath) - return dryrunValue - } else if 
s.copyJobTemplate.FromTo.To() == common.ELocation.Local() { - // formatting to local source - dryrunValue := fmt.Sprintf("DRYRUN: copy %v/%v to %v", - strings.Trim(s.copyJobTemplate.SourceRoot.Value, "/"), prettySrcRelativePath, - common.ToShortPath(s.copyJobTemplate.DestinationRoot.Value)) - if runtime.GOOS == "windows" { - dryrunValue += "\\" + strings.ReplaceAll(prettyDstRelativePath, "/", "\\") - } else { // linux and mac - dryrunValue += "/" + prettyDstRelativePath - } - return dryrunValue - } else { - return fmt.Sprintf("DRYRUN: copy %v/%v to %v/%v", - s.copyJobTemplate.SourceRoot.Value, - prettySrcRelativePath, - s.copyJobTemplate.DestinationRoot.Value, - prettyDstRelativePath) - } + return fmt.Sprintf("DRYRUN: copy %v to %v", + common.GenerateFullPath(s.copyJobTemplate.SourceRoot.Value, prettySrcRelativePath), + common.GenerateFullPath(s.copyJobTemplate.DestinationRoot.Value, prettyDstRelativePath)) } } }) diff --git a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index 6f0eb87a4..4cfeb629e 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -86,8 +86,8 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { } // All sources and DFS-destinations we'll look further - - _, _, isDirStub, blobErr := t.getPropertiesIfSingleBlob() + // This call is fine, because there is no trailing / here-- If there's a trailing /, this is surely referring to a directory. + _, _, isDirStub, _, blobErr := t.getPropertiesIfSingleBlob() // We know for sure this is a single blob still, let it walk on through to the traverser. if bloberror.HasCode(blobErr, bloberror.BlobUsesCustomerSpecifiedEncryption) { @@ -127,36 +127,45 @@ func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { return true, nil } -func (t *blobTraverser) getPropertiesIfSingleBlob() (response *blob.GetPropertiesResponse, isBlob bool, isDirStub bool, err error) { +func (t *blobTraverser) getPropertiesIfSingleBlob() (response *blob.GetPropertiesResponse, isBlob bool, isDirStub bool, blobName string, err error) { // trim away the trailing slash before we check whether it's a single blob // so that we can detect the directory stub in case there is one blobURLParts, err := blob.ParseURL(t.rawURL) if err != nil { - return nil, false, false, err + return nil, false, false, "", err } - blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) if blobURLParts.BlobName == "" { // This is a container, which needs to be given a proper listing. - return nil, false, false, nil + return nil, false, false, "", nil } + /* + If the user specified a trailing /, they may mean: + A) `folder/` with `hdi_isfolder`, this is intentional. + B) `folder` with `hdi_isfolder` + C) a virtual directory with children, but no stub + */ + +retry: blobClient, err := createBlobClientFromServiceClient(blobURLParts, t.serviceClient) if err != nil { - return nil, false, false, err + return nil, false, false, blobURLParts.BlobName, err } props, err := blobClient.GetProperties(t.ctx, &blob.GetPropertiesOptions{CPKInfo: t.cpkOptions.GetCPKInfo()}) - // if there was no problem getting the properties, it means that we are looking at a single blob - if err == nil { - if gCopyUtil.doesBlobRepresentAFolder(props.Metadata) { - return &props, false, true, nil - } - - return &props, true, false, err + if err != nil && strings.HasSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) { + // Trim & retry, maybe the directory stub is DFS style.
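// Illustrative sequence (account/container names assumed): a DFS-style stub carries
// no trailing separator, so the slashed GET misses and the trimmed retry hits:
//
//	GET https://myacct.blob.core.windows.net/ctr/folder/ -> 404 BlobNotFound
//	GET https://myacct.blob.core.windows.net/ctr/folder  -> 200, hdi_isfolder=true
//
// Whichever spelling finally resolves is what the new blobName return value reports.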
+ blobURLParts.BlobName = strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING) + goto retry + } else if err == nil { + // We found the target blob, great! Let's return the details. + isDir := gCopyUtil.doesBlobRepresentAFolder(props.Metadata) + return &props, !isDir, isDir, blobURLParts.BlobName, nil } - return nil, false, false, err + // We found nothing. + return nil, false, false, "", err } func (t *blobTraverser) getBlobTags() (common.BlobTags, error) { @@ -190,7 +199,7 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro } // check if the url points to a single blob - blobProperties, isBlob, isDirStub, err := t.getPropertiesIfSingleBlob() + blobProperties, isBlob, isDirStub, blobName, err := t.getPropertiesIfSingleBlob() var respErr *azcore.ResponseError if errors.As(err, &respErr) { @@ -223,11 +232,16 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro azcopyScanningLogger.Log(common.LogDebug, fmt.Sprintf("Root entity type: %s", getEntityType(blobProperties.Metadata))) } + relPath := "" + if strings.HasSuffix(blobName, "/") { + relPath = "\x00" // Because the ste will trim the / suffix from our source, or we may not already have it. + } + blobPropsAdapter := blobPropertiesResponseAdapter{blobProperties} storedObject := newStoredObject( preprocessor, - getObjectNameOnly(strings.TrimSuffix(blobURLParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)), - "", + getObjectNameOnly(blobName), + relPath, getEntityType(blobPropsAdapter.Metadata), blobPropsAdapter.LastModified(), blobPropsAdapter.ContentLength(), @@ -339,15 +353,27 @@ func (t *blobTraverser) parallelList(containerClient *container.Client, containe if t.includeDirectoryStubs { // try to get properties on the directory itself, since it's not listed in BlobItems - blobClient := containerClient.NewBlobClient(strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING)) + dName := strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING) + blobClient := containerClient.NewBlobClient(dName) + altNameCheck: pResp, err := blobClient.GetProperties(t.ctx, nil) - pbPropAdapter := blobPropertiesResponseAdapter{&pResp} - folderRelativePath := strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING) - folderRelativePath = strings.TrimPrefix(folderRelativePath, searchPrefix) if err == nil { + if !t.doesBlobRepresentAFolder(pResp.Metadata) { // We've picked up on a file *named* the folder, not the folder itself. Does folder/ exist? + if !strings.HasSuffix(dName, "/") { + blobClient = containerClient.NewBlobClient(dName + common.AZCOPY_PATH_SEPARATOR_STRING) // Tack on the path separator, check. + dName += common.AZCOPY_PATH_SEPARATOR_STRING + goto altNameCheck // "foo" is a file, what about "foo/"? + } + + goto skipDirAdd // We shouldn't add a blob that isn't a folder as a folder. You either have the folder metadata, or you don't. + } + + pbPropAdapter := blobPropertiesResponseAdapter{&pResp} + folderRelativePath := strings.TrimPrefix(dName, searchPrefix) + storedObject := newStoredObject( preprocessor, - getObjectNameOnly(strings.TrimSuffix(*virtualDir.Name, common.AZCOPY_PATH_SEPARATOR_STRING)), + getObjectNameOnly(dName), folderRelativePath, common.EEntityType.Folder(), pbPropAdapter.LastModified(), @@ -371,7 +397,15 @@ func (t *blobTraverser) parallelList(containerClient *container.Client, containe } enqueueOutput(storedObject, err) + } else { + // There was nothing there, but is there folder/? 
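// A sketch of the fallback paths (names assumed for illustration):
//
//	GET ctr/foo  -> 200 without hdi_isfolder => a real file named "foo"; re-check "foo/"
//	GET ctr/foo/ -> 200 with hdi_isfolder    => enumerate the stub as a folder
//	GET ctr/foo/ -> 404                      => fall through; no folder is fabricated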
+ if !strings.HasSuffix(dName, "/") { + blobClient = containerClient.NewBlobClient(dName + common.AZCOPY_PATH_SEPARATOR_STRING) // Tack on the path separator, check. + dName += common.AZCOPY_PATH_SEPARATOR_STRING + goto altNameCheck // "foo" wasn't found; what about "foo/"? + } } + skipDirAdd: } } } @@ -385,6 +419,11 @@ func (t *blobTraverser) parallelList(containerClient *container.Client, containe storedObject := t.createStoredObjectForBlob(preprocessor, blobInfo, strings.TrimPrefix(*blobInfo.Name, searchPrefix), containerName) + // edge case, blob name happens to be the same as root and ends in / + if storedObject.relativePath == "" && strings.HasSuffix(storedObject.name, "/") { + storedObject.relativePath = "\x00" // Short circuit, letting the backend know we *really* meant root/. + } + if t.s2sPreserveSourceTags && blobInfo.BlobTags != nil { blobTagsMap := common.BlobTags{} for _, blobTag := range blobInfo.BlobTags.BlobTagSet { @@ -487,7 +526,7 @@ func (t *blobTraverser) createStoredObjectForBlob(preprocessor objectMorpher, bl func (t *blobTraverser) doesBlobRepresentAFolder(metadata map[string]*string) bool { util := copyHandlerUtil{} - return util.doesBlobRepresentAFolder(metadata) && !(t.includeDirectoryStubs && t.recursive) + return util.doesBlobRepresentAFolder(metadata) // We should ignore these, because we pick them up in other ways. } func (t *blobTraverser) serialList(containerClient *container.Client, containerName string, searchPrefix string, @@ -520,6 +559,11 @@ func (t *blobTraverser) serialList(containerClient *container.Client, containerN storedObject := t.createStoredObjectForBlob(preprocessor, blobInfo, relativePath, containerName) + // edge case, blob name happens to be the same as root and ends in / + if storedObject.relativePath == "" && strings.HasSuffix(storedObject.name, "/") { + storedObject.relativePath = "\x00" // Short circuit, letting the backend know we *really* meant root/.
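// The "\x00" sentinel is undone later by common.GenerateFullPath (see
// common/extensions.go below), which expands it into a trailing separator; assuming
// a blob root, roughly:
//
//	common.GenerateFullPath("https://myacct.blob.core.windows.net/ctr/dir", "\x00")
//	// -> "https://myacct.blob.core.windows.net/ctr/dir/"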
+ } + // Setting blob tags if t.s2sPreserveSourceTags && blobInfo.BlobTags != nil { blobTagsMap := common.BlobTags{} diff --git a/cmd/zt_copy_blob_download_test.go b/cmd/zt_copy_blob_download_test.go index 6189e46c9..a6a0aadfe 100644 --- a/cmd/zt_copy_blob_download_test.go +++ b/cmd/zt_copy_blob_download_test.go @@ -857,14 +857,17 @@ func TestDryrunCopyBlobToBlobJson(t *testing.T) { a.Zero(len(mockedRPC.transfers)) msg := <-mockedLcm.dryrunLog - copyMessage := common.CopyTransfer{} + copyMessage := DryrunTransfer{} errMarshal := json.Unmarshal([]byte(msg), ©Message) a.Nil(errMarshal) + // comparing some values of copyMessage - a.Zero(strings.Compare(strings.Trim(copyMessage.Source, "/"), blobsToInclude[0])) - a.Zero(strings.Compare(strings.Trim(copyMessage.Destination, "/"), blobsToInclude[0])) - a.Zero(strings.Compare(copyMessage.EntityType.String(), common.EEntityType.File().String())) - a.Zero(strings.Compare(string(copyMessage.BlobType), "BlockBlob")) + srcRel := strings.TrimPrefix(copyMessage.Source, srcContainerClient.URL()) + dstRel := strings.TrimPrefix(copyMessage.Destination, dstContainerClient.URL()) + a.Equal(blobsToInclude[0], strings.Trim(srcRel, "/")) + a.Equal(blobsToInclude[0], strings.Trim(dstRel, "/")) + a.Equal(common.EEntityType.File(), copyMessage.EntityType) + a.Equal(common.EBlobType.BlockBlob(), copyMessage.BlobType) }) } diff --git a/cmd/zt_remove_blob_test.go b/cmd/zt_remove_blob_test.go index 555417357..34a6cd286 100644 --- a/cmd/zt_remove_blob_test.go +++ b/cmd/zt_remove_blob_test.go @@ -606,14 +606,15 @@ func TestDryrunRemoveBlobsUnderContainerJson(t *testing.T) { a.Zero(len(mockedRPC.transfers)) msg := <-mockedLcm.dryrunLog - deleteTransfer := common.CopyTransfer{} + deleteTransfer := DryrunTransfer{} errMarshal := json.Unmarshal([]byte(msg), &deleteTransfer) a.Nil(errMarshal) // comparing some values of deleteTransfer - a.Equal(deleteTransfer.Source, "/"+blobName[0]) - a.Equal(deleteTransfer.Destination, "/"+blobName[0]) + targetUri := cc.NewBlobClient(blobName[0]).URL() + a.Equal(targetUri, deleteTransfer.Source) + a.Equal("", deleteTransfer.Destination) a.Equal("File", deleteTransfer.EntityType.String()) - a.Equal("BlockBlob", string(deleteTransfer.BlobType)) + a.Equal("BlockBlob", deleteTransfer.BlobType.String()) }) } diff --git a/cmd/zt_sync_blob_blob_test.go b/cmd/zt_sync_blob_blob_test.go index 3877b716d..418bf2524 100644 --- a/cmd/zt_sync_blob_blob_test.go +++ b/cmd/zt_sync_blob_blob_test.go @@ -926,12 +926,12 @@ func TestDryrunSyncBlobtoBlobJson(t *testing.T) { validateS2SSyncTransfersAreScheduled(a, []string{}, mockedRPC) msg := <-mockedLcm.dryrunLog - syncMessage := common.CopyTransfer{} + syncMessage := DryrunTransfer{} errMarshal := json.Unmarshal([]byte(msg), &syncMessage) a.Nil(errMarshal) a.True(strings.Contains(syncMessage.Source, blobsToDelete[0])) - a.Equal("File", syncMessage.EntityType.String()) - a.Equal("BlockBlob", string(syncMessage.BlobType)) + a.Equal(common.EEntityType.File(), syncMessage.EntityType) + a.Equal(common.EBlobType.BlockBlob(), syncMessage.BlobType) }) } diff --git a/cmd/zt_sync_file_file_test.go b/cmd/zt_sync_file_file_test.go index 09b8d2e58..2b35c3b71 100644 --- a/cmd/zt_sync_file_file_test.go +++ b/cmd/zt_sync_file_file_test.go @@ -35,7 +35,7 @@ func TestSyncSourceComparator(t *testing.T) { // set up the indexer as well as the source comparator indexer := newObjectIndexer() - sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, false, false) + 
sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, false) // create a sample destination object sampleDestinationObject := StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now(), md5: destMD5} @@ -89,7 +89,7 @@ func TestSyncSrcCompDisableComparator(t *testing.T) { // set up the indexer as well as the source comparator indexer := newObjectIndexer() - sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, true, false) + sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, true) // test the comparator in case a given source object is not present at the destination // meaning no entry in the index, so the comparator should pass the given object to schedule a transfer @@ -139,7 +139,7 @@ func TestSyncDestinationComparator(t *testing.T) { // set up the indexer as well as the destination comparator indexer := newObjectIndexer() - destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, false, false) + destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, false) // create a sample source object sampleSourceObject := StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now(), md5: srcMD5} @@ -197,7 +197,7 @@ func TestSyncDestCompDisableComparison(t *testing.T) { // set up the indexer as well as the destination comparator indexer := newObjectIndexer() - destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, true, false) + destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, true) // create a sample source object currTime := time.Now() diff --git a/common/credCache_darwin.go b/common/credCache_darwin.go index 7e3cdb0a0..24648196e 100644 --- a/common/credCache_darwin.go +++ b/common/credCache_darwin.go @@ -23,9 +23,8 @@ package common import ( "errors" "fmt" + "github.com/keybase/go-keychain" "sync" - - "github.com/wastore/keychain" // forked and customized from github.com/keybase/go-keychain, todo: make a release to ensure stability ) // For SSH environment, user need unlock login keychain once, to enable AzCopy to Add/Update/Retrieve/Delete key. diff --git a/common/extensions.go b/common/extensions.go index 593909d2a..8fc0d20d6 100644 --- a/common/extensions.go +++ b/common/extensions.go @@ -144,6 +144,8 @@ func GenerateFullPath(rootPath, childPath string) string { // if the childPath is empty, it means the rootPath already points to the desired entity if childPath == "" { return rootPath + } else if childPath == "\x00" { // The enumerator has asked us to target with a / at the end of our root path. This is a massive hack. When the footgun happens later, ping Adele! + return rootPath + rootSeparator } // otherwise, make sure a path separator is inserted between the rootPath if necessary @@ -167,6 +169,7 @@ func GenerateFullPathWithQuery(rootPath, childPath, extraQuery string) string { // Block Names of blobs are of format noted below. 
// <5B empty placeholder> <16B GUID of AzCopy re-interpreted as string><5B PartNum><5B Index in the jobPart><5B blockNum> const AZCOPY_BLOCKNAME_LENGTH = 48 + func GenerateBlockBlobBlockID(blockNamePrefix string, index int32) string { blockID := []byte(fmt.Sprintf("%s%05d", blockNamePrefix, index)) return base64.StdEncoding.EncodeToString(blockID) diff --git a/common/fe-ste-models.go b/common/fe-ste-models.go index 9915a170c..6eba969a1 100644 --- a/common/fe-ste-models.go +++ b/common/fe-ste-models.go @@ -1575,6 +1575,14 @@ func (e EntityType) String() string { return enum.StringInt(e, reflect.TypeOf(e)) } +func (e *EntityType) Parse(s string) error { + val, err := enum.ParseInt(reflect.TypeOf(e), s, true, true) + if err == nil { + *e = val.(EntityType) + } + return err +} + //////////////////////////////////////////////////////////////// var EFolderPropertiesOption = FolderPropertyOption(0) diff --git a/common/folderCreationTracker_interface.go b/common/folderCreationTracker_interface.go index 556e73691..798d2aee3 100644 --- a/common/folderCreationTracker_interface.go +++ b/common/folderCreationTracker_interface.go @@ -7,7 +7,6 @@ package common type FolderCreationTracker interface { CreateFolder(folder string, doCreation func() error) error ShouldSetProperties(folder string, overwrite OverwriteOption, prompter Prompter) bool - StopTracking(folder string) } type Prompter interface { diff --git a/common/logger.go b/common/logger.go index 3cec9c7c6..7e6c69fa5 100644 --- a/common/logger.go +++ b/common/logger.go @@ -116,8 +116,7 @@ func (jl *jobLogger) CloseLog() { } jl.logger.Println("Closing Log") - err := jl.file.Close() - PanicIfErr(err) + _ = jl.file.Close() // If it was already closed, that's alright. We wanted to close it, anyway. } func (jl jobLogger) Log(loglevel LogLevel, msg string) { diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index ead150881..72afc2e06 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache" "net" "net/http" "net/url" @@ -41,7 +42,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/adal" + // importing the cache module registers the cache implementation for the current platform + _ "github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache" ) // ApplicationID represents 1st party ApplicationID for AzCopy. @@ -58,6 +60,8 @@ const ManagedDiskScope = "https://disk.azure.com//.default" // There must be a t const DefaultTenantID = "common" const DefaultActiveDirectoryEndpoint = "https://login.microsoftonline.com" +const TokenCache = "AzCopyTokenCache" + // UserOAuthTokenManager for token management. type UserOAuthTokenManager struct { oauthClient *http.Client @@ -133,7 +137,7 @@ func (uotm *UserOAuthTokenManager) GetTokenInfo(ctx context.Context) (*OAuthToke return tokenInfo, nil } -func (uotm *UserOAuthTokenManager) validateAndPersistLogin(oAuthTokenInfo *OAuthTokenInfo, persist bool) error { +func (uotm *UserOAuthTokenManager) validateAndPersistLogin(oAuthTokenInfo *OAuthTokenInfo) error { // Use default tenant ID and active directory endpoint, if nothing specified. 
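// A sketch of how the login helpers below feed this function, mirroring MSILogin
// (all values assumed for illustration):
//
//	info := &OAuthTokenInfo{
//		LoginType:    EAutoLoginType.MSI(),
//		IdentityInfo: IdentityInfo{ClientID: "<client-id>"},
//		Persist:      true, // SaveToken only runs when Persist is set
//	}
//	err := uotm.validateAndPersistLogin(info)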
if oAuthTokenInfo.Tenant == "" { oAuthTokenInfo.Tenant = DefaultTenantID @@ -152,7 +156,7 @@ func (uotm *UserOAuthTokenManager) validateAndPersistLogin(oAuthTokenInfo *OAuth } uotm.stashedInfo = oAuthTokenInfo - if persist && err == nil { + if oAuthTokenInfo.Persist { err = uotm.credCache.SaveToken(*oAuthTokenInfo) if err != nil { return err @@ -165,28 +169,30 @@ func (uotm *UserOAuthTokenManager) validateAndPersistLogin(oAuthTokenInfo *OAuth func (uotm *UserOAuthTokenManager) WorkloadIdentityLogin(persist bool) error { oAuthTokenInfo := &OAuthTokenInfo{ LoginType: EAutoLoginType.Workload(), + Persist: persist, } - return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) + return uotm.validateAndPersistLogin(oAuthTokenInfo) } func (uotm *UserOAuthTokenManager) AzCliLogin(tenantID string) error { oAuthTokenInfo := &OAuthTokenInfo{ LoginType: EAutoLoginType.AzCLI(), Tenant: tenantID, + Persist: false, // AzCLI creds do not need to be persisted, AzCLI handles persistence. } - // CLI creds will not be persisted. AzCLI would have already persistd that - return uotm.validateAndPersistLogin(oAuthTokenInfo, false) + return uotm.validateAndPersistLogin(oAuthTokenInfo) } func (uotm *UserOAuthTokenManager) PSContextToken(tenantID string) error { oAuthTokenInfo := &OAuthTokenInfo{ LoginType: EAutoLoginType.PsCred(), Tenant: tenantID, + Persist: false, // Powershell creds do not need to be persisted, Powershell handles persistence. } - return uotm.validateAndPersistLogin(oAuthTokenInfo, false) + return uotm.validateAndPersistLogin(oAuthTokenInfo) } // MSILogin tries to get token from MSI, persist indicates whether to cache the token on local disk. @@ -198,9 +204,10 @@ func (uotm *UserOAuthTokenManager) MSILogin(identityInfo IdentityInfo, persist b oAuthTokenInfo := &OAuthTokenInfo{ LoginType: EAutoLoginType.MSI(), IdentityInfo: identityInfo, + Persist: persist, } - return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) + return uotm.validateAndPersistLogin(oAuthTokenInfo) } // SecretLogin is a UOTM shell for secretLoginNoUOTM. @@ -214,9 +221,10 @@ func (uotm *UserOAuthTokenManager) SecretLogin(tenantID, activeDirectoryEndpoint Secret: secret, CertPath: "", }, + Persist: persist, } - return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) + return uotm.validateAndPersistLogin(oAuthTokenInfo) } // CertLogin non-interactively logs in using a specified certificate, certificate password, and activedirectory endpoint. @@ -231,76 +239,25 @@ func (uotm *UserOAuthTokenManager) CertLogin(tenantID, activeDirectoryEndpoint, Secret: certPass, CertPath: absCertPath, }, + Persist: persist, } - return uotm.validateAndPersistLogin(oAuthTokenInfo, persist) + return uotm.validateAndPersistLogin(oAuthTokenInfo) } // UserLogin interactively logins in with specified tenantID and activeDirectoryEndpoint, persist indicates whether to // cache the token on local disk. func (uotm *UserOAuthTokenManager) UserLogin(tenantID, activeDirectoryEndpoint string, persist bool) error { - // Use default tenant ID and active directory endpoint, if nothing specified. 
- if tenantID == "" { - tenantID = DefaultTenantID - } - if activeDirectoryEndpoint == "" { - activeDirectoryEndpoint = DefaultActiveDirectoryEndpoint - } - - // Init OAuth config - oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - if err != nil { - return err - } - - // Acquire the device code - deviceCode, err := adal.InitiateDeviceAuth( - uotm.oauthClient, - *oauthConfig, - ApplicationID, - Resource) - if err != nil { - return fmt.Errorf("failed to login with tenantID %q, Azure directory endpoint %q, %v", - tenantID, activeDirectoryEndpoint, err) - } - - // Display the authentication message - fmt.Println(*deviceCode.Message + "\n") - - if tenantID == "" || tenantID == "common" { - fmt.Println("INFO: Logging in under the \"Common\" tenant. This will log the account in under its home tenant.") - fmt.Println("INFO: If you plan to use AzCopy with a B2B account (where the account's home tenant is separate from the tenant of the target storage account), please sign in under the target tenant with --tenant-id") - } - - // Wait here until the user is authenticated - // TODO: check if adal Go SDK has new method which supports context, currently ctrl-C can stop the login in console interactively. - token, err := adal.WaitForUserCompletion(uotm.oauthClient, deviceCode) - if err != nil { - return fmt.Errorf("failed to login with tenantID %q, Azure directory endpoint %q, %v", - tenantID, activeDirectoryEndpoint, err) - } - - oAuthTokenInfo := OAuthTokenInfo{ + oAuthTokenInfo := &OAuthTokenInfo{ LoginType: EAutoLoginType.Device(), - Token: *token, Tenant: tenantID, ActiveDirectoryEndpoint: activeDirectoryEndpoint, ApplicationID: ApplicationID, + DeviceCodeInfo: &azidentity.AuthenticationRecord{}, + Persist: persist, } - uotm.stashedInfo = &oAuthTokenInfo - // to dump for diagnostic purposes: - // buf, _ := json.Marshal(oAuthTokenInfo) - // panic("don't check me in. Buf is " + string(buf)) - - if persist { - err = uotm.credCache.SaveToken(oAuthTokenInfo) - if err != nil { - return err - } - } - - return nil + return uotm.validateAndPersistLogin(oAuthTokenInfo) } // getCachedTokenInfo get a fresh token from local disk cache. @@ -414,13 +371,57 @@ func (uotm *UserOAuthTokenManager) getTokenInfoFromEnvVar(ctx context.Context) ( // OAuthTokenInfo contains info necessary for refresh OAuth credentials. type OAuthTokenInfo struct { azcore.TokenCredential `json:"-"` - adal.Token + Token Tenant string `json:"_tenant"` ActiveDirectoryEndpoint string `json:"_ad_endpoint"` LoginType AutoLoginType `json:"_token_refresh_source"` ApplicationID string `json:"_application_id"` IdentityInfo IdentityInfo SPNInfo SPNInfo + // Note: ClientID should be only used for internal integrations through env var with refresh token. + // It indicates the Application ID assigned to your app when you registered it with Azure AD. + // In this case AzCopy refresh token on behalf of caller. + // For more details, please refer to + // https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-protocols-oauth-code#refreshing-the-access-tokens + ClientID string `json:"_client_id"` + DeviceCodeInfo *azidentity.AuthenticationRecord `json:"_authentication_record,omitempty"` + Persist bool `json:"_persist"` +} + +// Token encapsulates the access token used to authorize Azure requests. 
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := t.ExpiresOn.Float64() + if err != nil { + s = -3600 + } + + expiration := date.NewUnixTimeFromSeconds(s) + + return time.Time(expiration).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return !t.Expires().After(time.Now().Add(0)) } // IdentityInfo contains info for MSI. @@ -458,29 +459,22 @@ func (identityInfo *IdentityInfo) Validate() error { } // Refresh gets new token with token info. -func (credInfo *OAuthTokenInfo) Refresh(ctx context.Context) (*adal.Token, error) { +func (credInfo *OAuthTokenInfo) Refresh(ctx context.Context) (*Token, error) { // TODO: I think this method is only necessary until datalake is migrated. // Returns cached TokenCredential or creates a new one if it hasn't been created yet. tc, err := credInfo.GetTokenCredential() if err != nil { return nil, err } - if credInfo.LoginType == EAutoLoginType.TokenStore() || credInfo.LoginType != EAutoLoginType.Device() { - scopes := []string{StorageScope} - t, err := tc.GetToken(ctx, policy.TokenRequestOptions{Scopes: scopes}) - if err != nil { - return nil, err - } - return &adal.Token{ - AccessToken: t.Token, - ExpiresOn: json.Number(strconv.FormatInt(int64(t.ExpiresOn.Sub(date.UnixEpoch())/time.Second), 10)), - }, nil - } else { - if dcc, ok := tc.(*DeviceCodeCredential); ok { - return dcc.RefreshTokenWithUserCredential(ctx, Resource) - } + scopes := []string{StorageScope} + t, err := tc.GetToken(ctx, policy.TokenRequestOptions{Scopes: scopes}) + if err != nil { + return nil, err } - return nil, errors.New("invalid token info") + return &Token{ + AccessToken: t.Token, + ExpiresOn: json.Number(strconv.FormatInt(int64(t.ExpiresOn.Sub(date.UnixEpoch())/time.Second), 10)), + }, nil } // Single instance token store credential cache shared by entire azcopy process. @@ -682,59 +676,51 @@ func (credInfo *OAuthTokenInfo) GetWorkloadIdentityCredential() (azcore.TokenCre return tc, nil } -type DeviceCodeCredential struct { - token adal.Token - aadEndpoint string - tenantID string - clientID string -} - -func (dcc *DeviceCodeCredential) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) { - waitDuration := dcc.token.Expires().Sub(time.Now().UTC()) / 2 - if dcc.token.WillExpireIn(waitDuration) { - resource := strings.TrimSuffix(options.Scopes[0], "/.default") - _, err := dcc.RefreshTokenWithUserCredential(ctx, resource) - if err != nil { - return azcore.AccessToken{}, err - } - } - return azcore.AccessToken{Token: dcc.token.AccessToken, ExpiresOn: dcc.token.Expires()}, nil -} - -// RefreshTokenWithUserCredential gets new token with user credential through refresh. 
-func (dcc *DeviceCodeCredential) RefreshTokenWithUserCredential(ctx context.Context, resource string) (*adal.Token, error) { - targetResource := resource - if dcc.token.Resource != "" && dcc.token.Resource != targetResource { - targetResource = dcc.token.Resource - } - - oauthConfig, err := adal.NewOAuthConfig(dcc.aadEndpoint, dcc.tenantID) +func (credInfo *OAuthTokenInfo) GetDeviceCodeCredential() (azcore.TokenCredential, error) { + authorityHost, err := getAuthorityURL(credInfo.Tenant, credInfo.ActiveDirectoryEndpoint) if err != nil { return nil, err } - - // ClientID in credInfo is optional which is used for internal integration only. - // Use AzCopy's 1st party applicationID for refresh by default. - spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - Iff(dcc.clientID != "", dcc.clientID, ApplicationID), - targetResource, - dcc.token) + var persistentCache azidentity.Cache + if credInfo.Persist { + persistentCache, err = cache.New(&cache.Options{ + Name: TokenCache, + }) + if err != nil { + return nil, err + } + } + // Read the record + record := IffNotNil(credInfo.DeviceCodeInfo, azidentity.AuthenticationRecord{}) + tc, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{ + TenantID: credInfo.Tenant, + ClientID: ApplicationID, + DisableAutomaticAuthentication: true, + Cache: persistentCache, + AuthenticationRecord: record, + ClientOptions: azcore.ClientOptions{ + Cloud: cloud.Configuration{ActiveDirectoryAuthorityHost: authorityHost.String()}, + Transport: newAzcopyHTTPClient(), + }, + }) if err != nil { return nil, err } - if err := spt.RefreshWithContext(ctx); err != nil { - return nil, err + if record == (azidentity.AuthenticationRecord{}) { + // No stored record; call Authenticate to acquire one + record, err = tc.Authenticate(context.TODO(), &policy.TokenRequestOptions{Scopes: []string{StorageScope}}) + if err != nil { + return nil, err + } + if credInfo.Tenant == DefaultTenantID { + fmt.Println("INFO: Logging in under the \"Common\" tenant. 
This will log the account in under its home tenant.") + fmt.Println("INFO: If you plan to use AzCopy with a B2B account (where the account's home tenant is separate from the tenant of the target storage account), please sign in under the target tenant with --tenant-id") + } + // Store the record + credInfo.DeviceCodeInfo = &record } - newToken := spt.Token() - dcc.token = newToken - return &newToken, nil -} - -func (credInfo *OAuthTokenInfo) GetDeviceCodeCredential() (azcore.TokenCredential, error) { - tc := &DeviceCodeCredential{token: credInfo.Token, aadEndpoint: credInfo.ActiveDirectoryEndpoint, tenantID: credInfo.Tenant, clientID: credInfo.ApplicationID} credInfo.TokenCredential = tc return tc, nil } diff --git a/common/trieForDirPath.go b/common/trieForDirPath.go new file mode 100644 index 000000000..4f0e2c415 --- /dev/null +++ b/common/trieForDirPath.go @@ -0,0 +1,54 @@ +package common + +import ( + "strings" +) + +type TrieNode struct { + children map[string]*TrieNode + TransferIndex uint32 + UnregisteredButCreated bool +} + +type Trie struct { + Root *TrieNode +} + +func NewTrie() *Trie { + return &Trie{ + Root: &TrieNode{children: make(map[string]*TrieNode)}, + } +} + +// InsertDirNode inserts the dirPath into the Trie and returns the corresponding node and if it had to be created +func (t *Trie) InsertDirNode(dirPath string) (*TrieNode, bool) { + node, _, created := t.getDirNodeHelper(dirPath, true) + return node, created +} + +// GetDirNode returns the directory node if it exists +func (t *Trie) GetDirNode(dirPath string) (*TrieNode, bool) { + node, exists, _ := t.getDirNodeHelper(dirPath, false) + return node, exists +} + +// getDirNodeHelper returns the node, if the node exists and if the node had to be created +func (t *Trie) getDirNodeHelper(dirPath string, createIfNotExists bool) (*TrieNode, bool, bool) { + node := t.Root + segments := strings.Split(dirPath, "/") + created := false + for _, segment := range segments { + child, exists := node.children[segment] + if !exists { + if createIfNotExists { + child = &TrieNode{children: make(map[string]*TrieNode)} + node.children[segment] = child + created = true + } else { + return nil, false, false + } + } + node = child + } + return node, true, created +} diff --git a/common/trieForDirPath_test.go b/common/trieForDirPath_test.go new file mode 100644 index 000000000..56a98b2db --- /dev/null +++ b/common/trieForDirPath_test.go @@ -0,0 +1,95 @@ +package common + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestTrie_NewTrie(t *testing.T) { + a := assert.New(t) + trie := NewTrie() + a.NotNil(trie) + a.NotNil(trie.Root) + a.Empty(trie.Root.children) +} + +func TestTrie_InsertDirNode(t *testing.T) { + a := assert.New(t) + // One Level + trie := NewTrie() + + n1, created := trie.InsertDirNode("mydir") + a.NotNil(n1) + a.True(created) + a.Len(trie.Root.children, 1) + a.Contains(trie.Root.children, "mydir") + a.Empty(trie.Root.children["mydir"].children) + + n2, created := trie.InsertDirNode("mydir") + a.NotNil(n2) + a.Equal(n1, n2) + a.False(created) + + // Multiple Levels + trie = NewTrie() + + n1, created = trie.InsertDirNode("mydir/mysubdir/lastlevel") + a.NotNil(n1) + a.True(created) + a.Len(trie.Root.children, 1) + a.Contains(trie.Root.children, "mydir") + a.Len(trie.Root.children["mydir"].children, 1) + a.Contains(trie.Root.children["mydir"].children, "mysubdir") + a.Len(trie.Root.children["mydir"].children["mysubdir"].children, 1) + a.Contains(trie.Root.children["mydir"].children["mysubdir"].children, 
"lastlevel") + a.Empty(trie.Root.children["mydir"].children["mysubdir"].children["lastlevel"].children) + a.Equal(trie.Root.children["mydir"].children["mysubdir"].children["lastlevel"], n1) + + // Insert in middle + n2, created = trie.InsertDirNode("mydir/mysubdir") + a.False(created) + a.Equal(trie.Root.children["mydir"].children["mysubdir"], n2) + + // Insert a different child + n3, created := trie.InsertDirNode("mydir/mysubdirsibling") + a.True(created) + + a.Len(trie.Root.children["mydir"].children, 2) + a.Contains(trie.Root.children["mydir"].children, "mysubdir") + a.Contains(trie.Root.children["mydir"].children, "mysubdirsibling") + a.Empty(trie.Root.children["mydir"].children["mysubdirsibling"].children) + a.Equal(trie.Root.children["mydir"].children["mysubdirsibling"], n3) + +} + +func TestTrie_GetDirNode(t *testing.T) { + a := assert.New(t) + // One Level + trie := NewTrie() + + n, ok := trie.GetDirNode("mydir/mysubdir/lastlevel") + a.Nil(n) + a.False(ok) + + n1, _ := trie.InsertDirNode("mydir") + n2, ok := trie.GetDirNode("mydir") + a.True(ok) + a.Equal(n1, n2) + + n1, _ = trie.InsertDirNode("mydir/mysubdir/lastlevel") + n2, _ = trie.InsertDirNode("mydir/mysubdirsibling") + n3, ok := trie.GetDirNode("mydir") + a.True(ok) + a.Equal(trie.Root.children["mydir"], n3) + + n4, ok := trie.GetDirNode("mydir/mysubdir/lastlevel/actuallyiwantthisone") + a.Nil(n4) + a.False(ok) + + _, ok = trie.GetDirNode("mydir/mysubdir") + a.True(ok) + _, ok = trie.GetDirNode("mydir/mysubdir/lastlevel") + a.True(ok) + _, ok = trie.GetDirNode("mydir/mysubdirsibling") + a.True(ok) +} diff --git a/common/version.go b/common/version.go index d6081f7a7..cd360150f 100644 --- a/common/version.go +++ b/common/version.go @@ -1,6 +1,6 @@ package common -const AzcopyVersion = "10.26.0" +const AzcopyVersion = "10.28.0-Preview" const UserAgent = "AzCopy/" + AzcopyVersion const S3ImportUserAgent = "S3Import " + UserAgent const GCPImportUserAgent = "GCPImport " + UserAgent diff --git a/common/zt_credCache_test.go b/common/zt_credCache_test.go index 55f6f037f..072018ec7 100644 --- a/common/zt_credCache_test.go +++ b/common/zt_credCache_test.go @@ -23,12 +23,10 @@ package common import ( "github.com/stretchr/testify/assert" "testing" - - "github.com/Azure/go-autorest/autorest/adal" ) var fakeTokenInfo = OAuthTokenInfo{ - Token: adal.Token{ + Token: Token{ AccessToken: 
"aaa0eXAiOiJKz1QiLCJhbGciOibbbbI1NiIsIng1dCcCImlCakwxUmNdddhpeTRmcHhJeGRacW5oTTJZayIsImtpZCI948lCakwxUmNxemhpeTRmcHhJeGRacW9oTTJZayJ9.eyJhdWQiOiJodHRwczovL3N0b3JhZ2UuYXp1cmUuY29tIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTUyODEwNDQ5NywibmJmIjoxNTI4MTA0NDk3LCJleHAiOjE1MjgxMDgzOTcsIl9jbGFpbV9uYW1lcyI6eyJncm91aEHiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy9hOTIzZjhkMC1kNGNlLTQyODAtOTEzNS01ZWE2ODVjMzgwMjYvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhIQUFBQU1nVkUzWE9DMHdQcG9OeGt1b2VsK1haVGNwOEhLekRORlp4NDZkMW5VN2VHUGNmbWdWNGxnUlN0NjUwcndXaHJPaCtaTXlGa3h2S3hVR3QvTHBjanNnPT0iLCJhbXIiOlsid2lhIiwibWZhIl0sImFwcGlkIjoiMTk1MGEyNTgtMjI3Yi00ZTMxLWE5Y2YtNzE3NDk1OTQ1ZmMyIiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIyMjFjZTY3Yy1mYjM3LTQzMjYtYWJjYy0zNTRhZGJmNzk1NWYiLCJmYW1pbHlfbmFtZSI6IkZhbiIsImdpdmVuX25hbWUiOiJKYXNvbiIsImluX2NvcnAiOiJ0cnVlIiwiaXBhZGRyIjoiMTY3LjIyMC4yNTUuNTgiLCJuYW1lIjoiSmFzb24gRmFuIiwib2lkIjoiYTkyM2Y4ZDAtZDRjZS00MjgwLTkxMzUtNWVhNjg1YzM4MDI2Iiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxNDY3NzMwODUtOTAzMzYzMjg1LTcxOTM0NDcwNy0xODI4ODgzIiwicHVpZCI6IjEwMDMwMDAwOEFCNjkzQTUi10JzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJBVVBFWXo1Y0xPd1BYcmRQaUF2OXZRamNGelpDN3dRRWd5dUJhejFfVnBFIiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJqaWFjZmFuQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJqaWFjZmFuQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiJfTlpKdlVQVG4wdTExTVFrTEcwTEFBIiwidmVyIjoiMS4wIn0.J3LZgQ7RTmqZzVcnsiruzLfcuK-vceNja7gp6wJhwwcPN1LzHK9Q1ANRVBKDMRulHiWvPNmavxf493EqkvgjHDkGSSTL3S7elLVF4Hr2SHHhUqyWoiEukY0jX5DT2tg71L4KujV7csJN-7ECqXyU0DSrRSRf3gCbD7c2ne5CFVCi1lEpEK_1lLiRZe45TTuJXmQrxEr4B6fY5MRkBz05lIbhxsUPmUunR02_-coNgQcHBOkdGdLGx4qjbzn58EJO0F2bimDRend3Tjnoia2aFq_kvQslcLU3BxIvYO5TZNfGkZyOlavoKEccPPmAb033zg9AKD_6_7K-R0mu1qmZUA", RefreshToken: "Y2QwMTFkYjQ3LyIsImlhdCI6MTUyODEwNDQ5NywibmJmIjoxNTI4MTA0NDk3LCJleHAiOjE1MjgxMDgzOTcsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy9hOTIzZjhkMC1kNGNlLTQyODAtOTEzNS01ZWE2ODVjMzgwMjYvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhIQUFBQU1nVkUzWE9DMHdQcG9OeGt1b2VsK1haVGNwOEhLekRORlp4NDZkMW5VN2VHUGNmbWdWNGxnUlN0NjUwcndXaHJPaCtaTXlGa3h2S3hVR3QvTHBjanNnPT0iLCJhbXIiOlsid2lhIiwibWZhIl0sImFwcGlkIjoiMTk1MGEyNTgtMjI3Yi00ZTMxLWE5Y2YtNzE3NDk1OTQ1ZmMyIiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIyMjFjZTY3Yy1mYjM3LTQzMjYtYWJjYy0zNTRhZGJmNzk1NWYiLCJmYW1pbHlfbmFtZSI6IkZhbiIsImdpdmVuX25hbWUiOiJKYXNvbiIsImluX2NvcnAiOiJ0cnVlIiwiaXBhZGRyIjoiMTY3LjIyMC4yNTUuNTgiLCJuYW1lIjoiSmFzb24gRmFuIiwib2lkIjoiYTkyM2Y4ZDAtZDRjZS00MjgwLTkxMzUtNWVhNjg1YzM4MDI2Iiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxNDY3NzMwODUtOTAzMzYzMjg1LTcxOTM0NDcwNy0xODI4ODgzIiwicHVpZCI6IjEwMDMwMDAwOEFCNjkzQTUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJBVVBFWXo1Y0xPd1BYcmRQaUF2OXZRamNGelpDN3dRRWd5dUJhejFfVnBFIiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJqaWF", ExpiresIn: "3599", @@ -101,4 +99,4 @@ func TestCredCacheSaveLoadDeleteHas(t *testing.T) { // Test has cached token, and validate remove token. 
hasCachedToken, err = credCache.HasCachedToken() a.False(hasCachedToken) -} \ No newline at end of file +} diff --git a/e2etest/newe2e_generic_wrangling.go b/e2etest/newe2e_generic_wrangling.go index 9f8713827..9aaa362ab 100644 --- a/e2etest/newe2e_generic_wrangling.go +++ b/e2etest/newe2e_generic_wrangling.go @@ -88,6 +88,22 @@ func ListOfAny[T any](in []T) []any { return out } +func Keys[K comparable, V any](in map[K]V) []K { + out := make([]K, 0, len(in)) + for k, _ := range in { + out = append(out, k) + } + return out +} + +func AnyKeys[K comparable, V any](in map[K]V) []any { + out := make([]any, 0, len(in)) + for k, _ := range in { + out = append(out, k) + } + return out +} + func CloneMap[K comparable, V any](in map[K]V) map[K]V { out := make(map[K]V) @@ -98,6 +114,23 @@ func CloneMap[K comparable, V any](in map[K]V) map[K]V { return out } +func CloneMapWithRule[K comparable, V any](in map[K]V, rule func(K, V) (key K, value V, include bool)) map[K]V { + out := make(map[K]V) + + for k, v := range in { + var include bool + k, v, include = rule(k, v) + + if !include { + continue + } + + out[k] = v + } + + return out +} + func ListContains[I comparable](item I, in []I) bool { for _, v := range in { if item == v { @@ -108,6 +141,16 @@ func ListContains[I comparable](item I, in []I) bool { return false } +func Any[I any](items []I, f func(I) bool) bool { + for _, v := range items { + if f(v) { + return true + } + } + + return false +} + func ClonePointer[T any](in *T) *T { if in == nil { return nil diff --git a/e2etest/newe2e_object_content.go b/e2etest/newe2e_object_content.go index 2ba7f9a7e..dd7a814d1 100644 --- a/e2etest/newe2e_object_content.go +++ b/e2etest/newe2e_object_content.go @@ -7,6 +7,8 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" "io" "math/rand" + "strconv" + "time" ) type ObjectContentContainer interface { @@ -25,10 +27,47 @@ func SizeFromString(objectSize string) int64 { return longSize } -func NewRandomObjectContentContainer(a Asserter, size int64) ObjectContentContainer { +var megaSize = []string{ + "B", + "KB", + "MB", + "GB", + "TB", + "PB", + "EB", +} + +func SizeToString(size int64, megaUnits bool) string { + units := []string{ + "B", + "KiB", + "MiB", + "GiB", + "TiB", + "PiB", + "EiB", // Let's face it, a file, account, or container probably won't be more than 1000 exabytes in YEARS. + // (and int64 literally isn't large enough to handle too many exbibytes. 
128-bit processors when) + } + unit := 0 + floatSize := float64(size) + gigSize := 1024 + + if megaUnits { + gigSize = 1000 + units = megaSize + } + + for floatSize/float64(gigSize) >= 1 { + unit++ + floatSize /= float64(gigSize) + } + + return strconv.FormatFloat(floatSize, 'f', 2, 64) + " " + units[unit] +} + +func NewRandomObjectContentContainer(size int64) ObjectContentContainer { buf := make([]byte, size) - _, err := rand.Read(buf) - a.NoError("Generate random data", err) + _, _ = rand.New(rand.NewSource(time.Now().Unix())).Read(buf) return &ObjectContentContainerBuffer{buf} } diff --git a/e2etest/newe2e_object_resource_mapping.go b/e2etest/newe2e_object_resource_mapping.go index 400de245b..c1b2b0c1e 100644 --- a/e2etest/newe2e_object_resource_mapping.go +++ b/e2etest/newe2e_object_resource_mapping.go @@ -1,6 +1,8 @@ package e2etest -import "path" +import ( + "path" +) // this should maybe be in newe2e_resource_definitions but it felt relevant to have on its own diff --git a/e2etest/newe2e_resource_manager_getter.go b/e2etest/newe2e_resource_manager_getter.go index 0d4fc399c..d23d35be3 100644 --- a/e2etest/newe2e_resource_manager_getter.go +++ b/e2etest/newe2e_resource_manager_getter.go @@ -15,6 +15,7 @@ type GetResourceOptions struct { // on *Local*, this inherently creates a container. But that's fine, because it's likely to be used. func GetRootResource(a Asserter, location common.Location, varOpts ...GetResourceOptions) ResourceManager { opts := FirstOrZero(varOpts) + defaultacct := PrimaryStandardAcct switch location { case common.ELocation.Local(): @@ -23,9 +24,15 @@ func GetRootResource(a Asserter, location common.Location, varOpts ...GetResourc } return NewLocalContainer(a) - case common.ELocation.Blob(), common.ELocation.BlobFS(), common.ELocation.File(): - // acct handles the dryrun case for us - acct := GetAccount(a, DerefOrDefault(opts.PreferredAccount, PrimaryStandardAcct)) + case common.ELocation.BlobFS(): + // if an HNS account is attached and we're requesting BlobFS, prefer it + if _, ok := AccountRegistry[PrimaryHNSAcct]; ok { + defaultacct = PrimaryHNSAcct + } + + fallthrough // Continue to grab the account + case common.ELocation.Blob(), common.ELocation.File(): + acct := GetAccount(a, DerefOrDefault(opts.PreferredAccount, defaultacct)) return acct.GetService(a, location) default: a.Error(fmt.Sprintf("TODO: Location %s is not yet supported", location)) diff --git a/e2etest/newe2e_resource_manager_interface.go b/e2etest/newe2e_resource_manager_interface.go index 7b54159b7..bca9b72c8 100644 --- a/e2etest/newe2e_resource_manager_interface.go +++ b/e2etest/newe2e_resource_manager_interface.go @@ -14,6 +14,7 @@ type GetURIOptions struct { RemoteOpts RemoteURIOpts + LocalOpts LocalURIOpts AzureOpts AzureURIOpts // The wildcard string to append to the end of a resource URI.
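// (Illustrative only: a wildcard scenario might set this to "/*" so the resulting container URI addresses every object directly beneath it.)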
Wildcard string @@ -31,6 +32,10 @@ type AzureURIOpts struct { SASValues GenericSignatureValues } +type LocalURIOpts struct { + PreferUNCPath bool +} + type ResourceManager interface { Location() common.Location Level() cmd.LocationLevel diff --git a/e2etest/newe2e_resource_managers_blob.go b/e2etest/newe2e_resource_managers_blob.go index ff55768c0..dc7cc3cef 100644 --- a/e2etest/newe2e_resource_managers_blob.go +++ b/e2etest/newe2e_resource_managers_blob.go @@ -181,6 +181,10 @@ type BlobContainerResourceManager struct { internalClient *container.Client } +func (b *BlobContainerResourceManager) GetDatalakeContainerManager(a Asserter) ContainerResourceManager { + return b.internalAccount.GetService(a, common.ELocation.BlobFS()).GetContainer(b.containerName) +} + func (b *BlobContainerResourceManager) ValidAuthTypes() ExplicitCredentialTypes { return (&BlobServiceResourceManager{}).ValidAuthTypes() } @@ -642,6 +646,7 @@ type BlobObjectGetPropertiesOptions struct { func (b *BlobObjectResourceManager) GetPropertiesWithOptions(a Asserter, options *BlobObjectGetPropertiesOptions) ObjectProperties { a.HelperMarker().Helper() + resp, err := b.internalClient.GetProperties(ctx, &blob.GetPropertiesOptions{ CPKInfo: nil, }) @@ -677,9 +682,10 @@ func (b *BlobObjectResourceManager) GetPropertiesWithOptions(a Asserter, options Type: resp.BlobType, Tags: func() map[string]string { out := make(map[string]string) - if b.internalAccount.AccountType() == EAccountType.PremiumPageBlobs() { + if b.internalAccount.AccountType() == EAccountType.PremiumPageBlobs() || b.internalAccount.AccountType() == EAccountType.HierarchicalNamespaceEnabled() { return out } + resp, err := b.internalClient.GetTags(ctx, nil) a.NoError("Get tags", err) for _, tag := range resp.BlobTagSet { diff --git a/e2etest/newe2e_resource_managers_local.go b/e2etest/newe2e_resource_managers_local.go index 809350391..79b21ad2c 100644 --- a/e2etest/newe2e_resource_managers_local.go +++ b/e2etest/newe2e_resource_managers_local.go @@ -55,9 +55,15 @@ func (l *LocalContainerResourceManager) Level() cmd.LocationLevel { return cmd.ELocationLevel.Container() } -func (l *LocalContainerResourceManager) URI(opts ...GetURIOptions) string { +func (l *LocalContainerResourceManager) URI(o ...GetURIOptions) string { base := l.RootPath - base = addWildCard(base, opts...) + base = addWildCard(base, o...) + + opts := FirstOrZero(o) + if opts.LocalOpts.PreferUNCPath { + base = common.ToExtendedPath(base) + } + return base } @@ -222,9 +228,15 @@ func (l *LocalObjectResourceManager) Level() cmd.LocationLevel { return cmd.ELocationLevel.Object() } -func (l *LocalObjectResourceManager) URI(opts ...GetURIOptions) string { +func (l *LocalObjectResourceManager) URI(o ...GetURIOptions) string { base := filepath.Join(l.container.RootPath, l.objectPath) - base = addWildCard(base, opts...) + base = addWildCard(base, o...) 
+ + opts := FirstOrZero(o) + if opts.LocalOpts.PreferUNCPath { + base = common.ToExtendedPath(base) + } + return base } diff --git a/e2etest/newe2e_runazcopy_stdout.go b/e2etest/newe2e_runazcopy_stdout.go index 9fa2e0aac..5c1585141 100644 --- a/e2etest/newe2e_runazcopy_stdout.go +++ b/e2etest/newe2e_runazcopy_stdout.go @@ -154,7 +154,7 @@ type AzCopyParsedCopySyncRemoveStdout struct { JobPlanFolder string LogFolder string - + InitMsg common.InitMsgJsonTemplate FinalStatus common.ListJobSummaryResponse } @@ -175,28 +175,46 @@ func (a *AzCopyParsedCopySyncRemoveStdout) Write(p []byte) (n int, err error) { } type AzCopyParsedDryrunStdout struct { - AzCopyParsedStdout - listenChan chan<- common.JsonOutputTemplate + AzCopyRawStdout + + fromTo common.FromTo // fallback for text output + + listenChan chan<- cmd.DryrunTransfer - ScheduledTransfers map[string]common.CopyTransfer + Transfers []cmd.DryrunTransfer + Raw map[string]bool + JsonMode bool } -func (a *AzCopyParsedDryrunStdout) Write(p []byte) (n int, err error) { - if a.listenChan == nil { - a.listenChan = a.OnParsedLine.SubscribeFunc(func(line common.JsonOutputTemplate) { - if line.MessageType == common.EOutputMessageType.Dryrun().String() { - var tx common.CopyTransfer - err = json.Unmarshal([]byte(line.MessageContent), &tx) - if err != nil { - return - } + +func (d *AzCopyParsedDryrunStdout) Write(p []byte) (n int, err error) { + lines := strings.Split(string(p), "\n") + for _, str := range lines { + if !d.JsonMode && strings.HasPrefix(str, "DRYRUN: ") { + if strings.HasPrefix(str, "DRYRUN: warn") { + continue + } - a.ScheduledTransfers[tx.Source] = tx + d.Raw[str] = true + } else { + var out common.JsonOutputTemplate + err = json.Unmarshal([]byte(str), &out) + if err != nil { + continue } - }) + + if out.MessageType != common.EOutputMessageType.Dryrun().String() { + continue + } + + var tx cmd.DryrunTransfer + err = json.Unmarshal([]byte(out.MessageContent), &tx) + if err != nil { + continue + } + + // Record the parsed transfer so ValidateDryRunOutput can read it back; without this, Transfers is never populated. + d.Transfers = append(d.Transfers, tx) + } } - return a.AzCopyParsedStdout.Write(p) + return d.AzCopyRawStdout.Write(p) } type AzCopyParsedJobsListStdout struct { diff --git a/e2etest/newe2e_scenario_variation_manager.go b/e2etest/newe2e_scenario_variation_manager.go index 6d116459a..926d28014 100644 --- a/e2etest/newe2e_scenario_variation_manager.go +++ b/e2etest/newe2e_scenario_variation_manager.go @@ -197,9 +197,10 @@ func (svm *ScenarioVariationManager) HelperMarker() HelperMarker { // =========== Variation Handling ========== var variationExcludedCallers = map[string]bool{ - "GetVariation": true, - "ResolveVariation": true, - "GetVariationCallerID": true, + "GetVariation": true, + "ResolveVariation": true, + "GetVariationCallerID": true, + "NamedResolveVariation": true, } func (svm *ScenarioVariationManager) VariationName() string { @@ -346,3 +347,10 @@ func ResolveVariation[T any](svm *ScenarioVariationManager, options []T) T { func ResolveVariationByID[T any](svm *ScenarioVariationManager, ID string, options []any) T { return GetTypeOrZero[T](svm.GetVariation(ID, ListOfAny(options))) } + +// NamedResolveVariation is similar to ResolveVariation, but resolves over the keys of options and hands back the value mapped to the chosen key.
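// Usage sketch (mirrors Scenario_VirtualDirectoryHandling later in this diff):
//
//	acct := NamedResolveVariation(svm, map[string]string{
//		"FNS": PrimaryStandardAcct,
//		"HNS": PrimaryHNSAcct,
//	})
//
// The chosen key becomes part of the variation name, and the value mapped to it is returned.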
+func NamedResolveVariation[T any](svm *ScenarioVariationManager, options map[string]T) T { + variation := GetTypeOrZero[string](svm.GetVariation(svm.GetVariationCallerID(), AnyKeys(options))) + + return options[variation] +} diff --git a/e2etest/newe2e_task_azcopy_job_validate.go b/e2etest/newe2e_task_azcopy_job_validate.go index 9c40decf6..55ef05fc8 100644 --- a/e2etest/newe2e_task_azcopy_job_validate.go +++ b/e2etest/newe2e_task_azcopy_job_validate.go @@ -386,7 +386,9 @@ func ValidatePlanFiles(sm *ScenarioVariationManager, stdOut AzCopyStdout, expect mmf.Unmap() } - for path, _ := range expected.Objects { - sm.Assert("object src: "+path.SrcPath+", dst: "+path.DstPath+"; was missing from the plan file.", Always{}) + for path, obj := range expected.Objects { + if DerefOrDefault(obj.ShouldBePresent, true) { + sm.Assert("object src: "+path.SrcPath+", dst: "+path.DstPath+"; was missing from the plan file.", Always{}) + } } } diff --git a/e2etest/newe2e_task_runazcopy.go b/e2etest/newe2e_task_runazcopy.go index ce6ad9fb2..ab62456cc 100644 --- a/e2etest/newe2e_task_runazcopy.go +++ b/e2etest/newe2e_task_runazcopy.go @@ -56,6 +56,8 @@ const ( // initially supporting a limited set of verbs AzCopyVerbSync AzCopyVerb = "sync" AzCopyVerbRemove AzCopyVerb = "remove" AzCopyVerbList AzCopyVerb = "list" + AzCopyVerbLogin AzCopyVerb = "login" + AzCopyVerbLogout AzCopyVerb = "logout" AzCopyVerbJobsList AzCopyVerb = "jobs" ) @@ -123,6 +125,7 @@ type AzCopyEnvironment struct { AzureClientId *string `env:"AZURE_CLIENT_ID"` InheritEnvironment bool + ManualLogin bool } func (env *AzCopyEnvironment) generateAzcopyDir(a ScenarioAsserter) { @@ -185,53 +188,55 @@ func (c *AzCopyCommand) applyTargetAuth(a Asserter, target ResourceManager) stri // Only set it if it wasn't already configured. If it was manually configured, // special testing may be occurring, and this may be indicated to just get a SAS-less URI. // Alternatively, we may have already configured it here once before. 
- if c.Environment.AutoLoginMode == nil && c.Environment.ServicePrincipalAppID == nil && c.Environment.ServicePrincipalClientSecret == nil && c.Environment.AutoLoginTenantID == nil { - if GlobalConfig.StaticResources() { - c.Environment.AutoLoginMode = pointerTo("SPN") - oAuthInfo := GlobalConfig.E2EAuthConfig.StaticStgAcctInfo.StaticOAuth - a.AssertNow("At least NEW_E2E_STATIC_APPLICATION_ID and NEW_E2E_STATIC_CLIENT_SECRET must be specified to use OAuth.", Empty{true}, oAuthInfo.ApplicationID, oAuthInfo.ClientSecret) - - c.Environment.ServicePrincipalAppID = &oAuthInfo.ApplicationID - c.Environment.ServicePrincipalClientSecret = &oAuthInfo.ClientSecret - c.Environment.AutoLoginTenantID = common.Iff(oAuthInfo.TenantID != "", &oAuthInfo.TenantID, nil) - } else { - // oauth should reliably work + if !c.Environment.ManualLogin { + + if c.Environment.AutoLoginMode == nil && c.Environment.ServicePrincipalAppID == nil && c.Environment.ServicePrincipalClientSecret == nil && c.Environment.AutoLoginTenantID == nil { + if GlobalConfig.StaticResources() { + c.Environment.AutoLoginMode = pointerTo("SPN") + oAuthInfo := GlobalConfig.E2EAuthConfig.StaticStgAcctInfo.StaticOAuth + a.AssertNow("At least NEW_E2E_STATIC_APPLICATION_ID and NEW_E2E_STATIC_CLIENT_SECRET must be specified to use OAuth.", Empty{true}, oAuthInfo.ApplicationID, oAuthInfo.ClientSecret) + + c.Environment.ServicePrincipalAppID = &oAuthInfo.ApplicationID + c.Environment.ServicePrincipalClientSecret = &oAuthInfo.ClientSecret + c.Environment.AutoLoginTenantID = common.Iff(oAuthInfo.TenantID != "", &oAuthInfo.TenantID, nil) + } else { + // oauth should reliably work + oAuthInfo := GlobalConfig.E2EAuthConfig.SubscriptionLoginInfo + if oAuthInfo.Environment == AzurePipeline { + c.Environment.InheritEnvironment = true + c.Environment.AutoLoginTenantID = common.Iff(oAuthInfo.DynamicOAuth.Workload.TenantId != "", &oAuthInfo.DynamicOAuth.Workload.TenantId, nil) + c.Environment.AutoLoginMode = pointerTo(common.EAutoLoginType.AzCLI().String()) + } else { + c.Environment.AutoLoginMode = pointerTo(common.EAutoLoginType.SPN().String()) + c.Environment.ServicePrincipalAppID = &oAuthInfo.DynamicOAuth.SPNSecret.ApplicationID + c.Environment.ServicePrincipalClientSecret = &oAuthInfo.DynamicOAuth.SPNSecret.ClientSecret + c.Environment.AutoLoginTenantID = common.Iff(oAuthInfo.DynamicOAuth.SPNSecret.TenantID != "", &oAuthInfo.DynamicOAuth.SPNSecret.TenantID, nil) + } + } + } else if c.Environment.AutoLoginMode != nil { oAuthInfo := GlobalConfig.E2EAuthConfig.SubscriptionLoginInfo - if oAuthInfo.Environment == AzurePipeline { + if strings.ToLower(*c.Environment.AutoLoginMode) == common.EAutoLoginType.Workload().String() { c.Environment.InheritEnvironment = true - c.Environment.AutoLoginTenantID = common.Iff(oAuthInfo.DynamicOAuth.Workload.TenantId != "", &oAuthInfo.DynamicOAuth.Workload.TenantId, nil) - c.Environment.AutoLoginMode = pointerTo(common.EAutoLoginType.AzCLI().String()) - } else { - c.Environment.AutoLoginMode = pointerTo(common.EAutoLoginType.SPN().String()) - c.Environment.ServicePrincipalAppID = &oAuthInfo.DynamicOAuth.SPNSecret.ApplicationID - c.Environment.ServicePrincipalClientSecret = &oAuthInfo.DynamicOAuth.SPNSecret.ClientSecret - c.Environment.AutoLoginTenantID = common.Iff(oAuthInfo.DynamicOAuth.SPNSecret.TenantID != "", &oAuthInfo.DynamicOAuth.SPNSecret.TenantID, nil) + // Get the value of the AZURE_FEDERATED_TOKEN environment variable + token := oAuthInfo.DynamicOAuth.Workload.FederatedToken + a.AssertNow("idToken must be specified 
to authenticate with workload identity", Empty{Invert: true}, token) + // Write the token to a temporary file + // Create a temporary file to store the token + file, err := os.CreateTemp("", "azure_federated_token.txt") + a.AssertNow("Error creating temporary file", IsNil{}, err) + defer file.Close() + + // Write the token to the temporary file + _, err = file.WriteString(token) + a.AssertNow("Error writing to temporary file", IsNil{}, err) + + // Set the AZURE_FEDERATED_TOKEN_FILE environment variable + c.Environment.AzureFederatedTokenFile = pointerTo(file.Name()) + c.Environment.AzureTenantId = pointerTo(oAuthInfo.DynamicOAuth.Workload.TenantId) + c.Environment.AzureClientId = pointerTo(oAuthInfo.DynamicOAuth.Workload.ClientId) } } - } else if c.Environment.AutoLoginMode != nil { - oAuthInfo := GlobalConfig.E2EAuthConfig.SubscriptionLoginInfo - if strings.ToLower(*c.Environment.AutoLoginMode) == common.EAutoLoginType.Workload().String() { - c.Environment.InheritEnvironment = true - // Get the value of the AZURE_FEDERATED_TOKEN environment variable - token := oAuthInfo.DynamicOAuth.Workload.FederatedToken - a.AssertNow("idToken must be specified to authenticate with workload identity", Empty{Invert: true}, token) - // Write the token to a temporary file - // Create a temporary file to store the token - file, err := os.CreateTemp("", "azure_federated_token.txt") - a.AssertNow("Error creating temporary file", IsNil{}, err) - defer file.Close() - - // Write the token to the temporary file - _, err = file.WriteString(token) - a.AssertNow("Error writing to temporary file", IsNil{}, err) - - // Set the AZURE_FEDERATED_TOKEN_FILE environment variable - c.Environment.AzureFederatedTokenFile = pointerTo(file.Name()) - c.Environment.AzureTenantId = pointerTo(oAuthInfo.DynamicOAuth.Workload.TenantId) - c.Environment.AzureClientId = pointerTo(oAuthInfo.DynamicOAuth.Workload.ClientId) - } } - return target.URI(opts) // Generate like public default: a.Error("unsupported credential type") @@ -255,7 +260,7 @@ func RunAzCopy(a ScenarioAsserter, commandSpec AzCopyCommand) (AzCopyStdout, *Az } out := []string{GlobalConfig.AzCopyExecutableConfig.ExecutablePath, string(commandSpec.Verb)} - + for _, v := range commandSpec.PositionalArgs { out = append(out, v) } @@ -292,10 +297,19 @@ func RunAzCopy(a ScenarioAsserter, commandSpec AzCopyCommand) (AzCopyStdout, *Az var out = commandSpec.Stdout if out == nil { switch { + case strings.EqualFold(flagMap["dry-run"], "true") && (strings.EqualFold(flagMap["output-type"], "json") || strings.EqualFold(flagMap["output-type"], "text") || flagMap["output-type"] == ""): // Dryrun has its own special sort of output, that supports non-json output. 
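// Two output shapes land in this case: in text mode AzCopy prints lines like "DRYRUN: copy <src> to <dst>" (the exact form ValidateDryRunOutput regenerates in newe2e_task_validation.go), while in JSON mode each line is a common.JsonOutputTemplate whose MessageContent embeds a cmd.DryrunTransfer.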
+ jsonMode := strings.EqualFold(flagMap["output-type"], "json") + var fromTo common.FromTo + if !jsonMode && len(commandSpec.Targets) >= 2 { + fromTo = common.FromTo(commandSpec.Targets[0].Location())<<8 | common.FromTo(commandSpec.Targets[1].Location()) + } + out = &AzCopyParsedDryrunStdout{ + JsonMode: jsonMode, + fromTo: fromTo, + Raw: make(map[string]bool), + } case !strings.EqualFold(flagMap["output-type"], "json"): // Won't parse non-computer-readable outputs out = &AzCopyRawStdout{} - case strings.EqualFold(flagMap["dryrun"], "true"): // Dryrun has its own special sort of output - out = &AzCopyParsedDryrunStdout{} case commandSpec.Verb == AzCopyVerbCopy || commandSpec.Verb == AzCopyVerbSync || commandSpec.Verb == AzCopyVerbRemove: out = &AzCopyParsedCopySyncRemoveStdout{ @@ -337,8 +351,10 @@ func RunAzCopy(a ScenarioAsserter, commandSpec AzCopyCommand) (AzCopyStdout, *Az 0, command.ProcessState.ExitCode()) a.Cleanup(func(a ScenarioAsserter) { - UploadLogs(a, out, stderr, DerefOrZero(commandSpec.Environment.LogLocation)) - _ = os.RemoveAll(DerefOrZero(commandSpec.Environment.LogLocation)) + if !commandSpec.Environment.ManualLogin { + UploadLogs(a, out, stderr, DerefOrZero(commandSpec.Environment.LogLocation)) + _ = os.RemoveAll(DerefOrZero(commandSpec.Environment.LogLocation)) + } }) return out, &AzCopyJobPlan{} diff --git a/e2etest/newe2e_task_runazcopy_login_logout.go b/e2etest/newe2e_task_runazcopy_login_logout.go new file mode 100644 index 000000000..b66f59db6 --- /dev/null +++ b/e2etest/newe2e_task_runazcopy_login_logout.go @@ -0,0 +1,76 @@ +package e2etest + +import ( + "fmt" + "os/exec" + "strings" +) + +var _ AzCopyStdout = &AzCopyInteractiveStdout{} + +// AzCopyInteractiveStdout is still a semi-raw stdout struct. +type AzCopyInteractiveStdout struct { + Messages []string + asserter Asserter +} + +// NewAzCopyInteractiveStdout creates a new AzCopyInteractiveStdout instance.
+func NewAzCopyInteractiveStdout(a Asserter) *AzCopyInteractiveStdout { + return &AzCopyInteractiveStdout{ + asserter: a, + } +} + +func (a *AzCopyInteractiveStdout) RawStdout() []string { + return a.Messages +} + +func (a *AzCopyInteractiveStdout) Write(p []byte) (n int, err error) { + str := string(p) + lines := strings.Split(strings.TrimSuffix(str, "\n"), "\n") + n = len(p) + + for _, v := range lines { + a.Messages = append(a.Messages, v) + a.asserter.Log(v) + } + + return +} + +func (a *AzCopyInteractiveStdout) String() string { + return strings.Join(a.RawStdout(), "\n") +} + +func RunAzCopyLoginLogout(a Asserter, verb AzCopyVerb) AzCopyStdout { + out := NewAzCopyInteractiveStdout(a) + + args := []string{GlobalConfig.AzCopyExecutableConfig.ExecutablePath, string(verb)} + + tenantId := GlobalConfig.E2EAuthConfig.StaticStgAcctInfo.StaticOAuth.TenantID + if verb == AzCopyVerbLogin && tenantId != "" { + args = append(args, fmt.Sprintf("--tenant-id=%s", tenantId)) + } + + command := exec.Cmd{ + Path: GlobalConfig.AzCopyExecutableConfig.ExecutablePath, + Args: args, + Stdout: out, + } + + in, err := command.StdinPipe() + a.NoError("get stdin pipe", err) + + err = command.Start() + a.Assert("run command", IsNil{}, err) + + if isLaunchedByDebugger { + beginAzCopyDebugging(in) + } + + err = command.Wait() + a.Assert("wait for finalize", IsNil{}, err) + a.Assert("expected exit code", Equal{}, 0, command.ProcessState.ExitCode()) + + return out +} diff --git a/e2etest/newe2e_task_runazcopy_parameters.go b/e2etest/newe2e_task_runazcopy_parameters.go index 57239f721..259531eae 100644 --- a/e2etest/newe2e_task_runazcopy_parameters.go +++ b/e2etest/newe2e_task_runazcopy_parameters.go @@ -174,6 +174,8 @@ func parseFlagTag(tag string) flagTag { return out } +type RawFlags map[string]string + // The below structs are intended to be mixed and matched as much as possible, // such that a variety of verbs can be used with a single struct (e.g. copy and sync) // in a test, without rewriting all the flags for every use case. @@ -268,6 +270,7 @@ type CopySyncCommonFlags struct { CPKByName *string `flag:"cpk-by-name"` CPKByValue *bool `flag:"cpk-by-value"` IncludePattern *string `flag:"include-pattern"` + IncludeDirectoryStubs *bool `flag:"include-directory-stub"` } // CopyFlags is a more exclusive struct including flags exclusive to copy. @@ -305,7 +308,6 @@ type CopyFlags struct { S2SDetectSourceChanged *bool `flag:"s2s-detect-source-changed"` ListOfVersions []string `flag:"list-of-versions,serializer:SerializeListingFile"` BlobTags common.Metadata `flag:"blob-tags,serializer:SerializeTags"` - IncludeDirectoryStubs *bool `flag:"include-directory-stubs"` DisableAutoDecoding *bool `flag:"disable-auto-decoding"` S2SGetPropertiesInBackend *bool `flag:"s2s-get-properties-in-backend"` ADLSFlushThreshold *uint32 `flag:"flush-threshold"` @@ -379,6 +381,8 @@ type SyncFlags struct { CompareHash *common.SyncHashType `flag:"compare-hash"` LocalHashDir *string `flag:"hash-meta-dir"` LocalHashStorageMode *common.HashStorageMode `flag:"local-hash-storage-mode"` + // The real flag name is not all that great due to `delete-destination`, but it works. + DeleteIfNecessary *bool `flag:"delete-destination-file"` } // RemoveFlags is not tiered like CopySyncCommonFlags is, because it is dissimilar in functionality, and would be hard to test in the same scenario.
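The flag structs above all hang off the `flag:"..."` tag convention consumed by parseFlagTag. A minimal, self-contained sketch of that idea follows; SketchFlags and flagsToArgs are hypothetical names, and the harness's real tag parser additionally understands options such as "serializer:":

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// SketchFlags stands in for structs like CopySyncCommonFlags: optional flags are
// pointer fields, and the first comma-separated segment of the `flag` tag names
// the CLI flag. (Hypothetical sketch, not part of the harness.)
type SketchFlags struct {
	Recursive   *bool    `flag:"recursive"`
	BlockSizeMB *float64 `flag:"block-size-mb"`
}

// flagsToArgs renders every non-nil tagged field as a "--name=value" argument.
func flagsToArgs(v any) []string {
	var out []string
	rv := reflect.ValueOf(v).Elem()
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		tag := rt.Field(i).Tag.Get("flag")
		if tag == "" {
			continue // untagged fields are not flags
		}
		name := strings.Split(tag, ",")[0] // drop tag options such as "serializer:..."
		f := rv.Field(i)
		if f.Kind() == reflect.Pointer {
			if f.IsNil() {
				continue // unset flag: omit it from the command line entirely
			}
			f = f.Elem()
		}
		out = append(out, fmt.Sprintf("--%s=%v", name, f.Interface()))
	}
	return out
}

func main() {
	recursive, blockSize := true, 0.25
	fmt.Println(flagsToArgs(&SketchFlags{Recursive: &recursive, BlockSizeMB: &blockSize}))
	// Prints: [--recursive=true --block-size-mb=0.25]
}

The pointer fields are what make the mix-and-match design work: an unset flag is a nil pointer and never reaches the command line, so one struct can serve copy, sync, and bench scenarios without forcing defaults.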
diff --git a/e2etest/newe2e_task_validation.go b/e2etest/newe2e_task_validation.go index 4d0c6fed3..109b2245f 100644 --- a/e2etest/newe2e_task_validation.go +++ b/e2etest/newe2e_task_validation.go @@ -39,7 +39,18 @@ func ValidateMetadata(a Asserter, expected, real common.Metadata) { return } - a.Assert("Metadata must match", Equal{Deep: true}, expected, real) + rule := func(key string, value *string) (ok string, ov *string, include bool) { + ov = value + ok = strings.ToLower(key) + include = Any(common.AllLinuxProperties, func(s string) bool { + return strings.EqualFold(key, s) + }) + + return + } + + //a.Assert("Metadata must match", Equal{Deep: true}, expected, real) + expected = CloneMapWithRule(expected, rule) } func ValidateTags(a Asserter, expected, real map[string]string) { @@ -163,17 +174,17 @@ func ValidateListOutput(a Asserter, stdout AzCopyStdout, expectedObjects map[AzC a.Assert("summary must match", Equal{}, listStdout.Summary, DerefOrZero(expectedSummary)) } -func ValidateErrorOutput(a Asserter, stdout AzCopyStdout, errorMsg string) { +func ValidateMessageOutput(a Asserter, stdout AzCopyStdout, message string) { if dryrunner, ok := a.(DryrunAsserter); ok && dryrunner.Dryrun() { return } for _, line := range stdout.RawStdout() { - if strings.Contains(line, errorMsg) { + if strings.Contains(line, message) { return } } fmt.Println(stdout.String()) - a.Error("expected error message not found in azcopy output") + a.Error(fmt.Sprintf("expected message (%s) not found in azcopy output", message)) } func ValidateStatsReturned(a Asserter, stdout AzCopyStdout) { @@ -310,6 +321,101 @@ func parseAzCopyListObject(a Asserter, line string) cmd.AzCopyListObject { } } +type DryrunOp uint8 + +const ( + DryrunOpCopy DryrunOp = iota + 1 + DryrunOpDelete + DryrunOpProperties +) + +var dryrunOpStr = map[DryrunOp]string{ + DryrunOpCopy: "copy", + DryrunOpDelete: "delete", + DryrunOpProperties: "set-properties", +} + +// ValidateDryRunOutput validates output for items in the expected map; expected must equal output +func ValidateDryRunOutput(a Asserter, output AzCopyStdout, rootSrc ResourceManager, rootDst ResourceManager, expected map[string]DryrunOp) { + if dryrun, ok := a.(DryrunAsserter); ok && dryrun.Dryrun() { + return + } + a.AssertNow("Output must not be nil", Not{IsNil{}}, output) + stdout, ok := output.(*AzCopyParsedDryrunStdout) + a.AssertNow("Output must be dryrun stdout", Equal{}, ok, true) + + uriPrefs := GetURIOptions{ + LocalOpts: LocalURIOpts{ + PreferUNCPath: true, + }, + } + + srcBase := rootSrc.URI(uriPrefs) + var dstBase string + if rootDst != nil { + dstBase = rootDst.URI(uriPrefs) + } + + if stdout.JsonMode { + // validation must have nothing in it, and nothing should miss in output. + validation := CloneMap(expected) + + for _, v := range stdout.Transfers { + // Determine the op. + op := common.Iff(v.FromTo.IsDelete(), DryrunOpDelete, common.Iff(v.FromTo.IsSetProperties(), DryrunOpProperties, DryrunOpCopy)) + + // Try to find the item in expected. 
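// For example, tracing the normalization below: with srcBase C:\ct and v.Source C:\ct\dir\file, trimming the base leaves \dir\file, separator replacement gives /dir/file, and the final trim yields the map key dir/file.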
+ relPath := strings.TrimPrefix( // Ensure we start with the rel path, not a separator + strings.ReplaceAll( // Isolate path separators + strings.TrimPrefix(v.Source, srcBase), // Isolate the relpath + "\\", "/", + ), + "/", + ) + //a.Log("base %s source %s rel %s", srcBase, v.Source, relPath) + expectedOp, ok := validation[relPath] + a.Assert(fmt.Sprintf("Expected %s in map", relPath), Equal{}, ok, true) + a.Assert(fmt.Sprintf("Expected %s to match", relPath), Equal{}, op, expectedOp) + if rootDst != nil { + a.Assert(fmt.Sprintf("Expected %s dest url to match expected dest url", relPath), Equal{}, v.Destination, common.GenerateFullPath(dstBase, relPath)) + } + } + } else { + // It is useless to try to parse details from a user friendly statement. + // Instead, we should attempt to generate the user friendly statement, and validate that it existed from there. + validation := make(map[string]bool) + + for k, v := range expected { + from := common.GenerateFullPath(srcBase, k) + var to string + if rootDst != nil { + to = " to " + common.GenerateFullPath(dstBase, k) + } + + valStr := fmt.Sprintf("DRYRUN: %s %s%s", + dryrunOpStr[v], + from, + to, + ) + + validation[valStr] = true + } + + for k := range stdout.Raw { + _, ok := validation[k] + a.Assert(k+" wasn't present in validation", Equal{}, ok, true) + + if ok { + delete(validation, k) + } + } + + for k := range validation { + a.Assert(k+" wasn't present in output", Always{}) + } + } +} + func ValidateJobsListOutput(a Asserter, stdout AzCopyStdout, expectedJobIDs int) { if dryrunner, ok := a.(DryrunAsserter); ok && dryrunner.Dryrun() { return diff --git a/e2etest/zt_aamanaged_disks_test.go b/e2etest/zt_aamanaged_disks_test.go index 22e45f3b1..578f0bbdb 100644 --- a/e2etest/zt_aamanaged_disks_test.go +++ b/e2etest/zt_aamanaged_disks_test.go @@ -22,10 +22,11 @@ package e2etest import ( "flag" - "github.com/Azure/azure-storage-azcopy/v10/common" "runtime" "testing" "time" + + "github.com/Azure/azure-storage-azcopy/v10/common" ) // Purpose: Tests for the special cases that relate to moving managed disks (default local VHD to page blob; special handling for @@ -118,7 +119,7 @@ func TestManagedDisks_SnapshotOAuth(t *testing.T) { } // Service issue causes occasional flickers in feature functionality; enough that testing is problematic. Temporarily disabled until issue is resolved. 
-func TestManagedDisks_OAuthRequired(t *testing.T) { +func TestManagedDisks_Aaa(t *testing.T) { if runtime.GOOS != "linux" { t.Skip("Limit runs to Linux so no simultaneous runs occur") return @@ -137,7 +138,7 @@ func TestManagedDisks_OAuthRequired(t *testing.T) { &hooks{ beforeRunJob: func(h hookHelper) { // try giving the service some time to think - time.Sleep(time.Second * 30) + time.Sleep(time.Second * 5) }, }, testFiles{ diff --git a/e2etest/zt_basic_copy_sync_remove_test.go b/e2etest/zt_basic_copy_sync_remove_test.go index bd9af5a23..4655ec97d 100644 --- a/e2etest/zt_basic_copy_sync_remove_test.go +++ b/e2etest/zt_basic_copy_sync_remove_test.go @@ -22,6 +22,7 @@ package e2etest import ( "crypto/md5" + "encoding/base64" "errors" "fmt" "os" @@ -1128,20 +1129,8 @@ func TestCopySync_DeleteDestinationFileFlag(t *testing.T) { &hooks{ beforeRunJob: func(h hookHelper) { blobClient := h.GetDestination().(*resourceBlobContainer).containerClient.NewBlockBlobClient("filea") - // initial stage block - id := []string{BlockIDIntToBase64(1)} - _, err := blobClient.StageBlock(ctx, id[0], streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), nil) - if err != nil { - t.Errorf("error staging block %s", err) - } - - _, err = blobClient.CommitBlockList(ctx, id, nil) - if err != nil { - t.Errorf("error committing block %s", err) - } - - // second stage block - _, err = blobClient.StageBlock(ctx, id[0], streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), nil) + // initial stage block, with block id incompatible with us + _, err := blobClient.StageBlock(ctx, base64.StdEncoding.EncodeToString([]byte("foobar")), streaming.NopCloser(strings.NewReader(blockBlobDefaultData)), nil) if err != nil { t.Errorf("error staging block %s", err) } diff --git a/e2etest/zt_newe2e_autodetect_blob_type_test.go b/e2etest/zt_newe2e_autodetect_blob_type_test.go index 1762209e1..cc9840627 100644 --- a/e2etest/zt_newe2e_autodetect_blob_type_test.go +++ b/e2etest/zt_newe2e_autodetect_blob_type_test.go @@ -13,7 +13,7 @@ type AutoDetectBlobTypeTestSuite struct{} func (s *AutoDetectBlobTypeTestSuite) Scenario_AutoInferDetectBlobTypeVHD(svm *ScenarioVariationManager) { fileName := "myVHD.vHd" // awkward capitalization to see if AzCopy catches it. - body := NewRandomObjectContentContainer(svm, 4*common.MegaByte) + body := NewRandomObjectContentContainer(4 * common.MegaByte) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.File(), common.ELocation.Blob()})), ResourceDefinitionContainer{}). 
GetObject(svm, fileName, common.EEntityType.File()) diff --git a/e2etest/zt_newe2e_basic_functionality_test.go b/e2etest/zt_newe2e_basic_functionality_test.go index 0a3ea347f..bbd46818f 100644 --- a/e2etest/zt_newe2e_basic_functionality_test.go +++ b/e2etest/zt_newe2e_basic_functionality_test.go @@ -1,6 +1,7 @@ package e2etest import ( + blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-storage-azcopy/v10/common" "strconv" "time" @@ -26,7 +27,7 @@ func (s *BasicFunctionalitySuite) Scenario_SingleFile(svm *ScenarioVariationMana } } - body := NewRandomObjectContentContainer(svm, SizeFromString("10K")) + body := NewRandomObjectContentContainer(SizeFromString("10K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS()})), ResourceDefinitionObject{ ObjectName: pointerTo("test"), @@ -83,9 +84,9 @@ func (s *BasicFunctionalitySuite) Scenario_MultiFileUploadDownload(svm *Scenario // Scale up from service to object srcDef := ResourceDefinitionContainer{ Objects: ObjectResourceMappingFlat{ - "abc": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, SizeFromString("10K"))}, - "def": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, SizeFromString("10K"))}, - "foobar": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, SizeFromString("10K"))}, + "abc": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(SizeFromString("10K"))}, + "def": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(SizeFromString("10K"))}, + "foobar": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(SizeFromString("10K"))}, }, } srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, srcLoc), srcDef) @@ -108,7 +109,7 @@ func (s *BasicFunctionalitySuite) Scenario_MultiFileUploadDownload(svm *Scenario svm, AzCopyCommand{ // Sync is not included at this moment, because sync requires - Verb: azCopyVerb, + Verb: azCopyVerb, Targets: []ResourceManager{ TryApplySpecificAuthType(srcContainer, EExplicitCredentialType.SASToken(), svm, CreateAzCopyTargetOptions{ SASTokenOptions: sasOpts, @@ -161,7 +162,7 @@ func (s *BasicFunctionalitySuite) Scenario_EntireDirectory_S2SContainer(svm *Sce } for i := range 10 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} srcObjs[name] = obj } } @@ -222,7 +223,7 @@ func (s *BasicFunctionalitySuite) Scenario_EntireDirectory_UploadContainer(svm * } for i := range 10 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} srcObjs[name] = obj } } @@ -276,7 +277,7 @@ func (s *BasicFunctionalitySuite) Scenario_EntireDirectory_DownloadContainer(svm } for i := range 10 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := 
ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} srcObjs[name] = obj } } @@ -345,7 +346,7 @@ func (s *BasicFunctionalitySuite) Scenario_SingleFileUploadDownload_EmptySAS(svm }) // Validate that the stdout contains the missing sas message - ValidateErrorOutput(svm, stdout, "Please authenticate using Microsoft Entra ID (https://aka.ms/AzCopy/AuthZ), use AzCopy login, or append a SAS token to your Azure URL.") + ValidateMessageOutput(svm, stdout, "Please authenticate using Microsoft Entra ID (https://aka.ms/AzCopy/AuthZ), use AzCopy login, or append a SAS token to your Azure URL.") } func (s *BasicFunctionalitySuite) Scenario_Sync_EmptySASErrorCodes(svm *ScenarioVariationManager) { @@ -410,3 +411,76 @@ func (s *BasicFunctionalitySuite) Scenario_Copy_EmptySASErrorCodes(svm *Scenario // Validate that the stdout contains these error URLs ValidateContainsError(svm, stdout, []string{"https://aka.ms/AzCopyError/NoAuthenticationInformation", "https://aka.ms/AzCopyError/ResourceNotFound"}) } + +func (s *BasicFunctionalitySuite) Scenario_TagsPermission(svm *ScenarioVariationManager) { + objectType := ResolveVariation(svm, []common.EntityType{common.EEntityType.File(), common.EEntityType.Folder(), common.EEntityType.Symlink()}) + srcLoc := ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob()}) + + // Local resource manager doesn't have symlink abilities yet, and the same codepath is hit. + if objectType == common.EEntityType.Symlink() && srcLoc == common.ELocation.Local() { + svm.InvalidateScenario() + return + } + + srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, srcLoc), ResourceDefinitionObject{ + Body: common.Iff(objectType == common.EEntityType.File(), NewZeroObjectContentContainer(1024*1024*5), nil), + ObjectProperties: ObjectProperties{ + EntityType: objectType, + }, + }) + dstCt := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}) + + svm.InsertVariationSeparator("Blob") + + multiBlock := "single-block" + var blobType common.BlobType + if objectType == common.EEntityType.File() { + svm.InsertVariationSeparator("-") + blobType = ResolveVariation(svm, []common.BlobType{common.EBlobType.BlockBlob(), common.EBlobType.PageBlob(), common.EBlobType.AppendBlob()}) + + if blobType == common.EBlobType.BlockBlob() { + svm.InsertVariationSeparator("-") + multiBlock = ResolveVariation(svm, []string{"single-block", "multi-block"}) + } + } + + stdOut, _ := RunAzCopy( + svm, + AzCopyCommand{ + Verb: AzCopyVerbCopy, + Targets: []ResourceManager{ + srcObj, + AzCopyTarget{dstCt, EExplicitCredentialType.SASToken(), CreateAzCopyTargetOptions{ + SASTokenOptions: GenericServiceSignatureValues{ + ContainerName: dstCt.ContainerName(), + Permissions: PtrOf(blobsas.ContainerPermissions{ + Read: true, + Add: true, + Create: true, + Write: true, + Tag: false, + }).String(), + }, + }}, + }, + Flags: CopyFlags{ + BlobTags: common.Metadata{ + "foo": PtrOf("bar"), + "alpha": PtrOf("beta"), + }, + CopySyncCommonFlags: CopySyncCommonFlags{ + BlockSizeMB: common.Iff(objectType == common.EEntityType.File() && multiBlock != "single-block", + PtrOf(0.5), + nil), + Recursive: pointerTo(true), + IncludeDirectoryStubs: pointerTo(true), + }, + BlobType: &blobType, + PreserveSymlinks: pointerTo(true), + }, + ShouldFail: true, + }, + ) + + ValidateMessageOutput(svm, stdOut, "Authorization failed during an attempt to set tags, please ensure 
you have the appropriate Tags permission") +} diff --git a/e2etest/zt_newe2e_blob_test.go b/e2etest/zt_newe2e_blob_test.go index b1f4b6b60..6dfd138ca 100644 --- a/e2etest/zt_newe2e_blob_test.go +++ b/e2etest/zt_newe2e_blob_test.go @@ -21,7 +21,7 @@ func (s *BlobTestSuite) Scenario_UploadBlockBlobs(svm *ScenarioVariationManager) srcObjs := make(ObjectResourceMappingFlat) for i := range 10 { name := "dir_10_files/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} CreateResource[ObjectResourceManager](svm, srcContainer, obj) srcObjs[name] = obj } @@ -43,7 +43,7 @@ func (s *BlobTestSuite) Scenario_UploadBlockBlobs(svm *ScenarioVariationManager) func (s *BlobTestSuite) Scenario_UploadPageBlob(svm *ScenarioVariationManager) { fileName := "test_page_blob_1mb.vHd" - body := NewRandomObjectContentContainer(svm, common.MegaByte) + body := NewRandomObjectContentContainer(common.MegaByte) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -77,7 +77,7 @@ func (s *BlobTestSuite) Scenario_UploadPageBlob(svm *ScenarioVariationManager) { func (s *BlobTestSuite) Scenario_SetPageBlobTier(svm *ScenarioVariationManager) { fileName := "test_page_blob.vHd" - body := NewRandomObjectContentContainer(svm, common.KiloByte) + body := NewRandomObjectContentContainer(common.KiloByte) tier := ResolveVariation(svm, []common.PageBlobTier{common.EPageBlobTier.P10(), common.EPageBlobTier.P20(), common.EPageBlobTier.P30(), common.EPageBlobTier.P4(), common.EPageBlobTier.P40(), common.EPageBlobTier.P50()}) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). 
@@ -114,7 +114,7 @@ func (s *BlobTestSuite) Scenario_SetPageBlobTier(svm *ScenarioVariationManager) func (s *BlobTestSuite) Scenario_UploadBlob(svm *ScenarioVariationManager) { // Scale up from service to object dstObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}).GetObject(svm, "test", common.EEntityType.File()) - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionObject{ ObjectName: pointerTo("test"), @@ -144,7 +144,7 @@ func (s *BlobTestSuite) Scenario_UploadBlob(svm *ScenarioVariationManager) { func (s *BlobTestSuite) Scenario_DownloadBlob(svm *ScenarioVariationManager) { // Scale up from service to object dstObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}).GetObject(svm, "test", common.EEntityType.File()) - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionObject{ ObjectName: pointerTo("test"), @@ -180,7 +180,7 @@ func (s *BlobTestSuite) Scenario_DownloadBlobRecursive(svm *ScenarioVariationMan srcObjs := make(ObjectResourceMappingFlat) for i := range 5 { name := "dir_5_files/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} CreateResource[ObjectResourceManager](svm, srcContainer, obj) srcObjs[name] = obj } diff --git a/e2etest/zt_newe2e_blobfs_test.go b/e2etest/zt_newe2e_blobfs_test.go index dfba6d850..850a6a6db 100644 --- a/e2etest/zt_newe2e_blobfs_test.go +++ b/e2etest/zt_newe2e_blobfs_test.go @@ -14,7 +14,7 @@ type BlobFSTestSuite struct{} func (s *BlobFSTestSuite) Scenario_UploadFile(svm *ScenarioVariationManager) { fileName := "test.txt" size := ResolveVariation(svm, []int64{common.KiloByte, 64 * common.MegaByte}) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -41,7 +41,7 @@ func (s *BlobFSTestSuite) Scenario_UploadFile(svm *ScenarioVariationManager) { func (s *BlobFSTestSuite) Scenario_UploadFileMultiflushOAuth(svm *ScenarioVariationManager) { fileName := "test_multiflush_64MB_file.txt" - body := NewRandomObjectContentContainer(svm, 64*common.MegaByte) + body := NewRandomObjectContentContainer(64 * common.MegaByte) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). 
GetObject(svm, fileName, common.EEntityType.File()) @@ -82,7 +82,7 @@ func (s *BlobFSTestSuite) Scenario_Upload100Files(svm *ScenarioVariationManager) srcObjs := make(ObjectResourceMappingFlat) for i := range 100 { name := "dir_100_files/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} CreateResource[ObjectResourceManager](svm, srcContainer, obj) srcObjs[name] = obj } @@ -105,7 +105,7 @@ func (s *BlobFSTestSuite) Scenario_Upload100Files(svm *ScenarioVariationManager) func (s *BlobFSTestSuite) Scenario_DownloadFile(svm *ScenarioVariationManager) { fileName := "test.txt" size := ResolveVariation(svm, []int64{common.KiloByte, 64 * common.MegaByte}) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) dstObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -142,7 +142,7 @@ func (s *BlobFSTestSuite) Scenario_Download100Files(svm *ScenarioVariationManage srcObjs := make(ObjectResourceMappingFlat) for i := range 100 { name := "dir_100_files/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} CreateResource[ObjectResourceManager](svm, srcContainer, obj) srcObjs[name] = obj } @@ -161,3 +161,90 @@ func (s *BlobFSTestSuite) Scenario_Download100Files(svm *ScenarioVariationManage Objects: srcObjs, }, true) } + +func (s *BlobFSTestSuite) Scenario_VirtualDirectoryHandling(svm *ScenarioVariationManager) { + targetAcct := pointerTo(NamedResolveVariation(svm, map[string]string{ + "FNS": PrimaryStandardAcct, + "HNS": PrimaryHNSAcct, + })) + + // This should also fix copy/sync because the changed codepath overlaps, *but*, we'll have a separate test for that too. + srcRoot := GetRootResource(svm, common.ELocation.Blob(), GetResourceOptions{ + PreferredAccount: targetAcct, + }) + + svm.InsertVariationSeparator("_") + resourceMapping := NamedResolveVariation(svm, map[string]ObjectResourceMappingFlat{ + "DisallowOverlap": { // "foo" is a folder, only a folder, there is no difference between "foo" and "foo/". + "foo": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + Metadata: common.Metadata{ + "foo": pointerTo("bar"), + }, + }, + Body: NewZeroObjectContentContainer(0), + }, + "foo/bar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File inside + "baz": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File on the side + }, + "AllowOverlap": { // "foo" (the file), and "foo/" (the directory) can exist, but "foo/" is still a directory with metadata. 
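// (Context: a flat namespace account stores "foo" and "foo/" as two distinct blob names, which is what makes this overlap representable at all; HNS normalizes "foo/" to "foo", hence the scenario invalidation below when the HNS account is targeted.)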
+ "foo/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + Metadata: common.Metadata{ + "foo": pointerTo("bar"), + }, + }, + Body: NewZeroObjectContentContainer(0), + }, + "foo/bar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File inside + "foo": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File on the side + }, + }) + + // HNS will automatically correct blob calls to "foo/" to "foo", which is correct behavior + // But incompatible with the overlap scenario. + if _, ok := resourceMapping["foo/"]; *targetAcct == PrimaryHNSAcct && ok { + svm.InvalidateScenario() + return + } + + srcRes := CreateResource[ContainerResourceManager](svm, srcRoot, ResourceDefinitionContainer{ + Objects: resourceMapping, + }) + + svm.InsertVariationSeparator("_") + tgt := GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Blob(), common.ELocation.BlobFS()}), GetResourceOptions{ + PreferredAccount: targetAcct, + }).(ServiceResourceManager).GetContainer(srcRes.ContainerName()) + + svm.InsertVariationSeparator("->") // Formatting: "FNS_AllowOverlap_Blob->Blob" + dstRes := CreateResource[ContainerResourceManager](svm, + GetRootResource(svm, ResolveVariation(svm, []common.Location{ + common.ELocation.Blob(), + common.ELocation.BlobFS(), + }), GetResourceOptions{PreferredAccount: targetAcct}), + ResourceDefinitionContainer{}) + + RunAzCopy( + svm, + AzCopyCommand{ + Verb: AzCopyVerbCopy, + Targets: []ResourceManager{ + CreateAzCopyTarget(tgt, EExplicitCredentialType.OAuth(), svm), + CreateAzCopyTarget(dstRes, EExplicitCredentialType.OAuth(), svm), + }, + Flags: CopyFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + Recursive: pointerTo(true), + IncludeDirectoryStubs: pointerTo(true), + }, + }, + }, + ) + + ValidateResource(svm, dstRes, ResourceDefinitionContainer{ + Objects: resourceMapping, + }, false) +} diff --git a/e2etest/zt_newe2e_dryrun_test.go b/e2etest/zt_newe2e_dryrun_test.go new file mode 100644 index 000000000..2373f124e --- /dev/null +++ b/e2etest/zt_newe2e_dryrun_test.go @@ -0,0 +1,77 @@ +package e2etest + +import ( + "github.com/Azure/azure-storage-azcopy/v10/common" +) + +type DryrunSuite struct{} + +func init() { + suiteManager.RegisterSuite(&DryrunSuite{}) +} + +func (*DryrunSuite) Scenario_UploadSync_Encoded(a *ScenarioVariationManager) { + dst := CreateResource[ContainerResourceManager](a, GetRootResource(a, ResolveVariation(a, []common.Location{common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS()})), ResourceDefinitionContainer{}) + + src := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Local()), ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foo%bar": ResourceDefinitionObject{}, + "baz%bish": ResourceDefinitionObject{}, + }, + }) + + stdout, _ := RunAzCopy(a, AzCopyCommand{ + Verb: "sync", + Targets: []ResourceManager{src, dst}, + Flags: SyncFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + DryRun: pointerTo(true), + + GlobalFlags: GlobalFlags{ + OutputType: pointerTo(ResolveVariation(a, []common.OutputFormat{common.EOutputFormat.Json(), common.EOutputFormat.Text()})), + }, + }, + + DeleteDestination: pointerTo(true), + }, + }) + + // we're looking to see foo%bar and bar%foo + ValidateDryRunOutput(a, stdout, src, dst, map[string]DryrunOp{ + "foo%bar": DryrunOpCopy, + "baz%bish": DryrunOpCopy, + }) +} + +func (*DryrunSuite) Scenario_DownloadSync_Encoded(a 
*ScenarioVariationManager) { + src := CreateResource[ContainerResourceManager](a, GetRootResource(a, ResolveVariation(a, []common.Location{common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS()})), ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foo%bar": ResourceDefinitionObject{}, + "baz%bish": ResourceDefinitionObject{}, + }, + }) + + dst := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Local()), ResourceDefinitionContainer{}) + + stdout, _ := RunAzCopy(a, AzCopyCommand{ + Verb: "sync", + Targets: []ResourceManager{src, dst}, + Flags: SyncFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + DryRun: pointerTo(true), + + GlobalFlags: GlobalFlags{ + OutputType: pointerTo(ResolveVariation(a, []common.OutputFormat{common.EOutputFormat.Json(), common.EOutputFormat.Text()})), + }, + }, + + DeleteDestination: pointerTo(true), + }, + }) + + // we're looking to see foo%bar and bar%foo + ValidateDryRunOutput(a, stdout, src, dst, map[string]DryrunOp{ + "foo%bar": DryrunOpCopy, + "baz%bish": DryrunOpCopy, + }) +} diff --git a/e2etest/zt_newe2e_example_test.go b/e2etest/zt_newe2e_example_test.go index 9f69a01ff..83615d221 100644 --- a/e2etest/zt_newe2e_example_test.go +++ b/e2etest/zt_newe2e_example_test.go @@ -22,7 +22,7 @@ func (s *ExampleSuite) TeardownSuite(a Asserter) { } func (s *ExampleSuite) Scenario_SingleFileCopySyncS2S(svm *ScenarioVariationManager) { - body := NewRandomObjectContentContainer(svm, SizeFromString("10K")) + body := NewRandomObjectContentContainer(SizeFromString("10K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob()})), ResourceDefinitionObject{ ObjectName: pointerTo("test"), diff --git a/e2etest/zt_newe2e_file_oauth_test.go b/e2etest/zt_newe2e_file_oauth_test.go index 95c1de0c7..828cc20d5 100644 --- a/e2etest/zt_newe2e_file_oauth_test.go +++ b/e2etest/zt_newe2e_file_oauth_test.go @@ -35,5 +35,5 @@ func (s *FileOAuthTestSuite) Scenario_FileBlobOAuthError(svm *ScenarioVariationM ShouldFail: true, }) - ValidateErrorOutput(svm, stdout, fmt.Sprintf("S2S %s from Azure File authenticated with Azure AD to Blob/BlobFS is not supported", azCopyVerb)) + ValidateMessageOutput(svm, stdout, fmt.Sprintf("S2S %s from Azure File authenticated with Azure AD to Blob/BlobFS is not supported", azCopyVerb)) } diff --git a/e2etest/zt_newe2e_file_test.go b/e2etest/zt_newe2e_file_test.go index 48d425297..882124a76 100644 --- a/e2etest/zt_newe2e_file_test.go +++ b/e2etest/zt_newe2e_file_test.go @@ -4,8 +4,8 @@ import ( "context" "fmt" "github.com/Azure/azure-storage-azcopy/v10/common" - "strconv" "math" + "strconv" ) func init() { @@ -17,7 +17,7 @@ type FileTestSuite struct{} func (s *FileTestSuite) Scenario_SingleFileUploadDifferentSizes(svm *ScenarioVariationManager) { size := ResolveVariation(svm, []int64{0, 1, 4*common.MegaByte - 1, 4 * common.MegaByte, 4*common.MegaByte + 1}) fileName := fmt.Sprintf("test_file_upload_%dB_fullname", size) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). 
GetObject(svm, fileName, common.EEntityType.File()) @@ -132,7 +132,7 @@ func (s *FileTestSuite) Scenario_PartialSparseFileUpload(svm *ScenarioVariationM func (s *FileTestSuite) Scenario_GuessMimeType(svm *ScenarioVariationManager) { size := int64(0) fileName := "test_guessmimetype.html" - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -164,7 +164,7 @@ func (s *FileTestSuite) Scenario_GuessMimeType(svm *ScenarioVariationManager) { func (s *FileTestSuite) Scenario_UploadFileProperties(svm *ScenarioVariationManager) { size := int64(0) fileName := "test_properties" - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) // awkward capitalization to see if AzCopy catches it. @@ -230,7 +230,7 @@ func (s *FileTestSuite) Scenario_DownloadPreserveLMTFile(svm *ScenarioVariationM } func (s *FileTestSuite) Scenario_Download63MBFile(svm *ScenarioVariationManager) { - body := NewRandomObjectContentContainer(svm, 63*common.MegaByte) + body := NewRandomObjectContentContainer(63 * common.MegaByte) name := "test_63mb" srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, common.ELocation.File()), ResourceDefinitionObject{ObjectName: pointerTo(name), Body: body}) dstObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}).GetObject(svm, name, common.EEntityType.File()) @@ -268,7 +268,7 @@ func (s *FileTestSuite) Scenario_UploadDirectory(svm *ScenarioVariationManager) } for i := range 3 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} srcObjs[name] = obj } } @@ -325,7 +325,7 @@ func (s *FileTestSuite) Scenario_DownloadDirectory(svm *ScenarioVariationManager } for i := range 3 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} srcObjs[name] = obj } } @@ -369,7 +369,7 @@ func (s *FileTestSuite) Scenario_DownloadDirectory(svm *ScenarioVariationManager func (s *FileTestSuite) Scenario_SingleFileUploadWildcard(svm *ScenarioVariationManager) { size := common.MegaByte fileName := fmt.Sprintf("test_file_upload_%dB_fullname.txt", size) - body := NewRandomObjectContentContainer(svm, int64(size)) + body := NewRandomObjectContentContainer(int64(size)) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -396,7 +396,7 @@ func (s *FileTestSuite) Scenario_SingleFileUploadWildcard(svm *ScenarioVariation func (s *FileTestSuite) Scenario_AllFileUploadWildcard(svm *ScenarioVariationManager) { size := 
common.KiloByte fileName := fmt.Sprintf("test_file_upload_%dB_fullname", size) - body := NewRandomObjectContentContainer(svm, int64(size)) + body := NewRandomObjectContentContainer(int64(size)) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -425,7 +425,7 @@ func (s *FileTestSuite) Scenario_AllFileUploadWildcard(svm *ScenarioVariationMan func (s *FileTestSuite) Scenario_AllFileDownloadWildcard(svm *ScenarioVariationManager) { size := common.KiloByte fileName := fmt.Sprintf("test_file_upload_%dB_fullname", size) - body := NewRandomObjectContentContainer(svm, int64(size)) + body := NewRandomObjectContentContainer(int64(size)) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.File()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -454,7 +454,7 @@ func (s *FileTestSuite) Scenario_AllFileDownloadWildcard(svm *ScenarioVariationM func (s *FileTestSuite) Scenario_SeveralFileUploadWildcard(svm *ScenarioVariationManager) { size := common.KiloByte fileName := fmt.Sprintf("test_file_upload_%dB_fullname", size) - body := NewRandomObjectContentContainer(svm, int64(size)) + body := NewRandomObjectContentContainer(int64(size)) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Local()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -483,7 +483,7 @@ func (s *FileTestSuite) Scenario_SeveralFileUploadWildcard(svm *ScenarioVariatio func (s *FileTestSuite) Scenario_SeveralFileDownloadWildcard(svm *ScenarioVariationManager) { size := common.KiloByte fileName := fmt.Sprintf("test_file_upload_%dB_fullname", size) - body := NewRandomObjectContentContainer(svm, int64(size)) + body := NewRandomObjectContentContainer(int64(size)) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.File()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) diff --git a/e2etest/zt_newe2e_fns_dir_test.go b/e2etest/zt_newe2e_fns_dir_test.go new file mode 100644 index 000000000..6515e7a2f --- /dev/null +++ b/e2etest/zt_newe2e_fns_dir_test.go @@ -0,0 +1,204 @@ +package e2etest + +import ( + "github.com/Azure/azure-storage-azcopy/v10/common" + "time" +) + +/* +FNSSuite exists to test oddities about virtual directory semantics on flat namespace blob. +*/ +type FNSSuite struct{} + +func init() { + suiteManager.RegisterSuite(&FNSSuite{}) +} + +func (*FNSSuite) Scenario_CopyToOverlappableDirectoryMarker(a *ScenarioVariationManager) { + DirMeta := ResolveVariation(a, []string{"", common.POSIXFolderMeta}) + tgtVerb := ResolveVariation(a, []AzCopyVerb{AzCopyVerbCopy, AzCopyVerbSync}) + + // Target a fns account + destRm := ObjectResourceMappingFlat{ + "foobar/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + Metadata: common.Iff(DirMeta != "", common.Metadata{ + common.POSIXFolderMeta: pointerTo("true"), + }, nil), + }, + Body: NewZeroObjectContentContainer(0), + }, + } + + if tgtVerb == AzCopyVerbSync { + // Sync must have an existing destination, non-folder. 
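+ // (Sync decides overwrites by comparing last-modified times, so the pre-existing file below must be older than the source; the sleep further down enforces that ordering.)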
+ destRm["foobar"] = ResourceDefinitionObject{ + Body: NewZeroObjectContentContainer(512), + } + } + + dest := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Blob()), + ResourceDefinitionContainer{ + Objects: destRm, + }, + ) + + if tgtVerb == AzCopyVerbSync && !a.Dryrun() { + time.Sleep(time.Second * 5) // Ensure the source is newer + } + + // Source must be newer than the destination + source := CreateResource[ObjectResourceManager](a, GetRootResource(a, common.ELocation.Local()), ResourceDefinitionObject{ + Body: NewRandomObjectContentContainer(1024), + }) + + _, _ = RunAzCopy(a, + AzCopyCommand{ + Verb: tgtVerb, + Targets: []ResourceManager{ + source, + dest.GetObject(a, "foobar", common.EEntityType.File()), + }, + Flags: CopyFlags{ + AsSubdir: common.Iff(tgtVerb == AzCopyVerbCopy, pointerTo(false), nil), + }, + }, + ) + + ValidateResource(a, dest, ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foobar": ResourceDefinitionObject{ + ObjectShouldExist: pointerTo(true), + }, + "foobar/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + Metadata: common.Iff(DirMeta != "", common.Metadata{ + common.POSIXFolderMeta: pointerTo("true"), + }, nil), + }, + ObjectShouldExist: pointerTo(true), + }, + }, + }, true) +} + +// Scenario_IncludeRootDirectoryStub tests that the root directory (and sub directories) appropriately get their files picked up. +func (*FNSSuite) Scenario_IncludeRootDirectoryStub(a *ScenarioVariationManager) { + DirMeta := ResolveVariation(a, []string{"", common.POSIXFolderMeta}) + + dst := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Blob()), ResourceDefinitionContainer{}) + src := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Blob()), ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foobar": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(512), ObjectProperties: ObjectProperties{Metadata: common.Metadata{"dontcopyme": pointerTo("")}}}, // Object w/ same name as root dir + "foobar/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.Iff(DirMeta != "", common.EEntityType.Folder(), common.EEntityType.File()), + Metadata: common.Metadata{"asdf": pointerTo("qwerty")}, + }, + }, // Folder w/ same name as object, add special prop to ensure + "foobar/foo": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + "foobar/bar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + "foobar/baz": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + "foobar/folder/": ResourceDefinitionObject{ObjectProperties: ObjectProperties{EntityType: common.Iff(DirMeta != "", common.EEntityType.Folder(), common.EEntityType.File())}}, + "foobar/folder/foobar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + }, + }) + + azcopyVerb := ResolveVariation(a, []AzCopyVerb{AzCopyVerbCopy, AzCopyVerbSync}) + RunAzCopy(a, + AzCopyCommand{ + Verb: azcopyVerb, + Targets: []ResourceManager{ + src.GetObject(a, "foobar/", common.EEntityType.Folder()), + dst.GetObject(a, "foobar/", common.EEntityType.Folder()), + }, + Flags: CopyFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + Recursive: pointerTo(true), + IncludeDirectoryStubs: pointerTo(true), + }, + AsSubdir: common.Iff(azcopyVerb == AzCopyVerbCopy, pointerTo(false), nil), + }, + }, + ) + + ValidateResource(a, dst, ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foobar": 
ResourceDefinitionObject{ObjectShouldExist: pointerTo(false)}, // We shouldn't have captured foobar, but foobar/ should exist as a directory. + "foobar/": ResourceDefinitionObject{ObjectProperties: ObjectProperties{ + EntityType: common.Iff(DirMeta != "", common.EEntityType.Folder(), common.EEntityType.File()), + Metadata: common.Metadata{ + "asdf": pointerTo("qwerty"), + }, + }, + }, + "foobar/foo": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + "foobar/bar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + "foobar/baz": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + "foobar/folder/": ResourceDefinitionObject{ObjectProperties: ObjectProperties{EntityType: common.Iff(DirMeta != "", common.EEntityType.Folder(), common.EEntityType.File())}}, + "foobar/folder/foobar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(0)}, + }, + }, false) +} + +/* +Scenario_SyncTrailingSlashDeletion tests against a potential accidental deletion bug that could occur when `folder/` exists at the destination, but not the source +and `folder/` happens to have an overlapping file at `folder`. +*/ +func (*FNSSuite) Scenario_SyncTrailingSlashDeletion(a *ScenarioVariationManager) { + folderStyle := ResolveVariation(a, []common.EntityType{common.EEntityType.File(), common.EEntityType.Folder()}) + + dest := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Blob()), ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foobar": ResourceDefinitionObject{ + Body: NewRandomObjectContentContainer(1024), + }, + "foobar/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: folderStyle, + }, + }, + "foobar/bar/": ResourceDefinitionObject{ + Body: NewRandomObjectContentContainer(1024), + }, + }, + }) + + src := CreateResource[ContainerResourceManager](a, GetRootResource(a, common.ELocation.Blob()), ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foobar": ResourceDefinitionObject{ + Body: NewRandomObjectContentContainer(1024), + }, // We don't care about anything other than the overlap. We merely want to trigger a delete op against dest's folder/. + }, + }) + + RunAzCopy(a, AzCopyCommand{ + Verb: AzCopyVerbSync, + Targets: []ResourceManager{ + src.GetObject(a, "foobar/", common.EEntityType.Folder()), + dest.GetObject(a, "foobar/", common.EEntityType.Folder()), + }, + Flags: SyncFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + Recursive: pointerTo(true), + GlobalFlags: GlobalFlags{ + OutputType: pointerTo(common.EOutputFormat.Text()), + }, + IncludeDirectoryStubs: pointerTo(true), + }, + DeleteDestination: pointerTo(true), + }, + }) + + ValidateResource(a, dest, ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + "foobar": ResourceDefinitionObject{}, // We just care this guy exists + "foobar/": ResourceDefinitionObject{ // and this guy doesn't. 
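+ // Sync's --delete-destination pass should have removed it, since the source only contains the overlapping file "foobar".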
+ ObjectShouldExist: pointerTo(false), + }, + "foobar/bar/": ResourceDefinitionObject{ + ObjectShouldExist: pointerTo(false), + }, + }, + }, false) +} diff --git a/e2etest/zt_newe2e_list_test.go b/e2etest/zt_newe2e_list_test.go index e35ac1dbc..3686b50d3 100644 --- a/e2etest/zt_newe2e_list_test.go +++ b/e2etest/zt_newe2e_list_test.go @@ -5,6 +5,7 @@ import ( blobsas "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-storage-azcopy/v10/cmd" "github.com/Azure/azure-storage-azcopy/v10/common" + "strings" ) func init() { @@ -18,7 +19,7 @@ func (s *ListSuite) Scenario_ListBasic(svm *ScenarioVariationManager) { common.ELocation.File()})) svm.InsertVariationSeparator(":") - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) var expectedObjects map[AzCopyOutputKey]cmd.AzCopyListObject if srcService.Location() == common.ELocation.Blob() { expectedObjects = map[AzCopyOutputKey]cmd.AzCopyListObject{} @@ -79,9 +80,9 @@ func (s *ListSuite) Scenario_ListHierarchy(svm *ScenarioVariationManager) { } } objects := []ResourceDefinitionObject{ - {ObjectName: pointerTo("file_in_root.txt"), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), Size: "1.00 KiB"}, + {ObjectName: pointerTo("file_in_root.txt"), Body: NewRandomObjectContentContainer(SizeFromString("1K")), Size: "1.00 KiB"}, {ObjectName: pointerTo("dir_in_root"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, - {ObjectName: pointerTo("dir_in_root/file.txt"), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root/file.txt"), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, {ObjectName: pointerTo("dir_in_root/subdir"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, } // Scale up from service to object @@ -128,7 +129,7 @@ func (s *ListSuite) Scenario_ListProperties(svm *ScenarioVariationManager) { for _, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) props := obj.GetProperties(svm) versionId := common.IffNotNil(props.BlobProperties.VersionId, "") @@ -182,7 +183,7 @@ func (s *ListSuite) Scenario_ListProperties_TextOutput(svm *ScenarioVariationMan for _, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) props := obj.GetProperties(svm) versionId := common.IffNotNil(props.BlobProperties.VersionId, "") @@ -239,7 +240,7 @@ func (s *ListSuite) Scenario_ListPropertiesInvalid(svm *ScenarioVariationManager for _, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) props := obj.GetProperties(svm) expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{ @@ -282,7 +283,7 @@ func (s *ListSuite) Scenario_ListMachineReadable(svm 
*ScenarioVariationManager) for _, blobName := range blobNames { CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{ Path: blobName, @@ -323,7 +324,7 @@ func (s *ListSuite) Scenario_ListMegaUnits(svm *ScenarioVariationManager) { for _, blobName := range blobNames { CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{ Path: blobName, @@ -355,7 +356,7 @@ func (s *ListSuite) Scenario_ListBasic_TextOutput(svm *ScenarioVariationManager) acct := GetAccount(svm, PrimaryStandardAcct) srcService := acct.GetService(svm, common.ELocation.Blob()) - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, srcService, ResourceDefinitionObject{ ObjectName: pointerTo("test"), @@ -393,7 +394,7 @@ func (s *ListSuite) Scenario_ListRunningTally(svm *ScenarioVariationManager) { acct := GetAccount(svm, PrimaryStandardAcct) srcService := acct.GetService(svm, common.ELocation.Blob()) - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, srcService, ResourceDefinitionObject{ ObjectName: pointerTo("test"), @@ -437,7 +438,7 @@ func (s *ListSuite) Scenario_ListRunningTallyMegaUnits(svm *ScenarioVariationMan for _, blobName := range blobNames { CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{ Path: blobName, @@ -480,7 +481,7 @@ func (s *ListSuite) Scenario_ListRunningTallyMachineReadable(svm *ScenarioVariat for _, blobName := range blobNames { CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{ Path: blobName, @@ -521,7 +522,7 @@ func (s *ListSuite) Scenario_ListVersionIdNoAdditionalVersions(svm *ScenarioVari for _, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) props := obj.GetProperties(svm) versionId := common.IffNotNil(props.BlobProperties.VersionId, "") @@ -562,7 +563,7 @@ func (s *ListSuite) Scenario_ListVersionIdNoAdditionalVersions_TextOutput(svm *S for _, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: 
pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) props := obj.GetProperties(svm) versionId := common.IffNotNil(props.BlobProperties.VersionId, "") @@ -606,7 +607,7 @@ func (s *ListSuite) Scenario_ListVersionIdWithVersions(svm *ScenarioVariationMan for i, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) props := obj.GetProperties(svm) versionId := common.IffNotNil(props.BlobProperties.VersionId, "") @@ -614,7 +615,7 @@ func (s *ListSuite) Scenario_ListVersionIdWithVersions(svm *ScenarioVariationMan // Create a new version of the blob for the first two blobs if i < 2 { - obj.Create(svm, NewRandomObjectContentContainer(svm, SizeFromString("2K")), ObjectProperties{}) + obj.Create(svm, NewRandomObjectContentContainer(SizeFromString("2K")), ObjectProperties{}) props = obj.GetProperties(svm) versionId = common.IffNotNil(props.BlobProperties.VersionId, "") expectedObjects[AzCopyOutputKey{Path: blobName, VersionId: versionId}] = cmd.AzCopyListObject{Path: blobName, ContentLength: "2.00 KiB", VersionId: versionId} @@ -654,12 +655,12 @@ func (s *ListSuite) Scenario_ListWithVersions(svm *ScenarioVariationManager) { for i, blobName := range blobNames { obj := CreateResource[ObjectResourceManager](svm, srcContainer, ResourceDefinitionObject{ ObjectName: pointerTo(blobName), - Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), + Body: NewRandomObjectContentContainer(SizeFromString("1K")), }) // Create a new version of the blob for the first two blobs if i < 2 { - obj.Create(svm, NewRandomObjectContentContainer(svm, SizeFromString("2K")), ObjectProperties{}) + obj.Create(svm, NewRandomObjectContentContainer(SizeFromString("2K")), ObjectProperties{}) expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{Path: blobName, ContentLength: "2.00 KiB"} } else { expectedObjects[AzCopyOutputKey{Path: blobName}] = cmd.AzCopyListObject{Path: blobName, ContentLength: "1.00 KiB"} @@ -702,16 +703,16 @@ func (s *ListSuite) Scenario_ListHierarchyTrailingDot(svm *ScenarioVariationMana } } objects := []ResourceDefinitionObject{ - {ObjectName: pointerTo("file_in_root"), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), Size: "1.00 KiB"}, - {ObjectName: pointerTo("file_in_root."), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), Size: "1.00 KiB"}, + {ObjectName: pointerTo("file_in_root"), Body: NewRandomObjectContentContainer(SizeFromString("1K")), Size: "1.00 KiB"}, + {ObjectName: pointerTo("file_in_root."), Body: NewRandomObjectContentContainer(SizeFromString("1K")), Size: "1.00 KiB"}, {ObjectName: pointerTo("dir_in_root."), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, - {ObjectName: pointerTo("dir_in_root./file"), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, - {ObjectName: pointerTo("dir_in_root./file."), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root./file"), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root./file."), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 
KiB"}, {ObjectName: pointerTo("dir_in_root./subdir"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, {ObjectName: pointerTo("dir_in_root./subdir."), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, {ObjectName: pointerTo("dir_in_root"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, - {ObjectName: pointerTo("dir_in_root/file"), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, - {ObjectName: pointerTo("dir_in_root/file."), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root/file"), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root/file."), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, {ObjectName: pointerTo("dir_in_root/subdir"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, {ObjectName: pointerTo("dir_in_root/subdir."), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, } @@ -760,16 +761,16 @@ func (s *ListSuite) Scenario_ListHierarchyTrailingDotDisable(svm *ScenarioVariat } } objects := []ResourceDefinitionObject{ - {ObjectName: pointerTo("file_in_root"), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), Size: "1.00 KiB"}, - {ObjectName: pointerTo("file_in_root."), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K")), Size: "1.00 KiB"}, + {ObjectName: pointerTo("file_in_root"), Body: NewRandomObjectContentContainer(SizeFromString("1K")), Size: "1.00 KiB"}, + {ObjectName: pointerTo("file_in_root."), Body: NewRandomObjectContentContainer(SizeFromString("1K")), Size: "1.00 KiB"}, {ObjectName: pointerTo("dir_in_root."), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, - {ObjectName: pointerTo("dir_in_root./file"), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, - {ObjectName: pointerTo("dir_in_root./file."), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root./file"), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root./file."), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, {ObjectName: pointerTo("dir_in_root./subdir"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, {ObjectName: pointerTo("dir_in_root./subdir."), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, {ObjectName: pointerTo("dir_in_root"), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, - {ObjectName: pointerTo("dir_in_root/file"), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, - {ObjectName: pointerTo("dir_in_root/file."), Body: NewRandomObjectContentContainer(svm, SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root/file"), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, + {ObjectName: pointerTo("dir_in_root/file."), Body: NewRandomObjectContentContainer(SizeFromString("2K")), Size: "2.00 KiB"}, {ObjectName: pointerTo("dir_in_root/subdir"), ObjectProperties: ObjectProperties{EntityType: 
common.EEntityType.Folder()}, Size: "0.00 B"}, {ObjectName: pointerTo("dir_in_root/subdir."), ObjectProperties: ObjectProperties{EntityType: common.EEntityType.Folder()}, Size: "0.00 B"}, } @@ -819,5 +820,84 @@ func (s *ListSuite) Scenario_EmptySASErrorCodes(svm *ScenarioVariationManager) { }) // Validate that the stdout contains these error URLs - ValidateErrorOutput(svm, stdout, "https://aka.ms/AzCopyError/NoAuthenticationInformation") + ValidateMessageOutput(svm, stdout, "https://aka.ms/AzCopyError/NoAuthenticationInformation") +} + +func (s *ListSuite) Scenario_VirtualDirectoryHandling(svm *ScenarioVariationManager) { + targetAcct := pointerTo(NamedResolveVariation(svm, map[string]string{ + "FNS": PrimaryStandardAcct, + "HNS": PrimaryHNSAcct, + })) + + // This should also fix copy/sync because the changed codepath overlaps, *but*, we'll have a separate test for that too. + srcRoot := GetRootResource(svm, common.ELocation.Blob(), GetResourceOptions{ + PreferredAccount: targetAcct, + }) + + resourceMapping := NamedResolveVariation(svm, map[string]ObjectResourceMappingFlat{ + "DisallowOverlap": { // "foo" is a folder, only a folder, there is no difference between "foo" and "foo/". + "foo": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + }, + Body: NewZeroObjectContentContainer(0), + }, + "foo/bar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File inside + "baz": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File on the side + }, + "AllowOverlap": { // "foo" (the file), and "foo/" (the directory) can exist, but "foo/" is still a directory with metadata. + "foo/": ResourceDefinitionObject{ + ObjectProperties: ObjectProperties{ + EntityType: common.EEntityType.Folder(), + }, + Body: NewZeroObjectContentContainer(0), + }, + "foo/bar": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File inside + "foo": ResourceDefinitionObject{Body: NewZeroObjectContentContainer(1024)}, // File on the side + }, + }) + + // HNS will automatically correct blob calls to "foo/" to "foo", which is correct behavior + // But incompatible with the overlap scenario. 
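+ // So when the resolved mapping contains the overlapping "foo/" key and the target account is HNS, the variation is skipped below: FNS lets the file "foo" and the zero-length directory marker "foo/" coexist, while HNS collapses the trailing slash into "foo".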
+ if _, ok := resourceMapping["foo/"]; *targetAcct == PrimaryHNSAcct && ok { + svm.InvalidateScenario() + return + } + + res := CreateResource[ContainerResourceManager](svm, srcRoot, ResourceDefinitionContainer{ + Objects: resourceMapping, + }) + + tgt := GetRootResource(svm, common.ELocation.BlobFS(), GetResourceOptions{ + PreferredAccount: targetAcct, + }).(ServiceResourceManager).GetContainer(res.ContainerName()) + + stdout, _ := RunAzCopy( + svm, + AzCopyCommand{ + Verb: AzCopyVerbList, + Targets: []ResourceManager{ + tgt, + }, + Flags: ListFlags{}, + }, + ) + + expectedObjects := make(map[AzCopyOutputKey]cmd.AzCopyListObject) + expectedObjects[AzCopyOutputKey{Path: "/"}] = cmd.AzCopyListObject{Path: "/", ContentLength: "0.00 B"} + for k, v := range resourceMapping { + // Correct for naming scheme if needed + if v.EntityType == common.EEntityType.Folder() && !strings.HasSuffix(k, "/") { + k += "/" + } + + expectedObjects[AzCopyOutputKey{ + Path: k, + }] = cmd.AzCopyListObject{ + Path: k, + ContentLength: SizeToString(v.Body.Size(), false), + } + } + + ValidateListOutput(svm, stdout, expectedObjects, nil) // No expected summary } diff --git a/e2etest/zt_newe2e_oauth_device_test.go b/e2etest/zt_newe2e_oauth_device_test.go new file mode 100644 index 000000000..7eb1fad75 --- /dev/null +++ b/e2etest/zt_newe2e_oauth_device_test.go @@ -0,0 +1,102 @@ +package e2etest + +import ( + "flag" + "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/Azure/azure-storage-azcopy/v10/testSuite/cmd" + "path" + "strings" + "time" +) + +var runDeviceCodeTest = flag.Bool("device-code", false, "Whether or not to run device code tests. These must be run manually due to interactive nature.") + +func init() { + if runDeviceCodeTest != nil && *runDeviceCodeTest { + suiteManager.RegisterSuite(&DeviceLoginManualSuite{}) + } +} + +type DeviceLoginManualSuite struct { +} + +func (s *DeviceLoginManualSuite) SetupSuite(a Asserter) { + stdout := RunAzCopyLoginLogout(a, AzCopyVerbLogin) + ValidateSuccessfulLogin(a, stdout) +} + +func ValidateSuccessfulLogin(a Asserter, stdout AzCopyStdout) { + if dryrunner, ok := a.(DryrunAsserter); ok && dryrunner.Dryrun() { + return + } + // Check for successful login + loggedIn := false + for _, p := range stdout.RawStdout() { + loggedIn = loggedIn || strings.Contains(p, "Login succeeded") + } + a.AssertNow("login should be successful", Equal{}, loggedIn, true) +} + +func (s *DeviceLoginManualSuite) TeardownSuite(a Asserter) { + RunAzCopyLoginLogout(a, AzCopyVerbLogout) +} + +func (s *DeviceLoginManualSuite) Scenario_CopySync(svm *ScenarioVariationManager) { + azCopyVerb := ResolveVariation(svm, []AzCopyVerb{AzCopyVerbCopy, AzCopyVerbSync}) // Calculate verb early to create the destination object early + // Scale up from service to object + dstObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob()})), ResourceDefinitionContainer{}).GetObject(svm, "test", common.EEntityType.File()) + // The object must exist already if we're syncing. 
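+ // (Sync only transfers a source that is newer than its destination, hence the create-then-wait pattern below.)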
+ if azCopyVerb == AzCopyVerbSync { + dstObj.Create(svm, NewZeroObjectContentContainer(0), ObjectProperties{}) + + if !svm.Dryrun() { + // Make sure the LMT is in the past + time.Sleep(time.Second * 10) + } + } + + body := NewRandomObjectContentContainer(SizeFromString("10K")) + // Scale up from service to object + srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob()})), ResourceDefinitionObject{ + ObjectName: pointerTo("test"), + Body: body, + }) + + // no local->local + if srcObj.Location().IsLocal() && dstObj.Location().IsLocal() { + svm.InvalidateScenario() + return + } + azcopyLogPathFolder := cmd.GetAzCopyAppPath() + azcopyJobPlanFolder := path.Join(azcopyLogPathFolder, "plans") + + stdout, _ := RunAzCopy( + svm, + AzCopyCommand{ + Verb: azCopyVerb, + Environment: &AzCopyEnvironment{ + ManualLogin: true, + LogLocation: &azcopyLogPathFolder, + JobPlanLocation: &azcopyJobPlanFolder, + }, + Targets: []ResourceManager{ + TryApplySpecificAuthType(srcObj, EExplicitCredentialType.OAuth(), svm, CreateAzCopyTargetOptions{}), + TryApplySpecificAuthType(dstObj, EExplicitCredentialType.OAuth(), svm, CreateAzCopyTargetOptions{})}, + Flags: CopyFlags{ + CopySyncCommonFlags: CopySyncCommonFlags{ + Recursive: pointerTo(true), + }, + }, + }) + + ValidateResource[ObjectResourceManager](svm, dstObj, ResourceDefinitionObject{ + Body: body, + }, true) + + if srcObj.Location().IsRemote() { + ValidateMessageOutput(svm, stdout, "Authenticating to source using Azure AD") + } + if dstObj.Location().IsRemote() { + ValidateMessageOutput(svm, stdout, "Authenticating to destination using Azure AD") + } +} diff --git a/e2etest/zt_newe2e_remove_test.go b/e2etest/zt_newe2e_remove_test.go index 3d5408a3e..80e5e780b 100644 --- a/e2etest/zt_newe2e_remove_test.go +++ b/e2etest/zt_newe2e_remove_test.go @@ -27,7 +27,7 @@ func (s *RemoveSuite) Scenario_SingleFileRemoveBlobFSEncodedPath(svm *ScenarioVa srcService := acct.GetService(svm, ResolveVariation(svm, []common.Location{common.ELocation.BlobFS()})) svm.InsertVariationSeparator(":") - body := NewRandomObjectContentContainer(svm, SizeFromString("0K")) + body := NewRandomObjectContentContainer(SizeFromString("0K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, srcService, ResourceDefinitionObject{ ObjectName: pointerTo("%23%25%3F"), @@ -70,7 +70,7 @@ func (s *RemoveSuite) Scenario_EmptySASErrorCodes(svm *ScenarioVariationManager) }) // Validate that the stdout contains these error URLs - ValidateErrorOutput(svm, stdout, "https://aka.ms/AzCopyError/NoAuthenticationInformation") + ValidateMessageOutput(svm, stdout, "https://aka.ms/AzCopyError/NoAuthenticationInformation") } func (s *RemoveSuite) Scenario_RemoveVirtualDirectory(svm *ScenarioVariationManager) { @@ -81,7 +81,7 @@ func (s *RemoveSuite) Scenario_RemoveVirtualDirectory(svm *ScenarioVariationMana srcObjs := make(ObjectResourceMappingFlat) for i := range 5 { name := "dir_5_files/test" + strconv.Itoa(i) + ".txt" - obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(svm, SizeFromString("1K"))} + obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: NewRandomObjectContentContainer(SizeFromString("1K"))} CreateResource[ObjectResourceManager](svm, srcContainer, obj) obj.Body = nil obj.ObjectShouldExist = to.Ptr(false) diff --git a/e2etest/zt_newe2e_s2s_test.go b/e2etest/zt_newe2e_s2s_test.go index 
b286196b4..3b4bf3866 100644 --- a/e2etest/zt_newe2e_s2s_test.go +++ b/e2etest/zt_newe2e_s2s_test.go @@ -17,7 +17,7 @@ func (s *S2STestSuite) Scenario_BlobDestinationSizes(svm *ScenarioVariationManag dst := common.ELocation.Blob() size := ResolveVariation(svm, []int64{0, common.KiloByte, 63 * common.MegaByte}) fileName := "test_copy.txt" - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) // TODO : Add S3 to source srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, src), ResourceDefinitionContainer{}) @@ -47,7 +47,7 @@ func (s *S2STestSuite) Scenario_BlobFile1KB(svm *ScenarioVariationManager) { dst := common.ELocation.File() size := common.KiloByte fileName := "test_copy.txt" - body := NewRandomObjectContentContainer(svm, int64(size)) + body := NewRandomObjectContentContainer(int64(size)) // TODO : Add S3 to source srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, src), ResourceDefinitionContainer{}) @@ -77,7 +77,7 @@ func (s *S2STestSuite) Scenario_SingleFileCopyBlobTypeVariations(svm *ScenarioVa destBlobType := ResolveVariation(svm, []blob.BlobType{blob.BlobTypeBlockBlob, blob.BlobTypePageBlob, blob.BlobTypeAppendBlob}) fileName := "test_512b_copy.txt" - body := NewRandomObjectContentContainer(svm, 512) + body := NewRandomObjectContentContainer(512) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -111,7 +111,7 @@ func (s *S2STestSuite) Scenario_SingleFilePropertyMetadata(svm *ScenarioVariatio srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Blob(), common.ELocation.File()})), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) - srcBody := NewRandomObjectContentContainer(svm, 0) + srcBody := NewRandomObjectContentContainer(0) srcProps := ObjectProperties{ Metadata: common.Metadata{"Author": pointerTo("gapra"), "Viewport": pointerTo("width"), "Description": pointerTo("test file")}, HTTPHeaders: contentHeaders{ @@ -147,7 +147,7 @@ func (s *S2STestSuite) Scenario_SingleFilePropertyMetadata(svm *ScenarioVariatio func (s *S2STestSuite) Scenario_BlockBlobBlockBlob(svm *ScenarioVariationManager) { fileName := "test_copy.txt" size := ResolveVariation(svm, []int64{0, 1, 8*common.MegaByte - 1, 8 * common.MegaByte, 8*common.MegaByte + 1}) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -180,7 +180,7 @@ func (s *S2STestSuite) Scenario_BlockBlobBlockBlob(svm *ScenarioVariationManager func (s *S2STestSuite) Scenario_BlockBlobBlockBlobNoPreserveTier(svm *ScenarioVariationManager) { fileName := "test_copy.txt" size := int64(4*common.MegaByte + 1) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}). 
GetObject(svm, fileName, common.EEntityType.File()) @@ -214,7 +214,7 @@ func (s *S2STestSuite) Scenario_BlockBlobBlockBlobNoPreserveTier(svm *ScenarioVa func (s *S2STestSuite) Scenario_PageBlobToPageBlob(svm *ScenarioVariationManager) { fileName := "test_copy.txt" size := ResolveVariation(svm, []int64{0, 512, common.KiloByte, 4 * common.MegaByte}) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -246,7 +246,7 @@ func (s *S2STestSuite) Scenario_PageBlobToPageBlob(svm *ScenarioVariationManager func (s *S2STestSuite) Scenario_AppendBlobToAppendBlob(svm *ScenarioVariationManager) { fileName := "test_copy.txt" size := ResolveVariation(svm, []int64{0, 1, 8*common.MegaByte - 1, 8 * common.MegaByte, 8*common.MegaByte + 1}) - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcObj := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}). GetObject(svm, fileName, common.EEntityType.File()) @@ -278,8 +278,8 @@ func (s *S2STestSuite) Scenario_AppendBlobToAppendBlob(svm *ScenarioVariationMan func (s *S2STestSuite) Scenario_OverwriteSingleFile(svm *ScenarioVariationManager) { srcFileName := "test_1kb_copy.txt" dstFileName := "test_copy.txt" - srcBody := NewRandomObjectContentContainer(svm, common.KiloByte) - dstBody := NewRandomObjectContentContainer(svm, 2*common.KiloByte) + srcBody := NewRandomObjectContentContainer(common.KiloByte) + dstBody := NewRandomObjectContentContainer(2 * common.KiloByte) // TODO : Add S3 to source srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Blob(), common.ELocation.File()})), ResourceDefinitionContainer{}) @@ -309,8 +309,8 @@ func (s *S2STestSuite) Scenario_OverwriteSingleFile(svm *ScenarioVariationManage func (s *S2STestSuite) Scenario_NonOverwriteSingleFile(svm *ScenarioVariationManager) { srcFileName := "test_1kb_copy.txt" dstFileName := "test_copy.txt" - srcBody := NewRandomObjectContentContainer(svm, common.KiloByte) - dstBody := NewRandomObjectContentContainer(svm, 2*common.KiloByte) + srcBody := NewRandomObjectContentContainer(common.KiloByte) + dstBody := NewRandomObjectContentContainer(2 * common.KiloByte) // TODO : Add S3 to source srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Blob(), common.ELocation.File()})), ResourceDefinitionContainer{}) @@ -340,7 +340,7 @@ func (s *S2STestSuite) Scenario_NonOverwriteSingleFile(svm *ScenarioVariationMan func (s *S2STestSuite) Scenario_BlobBlobOAuth(svm *ScenarioVariationManager) { fileName := "test_copy.txt" size := int64(17) * common.MegaByte - body := NewRandomObjectContentContainer(svm, size) + body := NewRandomObjectContentContainer(size) srcContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -374,7 +374,7 @@ func (s *S2STestSuite) Scenario_S2SContainerSingleFilePropertyAndMetadata(svm *S dstContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), 
ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) - srcBody := NewRandomObjectContentContainer(svm, 0) + srcBody := NewRandomObjectContentContainer(0) srcProps := ObjectProperties{ Metadata: common.Metadata{"Author": pointerTo("gapra"), "Viewport": pointerTo("width"), "Description": pointerTo("test file")}, HTTPHeaders: contentHeaders{ @@ -425,7 +425,7 @@ func (s *S2STestSuite) Scenario_S2SContainerSingleFileStripTopDir(svm *ScenarioV dstContainer := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{}) srcObj := srcContainer.GetObject(svm, fileName, common.EEntityType.File()) - srcBody := NewRandomObjectContentContainer(svm, 0) + srcBody := NewRandomObjectContentContainer(0) srcObj.Create(svm, srcBody, ObjectProperties{}) dstObj := dstContainer.GetObject(svm, fileName, common.EEntityType.File()) @@ -473,7 +473,7 @@ func (s *S2STestSuite) Scenario_S2SDirectoryMultipleFiles(svm *ScenarioVariation } for i := range 10 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: body} CreateResource[ObjectResourceManager](svm, srcContainer, obj) dstObj := ResourceDefinitionObject{ObjectName: pointerTo("dir_file_copy_test/" + name), Body: body} @@ -527,7 +527,7 @@ func (s *S2STestSuite) Scenario_S2SDirectoryMultipleFilesStripTopDirRecursive(sv } for i := range 10 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: body} CreateResource[ObjectResourceManager](svm, srcContainer, obj) dstObj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: body} @@ -584,7 +584,7 @@ func (s *S2STestSuite) Scenario_S2SDirectoryMultipleFilesStripTopDirNonRecursive } for i := range 10 { name := dir + "/test" + strconv.Itoa(i) + ".txt" - body := NewRandomObjectContentContainer(svm, SizeFromString("1K")) + body := NewRandomObjectContentContainer(SizeFromString("1K")) obj := ResourceDefinitionObject{ObjectName: pointerTo(name), Body: body} CreateResource[ObjectResourceManager](svm, srcContainer, obj) if j == 0 { diff --git a/e2etest/zt_newe2e_sync_test.go b/e2etest/zt_newe2e_sync_test.go index 66529de10..1b0bb0477 100644 --- a/e2etest/zt_newe2e_sync_test.go +++ b/e2etest/zt_newe2e_sync_test.go @@ -1,7 +1,9 @@ package e2etest import ( + "bytes" "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-storage-azcopy/v10/common" "io/fs" "os" @@ -42,11 +44,11 @@ func (s *SyncTestSuite) Scenario_TestSyncHashStorageModes(a *ScenarioVariationMa // A local source is required to use any hash storage mode. source := NewLocalContainer(a) dupeBodyPath := "underfolder/donottransfer" // A directory is used to validate that the hidden files cache creates *all* subdirectories. 
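// dupeBody's MD5 is stamped onto the destination copy of "donottransfer" below, so hash-based sync should see matching hashes and skip the transfer.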
- dupeBody := NewRandomObjectContentContainer(a, 512) + dupeBody := NewRandomObjectContentContainer(512) resourceSpec := ResourceDefinitionContainer{ Objects: ObjectResourceMappingFlat{ - "newobject": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(a, 512)}, - "shouldtransfer": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(a, 512)}, + "newobject": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(512)}, + "shouldtransfer": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(512)}, dupeBodyPath: ResourceDefinitionObject{Body: dupeBody}, // note: at this moment, this is *not* a great test, because we lack plan file validation. todo WI#26418256 }, } @@ -59,7 +61,7 @@ func (s *SyncTestSuite) Scenario_TestSyncHashStorageModes(a *ScenarioVariationMa ResourceDefinitionContainer{ Objects: ObjectResourceMappingFlat{ // Object to overwrite - "shouldtransfer": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(a, 512)}, + "shouldtransfer": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(512)}, // Object to avoid overwriting dupeBodyPath: ResourceDefinitionObject{Body: dupeBody, ObjectProperties: ObjectProperties{HTTPHeaders: contentHeaders{contentMD5: md5[:]}}}, }, @@ -147,8 +149,8 @@ func (s *SyncTestSuite) Scenario_TestSyncRemoveDestination(svm *ScenarioVariatio PreferredAccount: common.Iff(dstLoc == common.ELocation.BlobFS(), pointerTo(PrimaryHNSAcct), nil), }), ResourceDefinitionContainer{ Objects: ObjectResourceMappingFlat{ - "deleteme.txt": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, 512)}, - "also/deleteme.txt": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, 512)}, + "deleteme.txt": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(512)}, + "also/deleteme.txt": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(512)}, }, }) @@ -170,3 +172,92 @@ func (s *SyncTestSuite) Scenario_TestSyncRemoveDestination(svm *ScenarioVariatio }, }, false) } + +// Scenario_TestSyncDeleteDestinationIfNecessary tests that sync is +// - capable of deleting blobs of the wrong type +func (s *SyncTestSuite) Scenario_TestSyncDeleteDestinationIfNecessary(svm *ScenarioVariationManager) { + dstLoc := ResolveVariation(svm, []common.Location{common.ELocation.Blob(), common.ELocation.BlobFS()}) + dstRes := CreateResource[ContainerResourceManager](svm, + GetRootResource(svm, dstLoc, GetResourceOptions{ + PreferredAccount: common.Iff(dstLoc == common.ELocation.Blob(), + pointerTo(PrimaryStandardAcct), // + pointerTo(PrimaryHNSAcct), + ), + }), + ResourceDefinitionContainer{}) + + overwriteName := "copyme.txt" + ignoreName := "ignore.txt" + + if !svm.Dryrun() { // We're working directly with raw clients, so, we need to be careful. + buf := streaming.NopCloser(bytes.NewReader([]byte("foo"))) + + switch dstRes.Location() { + case common.ELocation.Blob(): // In this case, we want to submit a block ID with a different length. + ctClient := dstRes.(*BlobContainerResourceManager).internalClient + blobClient := ctClient.NewBlockBlobClient(overwriteName) + + _, err := blobClient.StageBlock(ctx, base64.StdEncoding.EncodeToString([]byte("foobar")), buf, nil) + svm.Assert("stage block error", IsNil{}, err) + case common.ELocation.BlobFS(): // In this case, we want to upload a blob via DFS. 
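+ // Like the staged block above, this DFS-created file must predate the source object; the shared sleep after the switch guarantees that.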
+ ctClient := dstRes.(*BlobFSFileSystemResourceManager).internalClient + pathClient := ctClient.NewFileClient(overwriteName) + + _, err := pathClient.Create(ctx, nil) + svm.Assert("Create error", IsNil{}, err) + err = pathClient.UploadStream(ctx, buf, nil) + svm.Assert("Upload stream error", IsNil{}, err) + } + + // Sleep so it's in the past. + time.Sleep(time.Second * 10) + } + + srcData := NewRandomObjectContentContainer(1024) + srcRes := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, common.ELocation.Blob()), ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + overwriteName: ResourceDefinitionObject{Body: srcData}, + ignoreName: ResourceDefinitionObject{Body: srcData}, + }, + }) + + dstData := NewRandomObjectContentContainer(1024) + if !svm.Dryrun() { + time.Sleep(time.Second * 10) // Make sure this file is newer + + CreateResource[ObjectResourceManager](svm, dstRes, ResourceDefinitionObject{ + ObjectName: &ignoreName, + Body: dstData, + }) + } + + stdout, _ := RunAzCopy(svm, AzCopyCommand{ + Verb: AzCopyVerbSync, + Targets: []ResourceManager{srcRes, dstRes}, + Flags: SyncFlags{ + DeleteIfNecessary: pointerTo(true), + }, + }) + + ValidatePlanFiles(svm, stdout, ExpectedPlanFile{ + Objects: map[PlanFilePath]PlanFileObject{ + PlanFilePath{"/" + overwriteName, "/" + overwriteName}: { + ShouldBePresent: pointerTo(true), + }, + PlanFilePath{"/" + ignoreName, "/" + ignoreName}: { + ShouldBePresent: pointerTo(false), + }, + }, + }) + + ValidateResource(svm, dstRes, ResourceDefinitionContainer{ + Objects: ObjectResourceMappingFlat{ + overwriteName: ResourceDefinitionObject{ + Body: srcData, // Validate we overwrote this one + }, + ignoreName: ResourceDefinitionObject{ + Body: dstData, // Validate we did not overwrite this one + }, + }, + }, true) +} diff --git a/e2etest/zt_newe2e_workload_test.go b/e2etest/zt_newe2e_workload_test.go index 50f391f92..6d799fae7 100644 --- a/e2etest/zt_newe2e_workload_test.go +++ b/e2etest/zt_newe2e_workload_test.go @@ -31,7 +31,7 @@ func (s *WorkloadIdentitySuite) Scenario_SingleFileUploadDownloadWorkloadIdentit } } - body := NewRandomObjectContentContainer(svm, SizeFromString("10K")) + body := NewRandomObjectContentContainer(SizeFromString("10K")) // Scale up from service to object srcObj := CreateResource[ObjectResourceManager](svm, GetRootResource(svm, ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob()})), ResourceDefinitionObject{ ObjectName: pointerTo("test"), diff --git a/e2etest/zt_sync_local_file_test.go b/e2etest/zt_sync_local_file_test.go index 7af8dfdc0..0d04ef06a 100644 --- a/e2etest/zt_sync_local_file_test.go +++ b/e2etest/zt_sync_local_file_test.go @@ -32,5 +32,5 @@ func (s *FileOAuthTestSuite) Scenario_SyncFromLocalToFSWarningMsg(svm *ScenarioV ShouldFail: false, }) - ValidateErrorOutput(svm, stdout, cmd.LocalToFileShareWarnMsg) + ValidateMessageOutput(svm, stdout, cmd.LocalToFileShareWarnMsg) } diff --git a/go.mod b/go.mod index ac47b6608..de67e9938 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,10 @@ module github.com/Azure/azure-storage-azcopy/v10 require ( - cloud.google.com/go/storage v1.43.0 + cloud.google.com/go/storage v1.45.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.3.0 - github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda github.com/danieljoos/wincred v1.2.2 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -17,46 +16,55 @@ require ( github.com/pkg/xattr v0.4.10 github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/spf13/cobra v1.8.1 - github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807 github.com/wastore/keyctl v0.3.1 - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/oauth2 v0.21.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - google.golang.org/api v0.189.0 + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/oauth2 v0.23.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.26.0 + google.golang.org/api v0.202.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c ) require github.com/stretchr/testify v1.9.0 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 github.com/Azure/go-autorest/autorest/date v0.3.0 - golang.org/x/net v0.27.0 + github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 + golang.org/x/net v0.30.0 ) require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.2 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.11 // indirect + cel.dev/expr v0.16.1 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.8 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/s2a-go v0.1.8 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -64,22 +72,27 @@ require 
( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20240723171418-e6d459c13d2a // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240723171418-e6d459c13d2a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index e70b7db96..7dadd4db6 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,32 @@ +cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= -cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.11 h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw= -cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ= -cloud.google.com/go/longrunning v0.5.10 
h1:eB/BniENNRKhjz/xgiillrdcH3G74TGSl3BXinGlI7E= -cloud.google.com/go/longrunning v0.5.10/go.mod h1:tljz5guTr5oc/qhlUjBlk7UAIFMOGuPNxkNDZXlLics= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= +cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= +cloud.google.com/go/storage v1.45.0 h1:5av0QcIVj77t+44mV4gffFC/LscFRUhto6UBMB5SimM= +cloud.google.com/go/storage v1.45.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= @@ -29,26 +39,34 @@ 
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.3.0 h1:VIM+5hiNPIOmFy2Ra github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.3.0/go.mod h1:amxdkUcI+5PQTRqJCPqWXf1CBMh/q3YGuFwyqpwAwd4= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= -github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda h1:NOo6+gM9NNPJ3W56nxOKb4164LEw094U0C8zYQM8mQU= github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda/go.mod h1:2CaSFTh2ph9ymS6goiOKIBdfhwWUVsX4nQ5QjIYFHHs= github.com/PuerkitoBio/goquery v1.7.1/go.mod h1:XY0pP4kfraEmmV1O7Uf6XyjoslwsneBbgeDjLYuN8xY= github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -57,10 +75,16 @@ github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= @@ -70,8 +94,6 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod 
h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -105,14 +127,16 @@ github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/hillu/go-ntdll v0.0.0-20240418173803-69345773b582 h1:msYGLGTxozPDUlpoVgYS6CrFw/PCRBOds5PGwo2XXuk= github.com/hillu/go-ntdll v0.0.0-20240418173803-69345773b582/go.mod h1:cHjYsnAnSckPDx8/H01Y+owD1hf2adLA6VRiw4guEbA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -135,9 +159,13 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -155,127 +183,100 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807 h1:Uzh85j0tl46Sf2OOx1wDePSWkz3Eq8XdCFkLXqaX8Bg= -github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807/go.mod h1:zI8umr7xnBSyT9ZJ8wn48RiQ0EWXo4xmYLNw9FQvC9w= github.com/wastore/keyctl v0.3.1 h1:wMkYW9y9jGbQ1ARBLGLwnDdbgrkbuSeuIQeHy+BZOU0= github.com/wastore/keyctl v0.3.1/go.mod h1:1359RfMRDlblBSa2vaPC+kkmIxxt+rgl/FxLG38h9xM= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= 
+go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= -google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= +google.golang.org/api v0.202.0 h1:y1iuVHMqokQbimW79ZqPZWo4CiyFu6HcCYHwSNyzlfo= +google.golang.org/api v0.202.0/go.mod h1:3Jjeq7M/SFblTNCp7ES2xhq+WvGL0KeXI0joHQBfwTQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240723171418-e6d459c13d2a h1:hPbLwHFm59QoSKUT0uGaL19YN4U9W5lY4+iNXlUBNj0= -google.golang.org/genproto v0.0.0-20240723171418-e6d459c13d2a/go.mod h1:+7gIV7FP6jBo5hiY2lsWA//NkNORQVj0J1Isc/4HzR4= -google.golang.org/genproto/googleapis/api v0.0.0-20240723171418-e6d459c13d2a h1:YIa/rzVqMEokBkPtydCkx1VLmv3An1Uw7w1P1m6EhOY= -google.golang.org/genproto/googleapis/api v0.0.0-20240723171418-e6d459c13d2a/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a h1:hqK4+jJZXCU4pW7jsAdGOVFIfLHQeV7LaizZKnZ84HI= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240723171418-e6d459c13d2a/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -285,8 +286,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/release-pipeline.yml b/release-pipeline.yml index 7a30adaf2..a6fe40c2a 100644 --- a/release-pipeline.yml +++ b/release-pipeline.yml @@ -1057,15 +1057,8 @@ stages: steps: - checkout: none - - script: | - sudo apt 
update - sudo apt --fix-broken install - ldd --version - displayName: "GLIBC Version" - - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-binaries' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-binaries/azcopy_linux*amd64' @@ -1110,15 +1103,9 @@ stages: vmImage: $(vmImage) steps: - checkout: none - - script: | - sudo apt update - sudo apt --fix-broken install - ldd --version - displayName: "GLIBC Version" - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-linux-signed' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-linux-signed/azcopy_linux_*amd64*.tar.gz' @@ -1201,17 +1188,16 @@ stages: - ImageOverride -equals $(AgentName) steps: - checkout: none - + - script: | sudo apt update sudo apt-get install libsecret-1-dev -y sudo apt --fix-broken install ldd --version - displayName: "GLIBC Version" + displayName: "Libsecret Install" - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-linux-signed' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-linux-signed/azcopy_linux_*arm64*.tar.gz' @@ -1301,13 +1287,12 @@ stages: - script: | sudo apt update sudo apt-get install libsecret-1-dev -y - sudo apt --fix-broken install -y + sudo apt --fix-broken install ldd --version - displayName: "GLIBC Version" + displayName: "Libsecret Install" - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-binaries' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-binaries/azcopy_linux*arm64' @@ -1356,19 +1341,8 @@ stages: steps: - checkout: none - - script: | - sudo yum update -y - sudo yum install git -y - displayName: 'Install Git' - - # get glibc version with which build is done - - script: | - ldd --version - displayName: "GLIBC Version" - - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-linux-signed' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-linux-signed/azcopy-*arm64.rpm' @@ -1403,22 +1377,11 @@ stages: steps: - checkout: none - - script: | - sudo yum update -y - sudo yum install git -y - displayName: 'Install Git' - - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-linux-signed' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-linux-signed/azcopy-*x86_64.rpm' - - # get glibc version with which build is done - - script: | - ldd --version - displayName: "GLIBC Version" - script: | sudo sed -i '/^failovermethod=/d' /etc/yum.repos.d/*.repo @@ -1450,24 +1413,13 @@ stages: steps: - checkout: none - - - script: | - sudo tdnf update -y - sudo tdnf install git -y - displayName: 'Install dependencies' - template: setup/test_artifacts.yml parameters: - go_version: '$(AZCOPY_GOLANG_VERSION)' artifact_name: 'azCopy-linux-signed' download_path: '$(System.DefaultWorkingDirectory)' item_pattern: 'azCopy-linux-signed/mariner/azcopy-*x86_64.rpm' - # get glibc version with which build is done - - script: | - ldd --version - displayName: "GLIBC Version" - - script: | sudo rpm -qip azcopy*x86_64.rpm sudo rpm -i azcopy*x86_64.rpm @@ -1496,18 +1448,6 @@ stages: value: '$(System.DefaultWorkingDirectory)/azure-storage-azcopy' steps: - - task: GoTool@0 - env: - GO111MODULE: 'on' - inputs: - version: 
$(AZCOPY_GOLANG_VERSION) - - - script: | - echo 'Running GO Vet' - go vet - displayName: 'Golang Vet - Mac' - workingDirectory: $(System.DefaultWorkingDirectory) - - task: DownloadBuildArtifacts@0 displayName: 'Download Build Artifacts' inputs: @@ -1565,18 +1505,6 @@ stages: value: '$(System.DefaultWorkingDirectory)' steps: - - task: GoTool@0 - env: - GO111MODULE: 'on' - inputs: - version: $(AZCOPY_GOLANG_VERSION) - - - script: | - echo 'Running GO Vet' - go vet - displayName: 'Golang Vet - Windows' - workingDirectory: $(root_dir) - - task: DownloadBuildArtifacts@0 displayName: 'Download Build Artifacts' inputs: @@ -1670,12 +1598,6 @@ stages: steps: - checkout: none - - script: | - sudo apt update - sudo apt --fix-broken install - ldd --version - displayName: "GLIBC Version" - - task: DownloadBuildArtifacts@0 displayName: 'Download Build Artifacts' inputs: @@ -1711,18 +1633,6 @@ stages: value: '$(System.DefaultWorkingDirectory)' steps: - - task: GoTool@0 - env: - GO111MODULE: 'on' - inputs: - version: $(AZCOPY_GOLANG_VERSION) - - - script: | - echo 'Running GO Vet' - go vet - displayName: 'Golang Vet - Windows' - workingDirectory: $(root_dir) - - task: DownloadBuildArtifacts@0 displayName: 'Download Build Artifacts' inputs: diff --git a/setup/test_artifacts.yml b/setup/test_artifacts.yml index 790a729b5..fc6c9a4f0 100644 --- a/setup/test_artifacts.yml +++ b/setup/test_artifacts.yml @@ -1,6 +1,4 @@ parameters: - - name: go_version - type: string - name: artifact_name type: string - name: download_path @@ -9,21 +7,6 @@ parameters: type: string steps: - - script: | - git clone https://github.com/Azure/azure-storage-azcopy - displayName: 'Checkout Code' - workingDirectory: $(System.DefaultWorkingDirectory) - - - script: | - git checkout `echo $(Build.SourceBranch) | cut -d "/" -f 1,2 --complement` - displayName: 'Checkout Branch' - workingDirectory: $(System.DefaultWorkingDirectory)/azure-storage-azcopy - - - task: ShellScript@2 - inputs: - scriptPath: "$(System.DefaultWorkingDirectory)/azure-storage-azcopy/go_installer.sh" - args: "$(System.DefaultWorkingDirectory)/ ${{ parameters.go_version }}" - displayName: "GoTool Custom Setup" - task: DownloadBuildArtifacts@0 displayName: 'Download Build Artifacts' diff --git a/ste/folderCreationTracker.go b/ste/folderCreationTracker.go index 434b1f44d..09d9551b7 100644 --- a/ste/folderCreationTracker.go +++ b/ste/folderCreationTracker.go @@ -1,7 +1,6 @@ package ste import ( - "fmt" "net/url" "strings" "sync" @@ -21,10 +20,9 @@ func NewFolderCreationTracker(fpo common.FolderPropertyOption, plan *JobPartPlan case common.EFolderPropertiesOption.AllFolders(), common.EFolderPropertiesOption.AllFoldersExceptRoot(): return &jpptFolderTracker{ // This prevents a dependency cycle. Reviewers: Are we OK with this? Can you think of a better way to do it? - plan: plan, - mu: &sync.Mutex{}, - contents: make(map[string]uint32), - unregisteredButCreated: make(map[string]struct{}), + plan: plan, + mu: &sync.Mutex{}, + contents: common.NewTrie(), } case common.EFolderPropertiesOption.NoFolders(): // can't use simpleFolderTracker here, because when no folders are processed, @@ -48,15 +46,10 @@ func (f *nullFolderTracker) ShouldSetProperties(folder string, overwrite common. panic("wrong type of folder tracker has been instantiated. 
This type does not do any tracking") } -func (f *nullFolderTracker) StopTracking(folder string) { - // noop (because we don't track anything) -} - type jpptFolderTracker struct { - plan IJobPartPlanHeader - mu *sync.Mutex - contents map[string]uint32 - unregisteredButCreated map[string]struct{} + plan IJobPartPlanHeader + mu *sync.Mutex + contents *common.Trie } func (f *jpptFolderTracker) RegisterPropertiesTransfer(folder string, transferIndex uint32) { @@ -67,13 +60,14 @@ func (f *jpptFolderTracker) RegisterPropertiesTransfer(folder string, transferIn return // Never persist to dev-null } - f.contents[folder] = transferIndex + fNode, _ := f.contents.InsertDirNode(folder) + fNode.TransferIndex = transferIndex // We created it before it was enumerated-- Let's register that now. - if _, ok := f.unregisteredButCreated[folder]; ok { + if fNode.UnregisteredButCreated { f.plan.Transfer(transferIndex).SetTransferStatus(common.ETransferStatus.FolderCreated(), false) + fNode.UnregisteredButCreated = false - delete(f.unregisteredButCreated, folder) } } @@ -85,12 +79,15 @@ func (f *jpptFolderTracker) CreateFolder(folder string, doCreation func() error) return nil // Never persist to dev-null } - if idx, ok := f.contents[folder]; ok && - f.plan.Transfer(idx).TransferStatus() == (common.ETransferStatus.FolderCreated()) { + // If the folder has already been created, then we don't need to create it again + fNode, addedToTrie := f.contents.InsertDirNode(folder) + + if !addedToTrie && (f.plan.Transfer(fNode.TransferIndex).TransferStatus() == common.ETransferStatus.FolderCreated() || + f.plan.Transfer(fNode.TransferIndex).TransferStatus() == common.ETransferStatus.Success()) { return nil } - if _, ok := f.unregisteredButCreated[folder]; ok { + if fNode.UnregisteredButCreated { return nil } @@ -99,13 +96,14 @@ func (f *jpptFolderTracker) CreateFolder(folder string, doCreation func() error) return err } - if idx, ok := f.contents[folder]; ok { + if !addedToTrie { // overwrite it's transfer status - f.plan.Transfer(idx).SetTransferStatus(common.ETransferStatus.FolderCreated(), false) + f.plan.Transfer(fNode.TransferIndex).SetTransferStatus(common.ETransferStatus.FolderCreated(), false) } else { // A folder hasn't been hit in traversal yet. // Recording it in memory is OK, because we *cannot* resume a job that hasn't finished traversal. - f.unregisteredButCreated[folder] = struct{}{} + // We set the value to 0 as we just want to record it in memory + fNode.UnregisteredButCreated = true } return nil @@ -127,8 +125,9 @@ func (f *jpptFolderTracker) ShouldSetProperties(folder string, overwrite common. defer f.mu.Unlock() var created bool - if idx, ok := f.contents[folder]; ok { - created = f.plan.Transfer(idx).TransferStatus() == common.ETransferStatus.FolderCreated() + if fNode, ok := f.contents.GetDirNode(folder); ok { + created = f.plan.Transfer(fNode.TransferIndex).TransferStatus() == common.ETransferStatus.FolderCreated() || + f.plan.Transfer(fNode.TransferIndex).TransferStatus() == common.ETransferStatus.Success() } else { // This should not happen, ever. // Folder property jobs register with the tracker before they start getting processed. @@ -158,26 +157,3 @@ func (f *jpptFolderTracker) ShouldSetProperties(folder string, overwrite common. 
panic("unknown overwrite option") } } - -func (f *jpptFolderTracker) StopTracking(folder string) { - f.mu.Lock() - defer f.mu.Unlock() - - if folder == common.Dev_Null { - return // Not possible to track this - } - - // no-op, because tracking is now handled by jppt, anyway. - if _, ok := f.contents[folder]; ok { - delete(f.contents, folder) - } else { - currentContents := "" - - for k, v := range f.contents { - currentContents += fmt.Sprintf("K: %s V: %d\n", k, v) - } - - // double should never be hit, but *just in case*. - panic(common.NewAzCopyLogSanitizer().SanitizeLogMessage("Folder " + folder + " shouldn't finish tracking until it's been recorded\nCurrent Contents:\n" + currentContents)) - } -} diff --git a/ste/folderCreationTracker_test.go b/ste/folderCreationTracker_test.go index ae3452f9e..35238eff2 100644 --- a/ste/folderCreationTracker_test.go +++ b/ste/folderCreationTracker_test.go @@ -32,26 +32,26 @@ import ( // This is mocked to test the folder creation tracker type mockedJPPH struct { folderName []string - index []int + index []int status []*JobPartPlanTransfer - } -func (jpph *mockedJPPH) CommandString() string { panic("Not implemented") } -func (jpph *mockedJPPH) GetRelativeSrcDstStrings(uint32) (string, string) { panic("Not implemented") } -func (jpph *mockedJPPH) JobPartStatus() common.JobStatus { panic("Not implemented") } -func (jpph *mockedJPPH) JobStatus() common.JobStatus { panic("Not implemented") } -func (jpph *mockedJPPH) SetJobPartStatus(common.JobStatus) { panic("Not implemented") } -func (jpph *mockedJPPH) SetJobStatus(common.JobStatus) { panic("Not implemented") } -func (jpph *mockedJPPH) Transfer(idx uint32) *JobPartPlanTransfer { +func (jpph *mockedJPPH) CommandString() string { panic("Not implemented") } +func (jpph *mockedJPPH) GetRelativeSrcDstStrings(uint32) (string, string) { panic("Not implemented") } +func (jpph *mockedJPPH) JobPartStatus() common.JobStatus { panic("Not implemented") } +func (jpph *mockedJPPH) JobStatus() common.JobStatus { panic("Not implemented") } +func (jpph *mockedJPPH) SetJobPartStatus(common.JobStatus) { panic("Not implemented") } +func (jpph *mockedJPPH) SetJobStatus(common.JobStatus) { panic("Not implemented") } +func (jpph *mockedJPPH) Transfer(idx uint32) *JobPartPlanTransfer { return jpph.status[idx] } -func (jpph *mockedJPPH) TransferSrcDstRelatives(uint32) (string, string) { panic("Not implemented") } -func (jpph *mockedJPPH) TransferSrcDstStrings(uint32) (string, string, bool) { panic("Not implemented") } -func (jpph *mockedJPPH) TransferSrcPropertiesAndMetadata(uint32) (common.ResourceHTTPHeaders, common.Metadata, blob.BlobType, blob.AccessTier, bool, bool, bool, common.InvalidMetadataHandleOption, common.EntityType, string, string, common.BlobTags) { +func (jpph *mockedJPPH) TransferSrcDstRelatives(uint32) (string, string) { panic("Not implemented") } +func (jpph *mockedJPPH) TransferSrcDstStrings(uint32) (string, string, bool) { + panic("Not implemented") +} +func (jpph *mockedJPPH) TransferSrcPropertiesAndMetadata(uint32) (common.ResourceHTTPHeaders, common.Metadata, blob.BlobType, blob.AccessTier, bool, bool, bool, common.InvalidMetadataHandleOption, common.EntityType, string, string, common.BlobTags) { panic("Not implemented") } - // This test verifies that when we call dir create for a directory, it is created only once, // even if multiple routines request it to be created. 
@@ -60,23 +60,21 @@ func TestFolderCreationTracker_directoryCreate(t *testing.T) { // create a plan with one registered and one unregistered folder folderReg := "folderReg" - folderUnReg := "folderUnReg" - + folderUnReg := "folderUnReg" plan := &mockedJPPH{ folderName: []string{folderReg, folderUnReg}, - index: []int{0, 1}, - status: []*JobPartPlanTransfer { - &JobPartPlanTransfer{atomicTransferStatus: common.ETransferStatus.NotStarted(),}, - &JobPartPlanTransfer{atomicTransferStatus: common.ETransferStatus.NotStarted(),}, + index: []int{0, 1}, + status: []*JobPartPlanTransfer{ + &JobPartPlanTransfer{atomicTransferStatus: common.ETransferStatus.NotStarted()}, + &JobPartPlanTransfer{atomicTransferStatus: common.ETransferStatus.NotStarted()}, }, } - fct := &jpptFolderTracker{ - plan: plan, - mu: &sync.Mutex{}, - contents: make(map[string]uint32), - unregisteredButCreated: make(map[string]struct{}), + fct := &jpptFolderTracker{ + plan: plan, + mu: &sync.Mutex{}, + contents: common.NewTrie(), } // 1. Register folder1 @@ -85,13 +83,13 @@ func TestFolderCreationTracker_directoryCreate(t *testing.T) { // Multiple calls to create folderReg should execute create only once. numOfCreations := int32(0) var wg sync.WaitGroup - doCreation := func() error{ + doCreation := func() error { atomic.AddInt32(&numOfCreations, 1) plan.status[0].atomicTransferStatus = common.ETransferStatus.FolderCreated() return nil } - ch := make(chan bool) + ch := make(chan bool) for i := 0; i < 50; i++ { wg.Add(1) go func() { @@ -100,7 +98,7 @@ func TestFolderCreationTracker_directoryCreate(t *testing.T) { wg.Done() }() } - close(ch) // this will cause all above go rotuines to start creating folder + close(ch) // this will cause all above go routines to start creating folder wg.Wait() a.Equal(int32(1), numOfCreations) @@ -108,7 +106,7 @@ func TestFolderCreationTracker_directoryCreate(t *testing.T) { // similar test for unregistered folder numOfCreations = 0 ch = make(chan bool) - doCreation = func() error{ + doCreation = func() error { atomic.AddInt32(&numOfCreations, 1) plan.status[1].atomicTransferStatus = common.ETransferStatus.FolderCreated() return nil @@ -126,4 +124,4 @@ func TestFolderCreationTracker_directoryCreate(t *testing.T) { wg.Wait() a.Equal(int32(1), numOfCreations) -} \ No newline at end of file +} diff --git a/ste/jobStatusManager.go b/ste/jobStatusManager.go index 7d2a1a827..39b42569e 100755 --- a/ste/jobStatusManager.go +++ b/ste/jobStatusManager.go @@ -61,15 +61,36 @@ func (jm *jobMgr) statusMgrClosed() bool { /* These functions should not fail */ func (jm *jobMgr) SendJobPartCreatedMsg(msg JobPartCreatedMsg) { - jm.jstm.partCreated <- msg - if msg.IsFinalPart { - // Inform statusManager that this is all parts we've - close(jm.jstm.partCreated) + defer func() { + if recErr := recover(); recErr != nil { + jm.Log(common.LogError, "Cannot send message on closed channel") + } + }() + if jm.jstm.partCreated != nil { // Sends not allowed if channel is closed + select { + case jm.jstm.partCreated <- msg: + case <-jm.jstm.statusMgrDone: // Nobody is listening anymore, let's back off. 
+ + if msg.IsFinalPart { + // Inform statusManager that these are all the parts we have + close(jm.jstm.partCreated) + } } } func (jm *jobMgr) SendXferDoneMsg(msg xferDoneMsg) { - jm.jstm.xferDone <- msg + defer func() { + if recErr := recover(); recErr != nil { + jm.Log(common.LogError, "Cannot send message on channel") + } + }() + if jm.jstm.xferDone != nil { + select { + case jm.jstm.xferDone <- msg: + case <-jm.jstm.statusMgrDone: // Nobody is listening anymore, let's back off. + } + } } func (jm *jobMgr) ListJobSummary() common.ListJobSummaryResponse { @@ -155,7 +176,17 @@ func (jm *jobMgr) handleStatusUpdateMessage() { case <-jstm.listReq: /* Display stats */ js.Timestamp = time.Now().UTC() - jstm.respChan <- *js + defer func() { // Exit gracefully if panic + if recErr := recover(); recErr != nil { + jm.Log(common.LogError, "Cannot send message on respChan") + } + }() + select { + case jstm.respChan <- *js: + // Send on the channel + case <-jstm.statusMgrDone: + // If we time out, no biggie. This isn't world-ending, nor is it essential info. The other side stopped listening by now. + } // Reset the lists so that they don't keep accumulating and take up excessive memory // There is no need to keep sending the same items over and over again @@ -166,8 +197,6 @@ func (jm *jobMgr) handleStatusUpdateMessage() { close(jstm.statusMgrDone) close(jstm.respChan) close(jstm.listReq) - jstm.listReq = nil - jstm.respChan = nil return } } diff --git a/ste/mgr-JobMgr.go b/ste/mgr-JobMgr.go index 4325b1893..d4895b73b 100755 --- a/ste/mgr-JobMgr.go +++ b/ste/mgr-JobMgr.go @@ -424,8 +424,8 @@ type AddJobPartArgs struct { // These clients are valid if this fits the FromTo. i.e if // we're uploading - SrcClient *common.ServiceClient - DstClient *common.ServiceClient + SrcClient *common.ServiceClient + DstClient *common.ServiceClient SrcIsOAuth bool // true if source is authenticated via token ScheduleTransfers bool @@ -446,7 +446,7 @@ func (jm *jobMgr) AddJobPart2(args *AddJobPartArgs) IJobPartMgr { cacheLimiter: jm.cacheLimiter, fileCountLimiter: jm.fileCountLimiter, closeOnCompletion: args.CompletionChan, - srcIsOAuth: args.SrcIsOAuth, + srcIsOAuth: args.SrcIsOAuth, } // If an existing plan MMF was supplied, re use it. Otherwise, init a new one. if args.ExistingPlanMMF == nil { @@ -716,6 +716,7 @@ func (jm *jobMgr) reportJobPartDoneHandler() { if shouldComplete { // Inform StatusManager that all parts are done. close(jm.jstm.xferDone) + // Wait for all XferDone messages to be processed by statusManager. Front end // depends on JobStatus to determine if we've to quit job. Setting it here without // draining XferDone will make it report incorrect statistics.
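The jobStatusManager changes above all apply one pattern: a send on a status channel must never block or panic once the status manager has stopped listening, so each send is wrapped in a select against the statusMgrDone channel, with a deferred recover absorbing the race where the channel is closed mid-send. A minimal standalone sketch of that pattern, with illustrative names rather than AzCopy's API:

package main

import "fmt"

// trySend delivers msg unless the consumer has gone away: the select backs
// off once done is closed, and the deferred recover absorbs the race where
// ch itself is closed while a send is in flight.
func trySend(ch chan string, done <-chan struct{}, msg string) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("dropped (channel closed):", msg)
		}
	}()
	select {
	case ch <- msg:
	case <-done:
		// Nobody is listening anymore; drop the message.
	}
}

func main() {
	ch := make(chan string, 1)
	done := make(chan struct{})
	trySend(ch, done, "part created") // buffered send succeeds
	close(done)
	trySend(ch, done, "late message") // backs off via done
	fmt.Println(<-ch)                 // prints "part created"
}

Dropping a late message is acceptable here because, as the comments in the diff note, the receiver has already stopped reading and the update is no longer essential.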
diff --git a/ste/mgr-JobPartTransferMgr.go b/ste/mgr-JobPartTransferMgr.go index c727f7bac..d0bb3e855 100644 --- a/ste/mgr-JobPartTransferMgr.go +++ b/ste/mgr-JobPartTransferMgr.go @@ -858,7 +858,12 @@ func (jptm *jobPartTransferMgr) failActiveTransfer(typ transferErrorCode, descri !jptm.jobPartMgr.(*jobPartMgr).jobMgr.IsDaemon() { // quit right away, since without proper authentication no work can be done // display a clear message - common.GetLifecycleMgr().Info(fmt.Sprintf("Authentication failed, it is either not correct, or expired, or does not have the correct permission %s", err.Error())) + if strings.Contains(descriptionOfWhereErrorOccurred, "tags") { + common.GetLifecycleMgr().Info(fmt.Sprintf("Authorization failed during an attempt to set tags, please ensure you have the appropriate Tags permission %s", err.Error())) + } else { + common.GetLifecycleMgr().Info(fmt.Sprintf("Authentication failed, it is either not correct, or expired, or does not have the correct permission %s", err.Error())) + } + // and use the normal cancelling mechanism so that we can exit in a clean and controlled way jptm.jobPartMgr.(*jobPartMgr).jobMgr.CancelPauseJobOrder(common.EJobStatus.Cancelling()) // TODO: this results in the final job output line being: Final Job Status: Cancelled diff --git a/ste/sender-appendBlob.go b/ste/sender-appendBlob.go index 95b413a22..768063668 100644 --- a/ste/sender-appendBlob.go +++ b/ste/sender-appendBlob.go @@ -164,7 +164,7 @@ func (s *appendBlobSenderBase) Prologue(ps common.PrologueState) (destinationMod CPKScopeInfo: s.jptm.CpkScopeInfo(), }) if err != nil { - s.jptm.FailActiveSend("Creating blob", err) + s.jptm.FailActiveSend(common.Iff(len(blobTags) > 0, "Creating blob (with tags)", "Creating blob"), err) return } destinationModified = true @@ -172,7 +172,7 @@ func (s *appendBlobSenderBase) Prologue(ps common.PrologueState) (destinationMod if setTags { _, err = s.destAppendBlobClient.SetTags(s.jptm.Context(), s.blobTagsToApply, nil) if err != nil { - s.jptm.Log(common.LogWarning, err.Error()) + s.jptm.FailActiveSend("Set blob tags", err) } } return diff --git a/ste/sender-blobFolders.go b/ste/sender-blobFolders.go index 73ddc711b..e1b3b6332 100644 --- a/ste/sender-blobFolders.go +++ b/ste/sender-blobFolders.go @@ -199,17 +199,33 @@ func (b *blobFolderSender) EnsureFolderExists() error { } err = t.CreateFolder(b.DirUrlToString(), func() error { + blobTags := b.blobTagsToApply + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { + blobTags = nil + } + // It doesn't make sense to use a special access tier for a blob folder, the blob will be 0 bytes. 
_, err := b.destinationClient.Upload(b.jptm.Context(), streaming.NopCloser(bytes.NewReader(nil)), &blockblob.UploadOptions{ HTTPHeaders: &b.headersToApply, Metadata: b.metadataToApply, - Tags: b.blobTagsToApply, + Tags: blobTags, CPKInfo: b.jptm.CpkInfo(), CPKScopeInfo: b.jptm.CpkScopeInfo(), }) + if err != nil { + b.jptm.FailActiveSend(common.Iff(len(blobTags) > 0, "Upload folder (with tags)", "Upload folder"), err) + return err + } - return err + if setTags { + if _, err := b.destinationClient.SetTags(b.jptm.Context(), b.blobTagsToApply, nil); err != nil { + b.jptm.FailActiveSend("Set tags", err) + return nil + } + } + + return nil }) if err != nil { diff --git a/ste/sender-blobSymlinks.go b/ste/sender-blobSymlinks.go index 403e887ef..4dbc3d8c4 100644 --- a/ste/sender-blobSymlinks.go +++ b/ste/sender-blobSymlinks.go @@ -68,16 +68,33 @@ func (s *blobSymlinkSender) SendSymlink(linkData string) error { } s.metadataToApply["is_symlink"] = to.Ptr("true") + blobTags := s.blobTagsToApply + setTags := separateSetTagsRequired(blobTags) + if setTags || len(blobTags) == 0 { + blobTags = nil + } + _, err = s.destinationClient.Upload(s.jptm.Context(), streaming.NopCloser(strings.NewReader(linkData)), &blockblob.UploadOptions{ HTTPHeaders: &s.headersToApply, Metadata: s.metadataToApply, Tier: s.destBlobTier, - Tags: s.blobTagsToApply, + Tags: blobTags, CPKInfo: s.jptm.CpkInfo(), CPKScopeInfo: s.jptm.CpkScopeInfo(), }) - return err + if err != nil { + s.jptm.FailActiveSend(common.Iff(len(blobTags) > 0, "Upload symlink (with tags)", "Upload symlink"), err) + return nil + } + + if setTags { + if _, err := s.destinationClient.SetTags(s.jptm.Context(), s.blobTagsToApply, nil); err != nil { + s.jptm.FailActiveSend("Set tags", err) + return nil + } + } + return nil } // ===== Implement sender so that it can be returned in newBlobUploader. 
===== diff --git a/ste/sender-blockBlob.go b/ste/sender-blockBlob.go index 720fcbfd3..bd1364be9 100644 --- a/ste/sender-blockBlob.go +++ b/ste/sender-blockBlob.go @@ -275,13 +275,13 @@ func (s *blockBlobSenderBase) Epilogue() { CPKScopeInfo: s.jptm.CpkScopeInfo(), }) if err != nil { - jptm.FailActiveSend("Committing block list", err) + jptm.FailActiveSend(common.Iff(blobTags != nil, "Committing block list (with tags)", "Committing block list"), err) return } if setTags { if _, err := s.destBlockBlobClient.SetTags(jptm.Context(), s.blobTagsToApply, nil); err != nil { - s.jptm.Log(common.LogWarning, err.Error()) + jptm.FailActiveSend("Setting tags", err) } } } diff --git a/ste/sender-blockBlobFromLocal.go b/ste/sender-blockBlobFromLocal.go index e066ba899..a99922567 100644 --- a/ste/sender-blockBlobFromLocal.go +++ b/ste/sender-blockBlobFromLocal.go @@ -178,7 +178,7 @@ func (u *blockBlobUploader) generatePutWholeBlob(id common.ChunkID, reader commo // if the put blob is a failure, update the transfer status to failed if err != nil { - jptm.FailActiveUpload("Uploading blob", err) + jptm.FailActiveSend(common.Iff(len(blobTags) > 0, "Uploading blob (with tags)", "Uploading blob"), err) return } @@ -186,7 +186,7 @@ func (u *blockBlobUploader) generatePutWholeBlob(id common.ChunkID, reader commo if setTags { if _, err := u.destBlockBlobClient.SetTags(jptm.Context(), u.blobTagsToApply, nil); err != nil { - u.jptm.Log(common.LogWarning, err.Error()) + jptm.FailActiveSend("Set blob tags", err) } } }) diff --git a/ste/sender-blockBlobFromURL.go b/ste/sender-blockBlobFromURL.go index 5c880e73e..3adf0a74b 100644 --- a/ste/sender-blockBlobFromURL.go +++ b/ste/sender-blockBlobFromURL.go @@ -170,7 +170,7 @@ func (c *urlToBlockBlobCopier) generateStartPutBlobFromURL(id common.ChunkID, bl }) if err != nil { - c.jptm.FailActiveSend("Put Blob from URL", err) + c.jptm.FailActiveSend(common.Iff(len(blobTags) > 0, "Put Blob from URL (with tags)", "Put Blob from URL"), err) return } @@ -178,7 +178,7 @@ func (c *urlToBlockBlobCopier) generateStartPutBlobFromURL(id common.ChunkID, bl if setTags { if _, err := c.destBlockBlobClient.SetTags(c.jptm.Context(), c.blobTagsToApply, nil); err != nil { - c.jptm.Log(common.LogWarning, err.Error()) + c.jptm.FailActiveSend("Set blob tags", err) } } }) diff --git a/ste/sender-pageBlob.go b/ste/sender-pageBlob.go index 91929fa38..ed6f756c4 100644 --- a/ste/sender-pageBlob.go +++ b/ste/sender-pageBlob.go @@ -260,7 +260,7 @@ func (s *pageBlobSenderBase) Prologue(ps common.PrologueState) (destinationModif CPKScopeInfo: s.jptm.CpkScopeInfo(), }) if err != nil { - s.jptm.FailActiveSend("Creating blob", err) + s.jptm.FailActiveSend(common.Iff(len(blobTags) > 0, "Creating blob (with tags)", "Creating blob"), err) return } @@ -268,7 +268,7 @@ func (s *pageBlobSenderBase) Prologue(ps common.PrologueState) (destinationModif if setTags { if _, err := s.destPageBlobClient.SetTags(s.jptm.Context(), s.blobTagsToApply, nil); err != nil { - s.jptm.Log(common.LogWarning, err.Error()) + s.jptm.FailActiveSend("Set blob tags", err) } } diff --git a/ste/xfer-anyToRemote-folder.go b/ste/xfer-anyToRemote-folder.go index afa20dcdc..173d3517f 100644 --- a/ste/xfer-anyToRemote-folder.go +++ b/ste/xfer-anyToRemote-folder.go @@ -82,7 +82,6 @@ func anyToRemote_folder(jptm IJobPartTransferMgr, info *TransferInfo, pacer pace } else { t := jptm.GetFolderCreationTracker() - defer t.StopTracking(s.DirUrlToString()) // don't need it after this routine shouldSetProps := 
t.ShouldSetProperties(s.DirUrlToString(), jptm.GetOverwriteOption(), jptm.GetOverwritePrompter()) if !shouldSetProps { jptm.LogAtLevelForCurrentTransfer(common.LogWarning, "Folder already exists, so due to the --overwrite option, its properties won't be set") diff --git a/ste/xfer-remoteToLocal-folder.go b/ste/xfer-remoteToLocal-folder.go index 3e80f59ce..3d3b65904 100644 --- a/ste/xfer-remoteToLocal-folder.go +++ b/ste/xfer-remoteToLocal-folder.go @@ -56,7 +56,6 @@ func remoteToLocal_folder(jptm IJobPartTransferMgr, pacer pacer, df downloaderFa // no chunks to schedule. Just run the folder handling operations t := jptm.GetFolderCreationTracker() - defer t.StopTracking(info.Destination) // don't need it after this routine err = common.CreateDirectoryIfNotExist(info.Destination, t) // we may create it here, or possible there's already a file transfer for the folder that has created it, or maybe it already existed before this job if err != nil {