From 575a69c587026a5669d9ef58525f41835f50b6e1 Mon Sep 17 00:00:00 2001
From: kris-konina-reef
Date: Sat, 14 Sep 2024 17:27:19 +0200
Subject: [PATCH 01/18] Add persistent bucket fixtures

---
 test/integration/conftest.py          | 44 +++++++++++++
 test/integration/helpers.py           |  5 +-
 test/integration/persistent_bucket.py | 89 +++++++++++++++++++++++++++
 3 files changed, 135 insertions(+), 3 deletions(-)
 create mode 100644 test/integration/persistent_bucket.py

diff --git a/test/integration/conftest.py b/test/integration/conftest.py
index bff38e95..4f076613 100755
--- a/test/integration/conftest.py
+++ b/test/integration/conftest.py
@@ -16,6 +16,7 @@
 import subprocess
 import sys
 import tempfile
+import uuid
 from os import environ, path
 from tempfile import TemporaryDirectory
 
@@ -31,6 +32,11 @@
 from ..helpers import b2_uri_args_v3, b2_uri_args_v4
 from .helpers import NODE_DESCRIPTION, RNG_SEED, Api, CommandLine, bucket_name_part, random_token
+from .persistent_bucket import (
+    PersistentBucketAggregate,
+    delete_files,
+    get_or_create_persistent_bucket,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -402,3 +408,41 @@ def b2_uri_args(apiver_int):
         return b2_uri_args_v4
     else:
         return b2_uri_args_v3
+
+
+# -- Persistent bucket fixtures --
+@pytest.fixture
+def persistent_bucket(b2_api, account_info_file) -> Bucket:
+    return get_or_create_persistent_bucket(b2_api, account_info_file)
+
+
+@pytest.fixture
+def unique_subfolder():
+    subfolder = f"test-{uuid.uuid4().hex[:8]}"
+    yield subfolder
+
+
+@pytest.fixture
+def persistent_bucket_aggregate(persistent_bucket, unique_subfolder) -> PersistentBucketAggregate:
+    """
+    Since all consumers of the `bucket_name` fixture expect a new bucket to be created,
+    we need to mirror this behavior by appending a unique subfolder to the persistent bucket name.
+    """
+    yield PersistentBucketAggregate(persistent_bucket.name, unique_subfolder)
+
+
+@pytest.fixture(autouse=True)
+def cleanup_persistent_bucket_subfolders(
+    persistent_bucket_aggregate: PersistentBucketAggregate, b2_api: Api
+):
+    yield
+    # Clean up all files in the persistent bucket after each test
+    bucket = b2_api.api.get_bucket_by_name(persistent_bucket_aggregate.bucket_name)
+    delete_files(bucket, persistent_bucket_aggregate.subfolder)
+
+
+# @pytest.fixture(scope="session", autouse=True)
+# def final_cleanup_persistent_buckets(b2_api, worker_id):
+#     yield
+#     if worker_id == "gw0":
+#         cleanup_persistent_bucket(b2_api)
diff --git a/test/integration/helpers.py b/test/integration/helpers.py
index 70132944..b128254b 100755
--- a/test/integration/helpers.py
+++ b/test/integration/helpers.py
@@ -188,7 +188,6 @@ def _should_remove_bucket(self, bucket: Bucket) -> tuple[bool, str]:
     def clean_buckets(self, quick=False):
         # even with use_cache=True, if cache is empty API call will be made
         buckets = self.api.list_buckets(use_cache=quick)
-        print('Total bucket count:', len(buckets))
         remaining_buckets = []
         for bucket in buckets:
             should_remove, why = self._should_remove_bucket(bucket)
@@ -539,9 +538,9 @@ def reauthorize(self, check_key_capabilities=False):
             } - private_preview_caps - set(auth_dict['allowed']['capabilities'])
             assert not missing_capabilities, f'it appears that the raw_api integration test is being run with a non-full key. Missing capabilities: {missing_capabilities}'
 
-    def list_file_versions(self, bucket_name):
+    def list_file_versions(self, bucket_name, path=''):
         return self.should_succeed_json(
-            ['ls', '--json', '--recursive', '--versions', *self.b2_uri_args(bucket_name)]
+            ['ls', '--json', '--recursive', '--versions', *self.b2_uri_args(bucket_name, path)]
         )
 
     def cleanup_buckets(self, buckets: dict[str, dict | None]) -> None:
diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py
new file mode 100644
index 00000000..3a53907d
--- /dev/null
+++ b/test/integration/persistent_bucket.py
@@ -0,0 +1,89 @@
+######################################################################
+#
+# File: test/integration/persistent_bucket.py
+#
+# Copyright 2024 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+import hashlib
+import os
+import sys
+from dataclasses import dataclass
+from functools import cached_property
+from pathlib import Path
+from test.integration.helpers import BUCKET_NAME_LENGTH, Api
+
+import backoff
+from b2sdk.v2 import Bucket, SqliteAccountInfo
+from b2sdk.v2.exception import NonExistentBucket
+
+PERSISTENT_BUCKET_NAME_PREFIX = "constst"
+
+
+@dataclass
+class PersistentBucketAggregate:
+    bucket_name: str
+    subfolder: str
+
+    @cached_property
+    def virtual_bucket_name(self):
+        return f"{self.bucket_name}/{self.subfolder}"
+
+
+@backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=10)
+def delete_all_files(bucket: Bucket):
+    all_items = list(bucket.ls(recursive=True))
+    for item, _ in all_items:
+        bucket.delete_file_version(item.id_, item.file_name)
+
+
+@backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=10)
+def delete_files(bucket: Bucket, subfolder: str):
+    for file_version, _ in bucket.ls(recursive=True, folder_to_list=subfolder):
+        bucket.delete_file_version(file_version.id_, file_version.file_name)
+
+
+def cleanup_persistent_bucket(b2_api: Api):
+    all_buckets = b2_api.api.list_buckets()
+    for bucket in all_buckets:
+        if bucket.name.startswith(PERSISTENT_BUCKET_NAME_PREFIX):
+            print(f"Deleting all files in bucket {bucket.name}", flush=True, file=sys.stderr)
+            delete_all_files(bucket)
+
+
+def get_persistent_bucket_name(b2_api: Api, account_info_file: Path) -> str:
+    if "CI" in os.environ:
+        # CI environment
+        repo_id = os.environ.get("GITHUB_REPOSITORY_ID")
+        if not repo_id:
+            raise ValueError("GITHUB_REPOSITORY_ID is not set")
+        bucket_hash = hashlib.sha256(repo_id.encode()).hexdigest()
+    else:
+        # Local development
+        account_info = SqliteAccountInfo(file_name=account_info_file)
+        bucket_hash = hashlib.sha256(account_info.get_account_id().encode()).hexdigest()
+
+    return f"{PERSISTENT_BUCKET_NAME_PREFIX}-{bucket_hash}"[:BUCKET_NAME_LENGTH]
+
+
+def get_or_create_persistent_bucket(b2_api: Api, account_info_file: Path) -> Bucket:
+    bucket_name = get_persistent_bucket_name(b2_api, account_info_file)
+    try:
+        bucket = b2_api.api.get_bucket_by_name(bucket_name)
+    except NonExistentBucket:
+        bucket = b2_api.api.create_bucket(
+            bucket_name,
+            bucket_type="allPublic",
+            lifecycle_rules=[
+                {
+                    "daysFromHidingToDeleting": 1,
+                    "daysFromUploadingToHiding": 14,
+                    "fileNamePrefix": "",
+                }
+            ],
+        )
+    # add the new bucket name to the list of bucket names
+    b2_api.bucket_name_log.append(bucket_name)
+    return bucket
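
For illustration only (not part of the patch series), a minimal sketch of how a test can consume the new fixtures, reusing the suite's existing b2_tool, sample_file and b2_uri_args fixtures; the test name and the uploaded file name are hypothetical:

def test_persistent_bucket_usage_sketch(b2_tool, persistent_bucket_aggregate, sample_file, b2_uri_args):
    # every test shares one persistent bucket but gets its own unique subfolder
    bucket_name = persistent_bucket_aggregate.bucket_name
    subfolder = persistent_bucket_aggregate.subfolder
    # upload into the per-test subfolder so tests running in parallel cannot collide
    b2_tool.should_succeed(
        ['file', 'upload', '--quiet', bucket_name, str(sample_file), f'{subfolder}/data.txt']
    )
    # list only this test's subfolder; the autouse cleanup fixture deletes it afterwards
    list_of_files = b2_tool.should_succeed_json(
        ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, subfolder)]
    )
    assert [f['fileName'] for f in list_of_files] == [f'{subfolder}/data.txt']
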
From 295630cfca3bf7a52aaf373befe118d78f508103 Mon Sep 17 00:00:00 2001
From: kris-konina-reef
Date: Sat, 14
Sep 2024 17:28:10 +0200 Subject: [PATCH 02/18] Refactor integration tests to use persistent bucket where applicable --- test/integration/test_b2_command_line.py | 792 +++++++++++++---------- 1 file changed, 460 insertions(+), 332 deletions(-) diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py index f799fdfe..9dd43cb5 100755 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -271,18 +271,23 @@ def test_command_with_env_vars_reusing_existing_account_info( @pytest.fixture -def uploaded_sample_file(b2_tool, bucket_name, sample_filepath): +def uploaded_sample_file(b2_tool, persistent_bucket_aggregate, sample_filepath): return b2_tool.should_succeed_json( - ['file', 'upload', '--quiet', bucket_name, - str(sample_filepath), 'sample_file'] + [ + 'file', 'upload', '--quiet', persistent_bucket_aggregate.bucket_name, + str(sample_filepath), 'sample_file' + ] ) -def test_download(b2_tool, bucket_name, sample_filepath, uploaded_sample_file, tmp_path): +def test_download( + b2_tool, persistent_bucket_aggregate, sample_filepath, uploaded_sample_file, tmp_path +): output_a = tmp_path / 'a' b2_tool.should_succeed( [ - 'file', 'download', '--quiet', f"b2://{bucket_name}/{uploaded_sample_file['fileName']}", + 'file', 'download', '--quiet', + f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}", str(output_a) ] ) @@ -296,8 +301,11 @@ def test_download(b2_tool, bucket_name, sample_filepath, uploaded_sample_file, t assert output_b.read_text() == sample_filepath.read_text() -def test_basic(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args, apiver_int): - +def test_basic( + b2_tool, persistent_bucket_aggregate, sample_file, tmp_path, b2_uri_args, apiver_int +): + bucket_name = persistent_bucket_aggregate.bucket_name + subfolder = f"{persistent_bucket_aggregate.subfolder}/" file_mod_time_str = str(file_mod_time_millis(sample_file)) file_data = read_file(sample_file) @@ -308,73 +316,120 @@ def test_basic(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args, apiver_ [bucket_name], [b['bucketName'] for b in list_of_buckets if b['bucketName'] == bucket_name] ) - b2_tool.should_succeed(['file', 'upload', '--quiet', bucket_name, sample_file, 'a']) + b2_tool.should_succeed(['file', 'upload', '--quiet', bucket_name, sample_file, f'{subfolder}a']) b2_tool.should_succeed(['ls', '--long', '--replication', *b2_uri_args(bucket_name)]) - b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'a']) - b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'b/1']) - b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'b/2']) + b2_tool.should_succeed( + ['file', 'upload', '--no-progress', bucket_name, sample_file, f'{subfolder}a'] + ) + b2_tool.should_succeed( + ['file', 'upload', '--no-progress', bucket_name, sample_file, f'{subfolder}b/1'] + ) + b2_tool.should_succeed( + ['file', 'upload', '--no-progress', bucket_name, sample_file, f'{subfolder}b/2'] + ) b2_tool.should_succeed( [ 'file', 'upload', '--no-progress', '--sha1', hex_sha1, '--info', 'foo=bar=baz', - '--info', 'color=blue', bucket_name, sample_file, 'c' + '--info', 'color=blue', bucket_name, sample_file, f'{subfolder}c' ] ) b2_tool.should_fail( [ 'file', 'upload', '--no-progress', '--sha1', hex_sha1, '--info', 'foo-bar', '--info', - 'color=blue', bucket_name, sample_file, 'c' + 'color=blue', bucket_name, sample_file, f'{subfolder}c' ], r'ERROR: 
Bad file info: foo-bar' ) b2_tool.should_succeed( [ 'file', 'upload', '--no-progress', '--content-type', 'text/plain', bucket_name, - sample_file, 'd' + sample_file, f'{subfolder}d' ] ) - b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'rm']) - b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'rm1']) + b2_tool.should_succeed( + ['file', 'upload', '--no-progress', bucket_name, sample_file, f'{subfolder}rm'] + ) + b2_tool.should_succeed( + ['file', 'upload', '--no-progress', bucket_name, sample_file, f'{subfolder}rm1'] + ) # with_wildcard allows us to target a single file. rm will be removed, rm1 will be left alone b2_tool.should_succeed( - ['rm', '--recursive', '--with-wildcard', *b2_uri_args(bucket_name, 'rm')] + ['rm', '--recursive', '--with-wildcard', *b2_uri_args(bucket_name, f'{subfolder}rm')] ) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', '--with-wildcard', *b2_uri_args(bucket_name, 'rm*')] + [ + 'ls', '--json', '--recursive', '--with-wildcard', + *b2_uri_args(bucket_name, f'{subfolder}rm*') + ] ) - should_equal(['rm1'], [f['fileName'] for f in list_of_files]) + should_equal([f'{subfolder}rm1'], [f['fileName'] for f in list_of_files]) b2_tool.should_succeed( - ['rm', '--recursive', '--with-wildcard', *b2_uri_args(bucket_name, 'rm1')] + ['rm', '--recursive', '--with-wildcard', *b2_uri_args(bucket_name, f'{subfolder}rm1')] ) b2_tool.should_succeed( - ['file', 'download', '--quiet', f'b2://{bucket_name}/b/1', tmp_path / 'a'] + ['file', 'download', '--quiet', f'b2://{bucket_name}/{subfolder}b/1', tmp_path / 'a'] ) - b2_tool.should_succeed(['file', 'hide', bucket_name, 'c']) + b2_tool.should_succeed(['file', 'hide', bucket_name, f'{subfolder}c']) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, f'{subfolder}')] + ) + should_equal( + [ + f'{subfolder}a', + f'{subfolder}b/1', + f'{subfolder}b/2', + f'{subfolder}d', + ], [f['fileName'] for f in list_of_files] ) - should_equal(['a', 'b/1', 'b/2', 'd'], [f['fileName'] for f in list_of_files]) - b2_tool.should_succeed(['file', 'unhide', f'b2://{bucket_name}/c']) + b2_tool.should_succeed( + ['file', 'unhide', f'b2://{persistent_bucket_aggregate.virtual_bucket_name}/c'] + ) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, f'{subfolder}')] + ) + should_equal( + [ + f'{subfolder}a', + f'{subfolder}b/1', + f'{subfolder}b/2', + f'{subfolder}c', + f'{subfolder}d', + ], [f['fileName'] for f in list_of_files] ) - should_equal(['a', 'b/1', 'b/2', 'c', 'd'], [f['fileName'] for f in list_of_files]) - b2_tool.should_succeed(['file', 'hide', bucket_name, 'c']) + b2_tool.should_succeed(['file', 'hide', bucket_name, f'{subfolder}c']) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, f'{subfolder}')] + ) + should_equal( + [ + f'{subfolder}a', + f'{subfolder}b/1', + f'{subfolder}b/2', + f'{subfolder}d', + ], [f['fileName'] for f in list_of_files] ) - should_equal(['a', 'b/1', 'b/2', 'd'], [f['fileName'] for f in list_of_files]) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', '--versions', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', '--versions', 
*b2_uri_args(bucket_name, f'{subfolder}')] + ) + should_equal( + [ + f'{subfolder}a', + f'{subfolder}a', + f'{subfolder}b/1', + f'{subfolder}b/2', + f'{subfolder}c', + f'{subfolder}c', + f'{subfolder}d', + ], [f['fileName'] for f in list_of_files] ) - should_equal(['a', 'a', 'b/1', 'b/2', 'c', 'c', 'd'], [f['fileName'] for f in list_of_files]) should_equal( ['upload', 'upload', 'upload', 'upload', 'hide', 'upload', 'upload'], [f['action'] for f in list_of_files] @@ -385,38 +440,49 @@ def test_basic(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args, apiver_ first_c_version = list_of_files[4] second_c_version = list_of_files[5] list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', '--versions', *b2_uri_args(bucket_name, 'c')] + ['ls', '--json', '--recursive', '--versions', *b2_uri_args(bucket_name, f'{subfolder}c')] ) if apiver_int >= 4: # b2://bucketName/c should list all c versions on v4 - should_equal(['c', 'c'], [f['fileName'] for f in list_of_files]) + should_equal([ + f'{subfolder}c', + f'{subfolder}c', + ], [f['fileName'] for f in list_of_files]) else: should_equal([], [f['fileName'] for f in list_of_files]) - b2_tool.should_succeed(['file', 'copy-by-id', first_a_version['fileId'], bucket_name, 'x']) + b2_tool.should_succeed( + ['file', 'copy-by-id', first_a_version['fileId'], bucket_name, f'{subfolder}x'] + ) - b2_tool.should_succeed(['ls', *b2_uri_args(bucket_name)], '^a{0}b/{0}d{0}'.format(os.linesep)) + b2_tool.should_succeed( + ['ls', *b2_uri_args(bucket_name, f'{subfolder}')], + '^{0}a{1}{0}b/{1}{0}d{1}'.format(subfolder, os.linesep) + ) # file_id, action, date, time, size(, replication), name + b2_tool.should_succeed( - ['ls', '--long', *b2_uri_args(bucket_name)], - '^4_z.* upload .* {1} a{0}.* - .* b/{0}4_z.* upload .* {1} d{0}'.format( - os.linesep, len(file_data) + ['ls', '--long', *b2_uri_args(bucket_name, f'{subfolder}')], + '^4_z.* upload .* {1} {2}a{0}.* - .* {2}b/{0}4_z.* upload .* {1} {2}d{0}'.format( + os.linesep, len(file_data), subfolder ) ) b2_tool.should_succeed( - ['ls', '--long', '--replication', *b2_uri_args(bucket_name)], - '^4_z.* upload .* {1} - a{0}.* - .* - b/{0}4_z.* upload .* {1} - d{0}'.format( - os.linesep, len(file_data) - ) + ['ls', '--long', '--replication', *b2_uri_args(bucket_name, f'{subfolder}')], + '^4_z.* upload .* {1} - {2}a{0}.* - .* - {2}b/{0}4_z.* upload .* {1} - {2}d{0}'. 
+ format(os.linesep, len(file_data), subfolder) ) + b2_tool.should_succeed( - ['ls', '--versions', *b2_uri_args(bucket_name)], - f'^a{os.linesep}a{os.linesep}b/{os.linesep}c{os.linesep}c{os.linesep}d{os.linesep}' + ['ls', '--versions', *b2_uri_args(bucket_name, f'{subfolder}')], + f'^{subfolder}a{os.linesep}{subfolder}a{os.linesep}{subfolder}b/{os.linesep}{subfolder}c{os.linesep}{subfolder}c{os.linesep}{subfolder}d{os.linesep}' ) b2_tool.should_succeed( - ['ls', *b2_uri_args(bucket_name, 'b')], f'^b/1{os.linesep}b/2{os.linesep}' + ['ls', *b2_uri_args(bucket_name, f'{subfolder}b')], + f'^{subfolder}b/1{os.linesep}{subfolder}b/2{os.linesep}' ) b2_tool.should_succeed( - ['ls', *b2_uri_args(bucket_name, 'b/')], f'^b/1{os.linesep}b/2{os.linesep}' + ['ls', *b2_uri_args(bucket_name, f'{subfolder}b/')], + f'^{subfolder}b/1{os.linesep}{subfolder}b/2{os.linesep}' ) file_info = b2_tool.should_succeed_json( @@ -430,21 +496,22 @@ def test_basic(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args, apiver_ should_equal(expected_info, file_info['fileInfo']) b2_tool.should_succeed( - ['delete-file-version', 'c', first_c_version['fileId']], + ['delete-file-version', f'{subfolder}c', first_c_version['fileId']], expected_stderr_pattern=re.compile( re.escape('WARNING: `delete-file-version` command is deprecated. Use `rm` instead.') ) ) b2_tool.should_succeed( - ['ls', *b2_uri_args(bucket_name)], f'^a{os.linesep}b/{os.linesep}c{os.linesep}d{os.linesep}' + ['ls', *b2_uri_args(bucket_name, f"{subfolder}")], + f'^{subfolder}a{os.linesep}{subfolder}b/{os.linesep}{subfolder}c{os.linesep}{subfolder}d{os.linesep}' ) b2_tool.should_succeed(['file', 'url', f"b2id://{second_c_version['fileId']}"]) b2_tool.should_succeed( - ['file', 'url', f"b2://{bucket_name}/any-file-name"], + ['file', 'url', f"b2://{persistent_bucket_aggregate.virtual_bucket_name}/any-file-name"], '^https://.*/file/{}/{}\r?$'.format( - bucket_name, + persistent_bucket_aggregate.virtual_bucket_name, 'any-file-name', ), ) # \r? is for Windows, as $ doesn't match \r\n @@ -459,13 +526,13 @@ def test_ls_b2id(b2_tool, uploaded_sample_file): @pytest.mark.apiver(from_ver=4) -def test_rm_b2id(b2_tool, bucket_name, uploaded_sample_file): +def test_rm_b2id(b2_tool, persistent_bucket_aggregate, uploaded_sample_file): # remove the file by id b2_tool.should_succeed(['rm', f"b2id://{uploaded_sample_file['fileId']}"]) # check that the file is gone b2_tool.should_succeed( - ['ls', f'b2://{bucket_name}'], + ['ls', f'b2://{persistent_bucket_aggregate.bucket_name}'], expected_pattern='^$', ) @@ -525,7 +592,7 @@ def test_debug_logs(b2_tool, is_running_on_docker, tmp_path): assert re.search(log_file_regex, log), log -def test_bucket(b2_tool, bucket_name): +def test_bucket(b2_tool, persistent_bucket_aggregate): rule = """{ "daysFromHidingToDeleting": 1, "daysFromUploadingToHiding": null, @@ -533,8 +600,8 @@ def test_bucket(b2_tool, bucket_name): }""" output = b2_tool.should_succeed_json( [ - 'bucket', 'update', '--lifecycle-rule', rule, bucket_name, 'allPublic', - *b2_tool.get_bucket_info_args() + 'bucket', 'update', '--lifecycle-rule', rule, persistent_bucket_aggregate.bucket_name, + 'allPublic', *b2_tool.get_bucket_info_args() ], ) @@ -554,9 +621,16 @@ def test_bucket(b2_tool, bucket_name): ] -def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_uri_args): +def test_key_restrictions( + b2_tool, persistent_bucket_aggregate, sample_file, bucket_factory, b2_uri_args +): # A single file for rm to fail on. 
- b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'test']) + b2_tool.should_succeed( + [ + 'file', 'upload', '--no-progress', persistent_bucket_aggregate.bucket_name, sample_file, + 'test' + ] + ) key_one_name = 'clt-testKey-01' + random_hex(6) created_key_stdout = b2_tool.should_succeed( @@ -573,7 +647,7 @@ def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_ ['account', 'authorize', '--environment', b2_tool.realm, key_one_id, key_one], ) - b2_tool.should_succeed(['bucket', 'get', bucket_name],) + b2_tool.should_succeed(['bucket', 'get', persistent_bucket_aggregate.bucket_name],) second_bucket_name = bucket_factory().name b2_tool.should_succeed(['bucket', 'get', second_bucket_name],) @@ -583,7 +657,7 @@ def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_ 'key', 'create', '--bucket', - bucket_name, + persistent_bucket_aggregate.bucket_name, key_two_name, 'listFiles,listBuckets,readFiles', ] @@ -598,7 +672,7 @@ def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_ [ 'create-key', '--bucket', - bucket_name, + persistent_bucket_aggregate.bucket_name, key_three_name, 'listFiles,listBuckets,readFiles', ], @@ -609,8 +683,8 @@ def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_ b2_tool.should_succeed( ['account', 'authorize', '--environment', b2_tool.realm, key_two_id, key_two], ) - b2_tool.should_succeed(['bucket', 'get', bucket_name],) - b2_tool.should_succeed(['ls', *b2_uri_args(bucket_name)],) + b2_tool.should_succeed(['bucket', 'get', persistent_bucket_aggregate.bucket_name],) + b2_tool.should_succeed(['ls', *b2_uri_args(persistent_bucket_aggregate.bucket_name)],) b2_tool.should_succeed( ['account', 'authorize', '--environment', b2_tool.realm, key_three_id, key_three], @@ -621,18 +695,21 @@ def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_ failed_bucket_err = r'Deletion of file "test" \([^\)]+\) failed: unauthorized for ' \ r'application key with capabilities ' \ r"'(.*listFiles.*|.*listBuckets.*|.*readFiles.*){3}', " \ - r"restricted to bucket '%s' \(unauthorized\)" % bucket_name + r"restricted to bucket '%s' \(unauthorized\)" % persistent_bucket_aggregate.bucket_name b2_tool.should_fail( - ['rm', '--recursive', '--no-progress', *b2_uri_args(bucket_name)], failed_bucket_err + [ + 'rm', '--recursive', '--no-progress', + *b2_uri_args(persistent_bucket_aggregate.bucket_name) + ], failed_bucket_err ) - failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name + failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + persistent_bucket_aggregate.bucket_name b2_tool.should_fail(['bucket', 'get', second_bucket_name], failed_bucket_err) - failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name + failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + persistent_bucket_aggregate.bucket_name b2_tool.should_fail(['ls', *b2_uri_args(second_bucket_name)], failed_list_files_err) - failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name + failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + persistent_bucket_aggregate.bucket_name b2_tool.should_fail(['rm', *b2_uri_args(second_bucket_name)], failed_list_files_err) # reauthorize with more capabilities for clean up @@ -814,255 +891,273 @@ def encryption_summary(sse_dict, file_info): @pytest.mark.parametrize( - "dir_, 
encryption", [('sync', None), ('sync', SSE_B2_AES), ('sync', SSE_C_AES), ('', None)] + "dir_, encryption", + [('sync', None), ('sync', SSE_B2_AES), ('sync', SSE_C_AES), ('', None)], ) -def test_sync_up(b2_tool, bucket_name, apiver_int, dir_, encryption): - sync_point_parts = [bucket_name] +def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir_, encryption): + # persistent_bucket_aggregate.subfolder = persistent_bucket_aggregate.subfolder + random_hex(6) + + sync_point_parts = [ + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ] if dir_: sync_point_parts.append(dir_) - prefix = dir_ + '/' + prefix = f'{persistent_bucket_aggregate.subfolder}/{dir_}/' else: - prefix = '' + prefix = persistent_bucket_aggregate.subfolder + '/' b2_sync_point = 'b2:' + '/'.join(sync_point_parts) - with TempDir() as dir_path: - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal([], file_version_summary(file_versions)) - - write_file(dir_path / 'a', b'hello') - write_file(dir_path / 'b', b'hello') - write_file(dir_path / 'c', b'hello') - - # simulate action (nothing should be uploaded) - b2_tool.should_succeed(['sync', '--no-progress', '--dry-run', dir_path, b2_sync_point]) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal([], file_version_summary(file_versions)) - - # - # A note about OSError: [WinError 1314] - # - # If you are seeing this, then probably you ran the integration test suite from - # a non-admin account which on Windows doesn't by default get to create symlinks. - # A special permission is needed. Now maybe there is a way to give that permission, - # but it didn't work for me, so I just ran it as admin. A guide that I've found - # recommended to go to Control Panel, Administrative Tools, Local Security Policy, - # Local Policies, User Rights Assignment and there you can find a permission to - # create symbolic links. Add your user to it (or a group that the user is in). - # - # Finally in order to apply the new policy, run `cmd` and execute - # ``gpupdate /force``. - # - # Again, if it still doesn't work, consider just running the shell you are - # launching ``nox`` as admin. - - os.symlink('broken', dir_path / 'd') # OSError: [WinError 1314] ? 
See the comment above - - additional_env = None - - # now upload - if encryption is None: - command = ['sync', '--no-progress', dir_path, b2_sync_point] - expected_encryption = SSE_NONE - expected_encryption_str = encryption_summary(expected_encryption.as_dict(), {}) - elif encryption == SSE_B2_AES: - command = [ - 'sync', '--no-progress', '--destination-server-side-encryption', 'SSE-B2', dir_path, - b2_sync_point - ] - expected_encryption = encryption - expected_encryption_str = encryption_summary(expected_encryption.as_dict(), {}) - elif encryption == SSE_C_AES: - command = [ - 'sync', '--no-progress', '--destination-server-side-encryption', 'SSE-C', dir_path, - b2_sync_point - ] - expected_encryption = encryption - additional_env = { - 'B2_DESTINATION_SSE_C_KEY_B64': base64.b64encode(SSE_C_AES.key.secret).decode(), - 'B2_DESTINATION_SSE_C_KEY_ID': SSE_C_AES.key.key_id, - } - expected_encryption_str = encryption_summary( - expected_encryption.as_dict(), - {SSE_C_KEY_ID_FILE_INFO_KEY_NAME: SSE_C_AES.key.key_id} - ) - else: - raise NotImplementedError('unsupported encryption mode: %s' % encryption) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal([], file_version_summary(file_versions)) + + write_file(tmp_path / 'a', b'hello') + write_file(tmp_path / 'b', b'hello') + write_file(tmp_path / 'c', b'hello') + + # simulate action (nothing should be uploaded) + b2_tool.should_succeed(['sync', '--no-progress', '--dry-run', tmp_path, b2_sync_point]) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal([], file_version_summary(file_versions)) + + # + # A note about OSError: [WinError 1314] + # + # If you are seeing this, then probably you ran the integration test suite from + # a non-admin account which on Windows doesn't by default get to create symlinks. + # A special permission is needed. Now maybe there is a way to give that permission, + # but it didn't work for me, so I just ran it as admin. A guide that I've found + # recommended to go to Control Panel, Administrative Tools, Local Security Policy, + # Local Policies, User Rights Assignment and there you can find a permission to + # create symbolic links. Add your user to it (or a group that the user is in). + # + # Finally in order to apply the new policy, run `cmd` and execute + # ``gpupdate /force``. + # + # Again, if it still doesn't work, consider just running the shell you are + # launching ``nox`` as admin. + + os.symlink('broken', tmp_path / 'd') # OSError: [WinError 1314] ? 
See the comment above + + additional_env = None + + # now upload + if encryption is None: + command = ['sync', '--no-progress', tmp_path, b2_sync_point] + expected_encryption = SSE_NONE + expected_encryption_str = encryption_summary(expected_encryption.as_dict(), {}) + elif encryption == SSE_B2_AES: + command = [ + 'sync', '--no-progress', '--destination-server-side-encryption', 'SSE-B2', tmp_path, + b2_sync_point + ] + expected_encryption = encryption + expected_encryption_str = encryption_summary(expected_encryption.as_dict(), {}) + elif encryption == SSE_C_AES: + command = [ + 'sync', '--no-progress', '--destination-server-side-encryption', 'SSE-C', tmp_path, + b2_sync_point + ] + expected_encryption = encryption + additional_env = { + 'B2_DESTINATION_SSE_C_KEY_B64': base64.b64encode(SSE_C_AES.key.secret).decode(), + 'B2_DESTINATION_SSE_C_KEY_ID': SSE_C_AES.key.key_id, + } + expected_encryption_str = encryption_summary( + expected_encryption.as_dict(), {SSE_C_KEY_ID_FILE_INFO_KEY_NAME: SSE_C_AES.key.key_id} + ) + else: + raise NotImplementedError('unsupported encryption mode: %s' % encryption) - status, stdout, stderr = b2_tool.execute(command, additional_env=additional_env) - assert re.search(r'd[\'"]? could not be accessed', stdout) - assert status == (1 if apiver_int >= 4 else 0) - file_versions = b2_tool.list_file_versions(bucket_name) + status, stdout, stderr = b2_tool.execute(command, additional_env=additional_env) + assert re.search(r'd[\'"]? could not be accessed', stdout) + assert status == (1 if apiver_int >= 4 else 0) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) - should_equal( - [ - ('+ ' + prefix + 'a', expected_encryption_str), - ('+ ' + prefix + 'b', expected_encryption_str), - ('+ ' + prefix + 'c', expected_encryption_str), - ], - file_version_summary_with_encryption(file_versions), + should_equal( + [ + ('+ ' + prefix + 'a', expected_encryption_str), + ('+ ' + prefix + 'b', expected_encryption_str), + ('+ ' + prefix + 'c', expected_encryption_str), + ], + file_version_summary_with_encryption(file_versions), + ) + if encryption and encryption.mode == EncryptionMode.SSE_C: + b2_tool.should_fail( + command, + expected_pattern="ValueError: Using SSE-C requires providing an encryption key via " + "B2_DESTINATION_SSE_C_KEY_B64 env var" ) - if encryption and encryption.mode == EncryptionMode.SSE_C: - b2_tool.should_fail( - command, - expected_pattern="ValueError: Using SSE-C requires providing an encryption key via " - "B2_DESTINATION_SSE_C_KEY_B64 env var" - ) - if encryption is not None: - return # that's enough, we've checked that encryption works, no need to repeat the whole sync suite + if encryption is not None: + return # that's enough, we've checked that encryption works, no need to repeat the whole sync suite - c_id = find_file_id(file_versions, prefix + 'c') - file_info = b2_tool.should_succeed_json(['file', 'info', f"b2id://{c_id}"])['fileInfo'] - should_equal( - file_mod_time_millis(dir_path / 'c'), int(file_info['src_last_modified_millis']) - ) + c_id = find_file_id(file_versions, prefix + 'c') + file_info = b2_tool.should_succeed_json(['file', 'info', f"b2id://{c_id}"])['fileInfo'] + should_equal(file_mod_time_millis(tmp_path / 'c'), int(file_info['src_last_modified_millis'])) - os.unlink(dir_path / 'b') - write_file(dir_path / 'c', b'hello world') + os.unlink(tmp_path / 'b') + write_file(tmp_path / 'c', b'hello world') - status, stdout, stderr = b2_tool.execute( - ['sync', 
'--no-progress', '--keep-days', '10', dir_path, b2_sync_point] - ) - assert re.search(r'd[\'"]? could not be accessed', stdout) - assert status == (1 if apiver_int >= 4 else 0) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'a', - '- ' + prefix + 'b', - '+ ' + prefix + 'b', - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - ], file_version_summary(file_versions) - ) + status, stdout, stderr = b2_tool.execute( + ['sync', '--no-progress', '--keep-days', '10', tmp_path, b2_sync_point] + ) + assert re.search(r'd[\'"]? could not be accessed', stdout) + assert status == (1 if apiver_int >= 4 else 0) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal( + [ + '+ ' + prefix + 'a', + '- ' + prefix + 'b', + '+ ' + prefix + 'b', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + ], file_version_summary(file_versions) + ) - os.unlink(dir_path / 'a') - os.unlink(dir_path / 'd') # remove broken symlink to get status 0 on >=b2v4 + os.unlink(tmp_path / 'a') + os.unlink(tmp_path / 'd') # remove broken symlink to get status 0 on >=b2v4 - b2_tool.should_succeed(['sync', '--no-progress', '--delete', dir_path, b2_sync_point]) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal([ - '+ ' + prefix + 'c', - ], file_version_summary(file_versions)) + b2_tool.should_succeed(['sync', '--no-progress', '--delete', tmp_path, b2_sync_point]) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal([ + '+ ' + prefix + 'c', + ], file_version_summary(file_versions)) - # test --compare-threshold with file size - write_file(dir_path / 'c', b'hello world!') + # test --compare-threshold with file size + write_file(tmp_path / 'c', b'hello world!') - # should not upload new version of c - b2_tool.should_succeed( - [ - 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'size', - '--compare-threshold', '1', dir_path, b2_sync_point - ] - ) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal([ - '+ ' + prefix + 'c', - ], file_version_summary(file_versions)) + # should not upload new version of c + b2_tool.should_succeed( + [ + 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'size', + '--compare-threshold', '1', tmp_path, b2_sync_point + ] + ) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal([ + '+ ' + prefix + 'c', + ], file_version_summary(file_versions)) - # should upload new version of c - b2_tool.should_succeed( - [ - 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'size', - dir_path, b2_sync_point - ] - ) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - ], file_version_summary(file_versions) - ) + # should upload new version of c + b2_tool.should_succeed( + [ + 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'size', tmp_path, + b2_sync_point + ] + ) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal([ + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + ], file_version_summary(file_versions)) - set_file_mod_time_millis(dir_path / 'c', file_mod_time_millis(dir_path / 'c') + 2000) + set_file_mod_time_millis(tmp_path / 'c', 
file_mod_time_millis(tmp_path / 'c') + 2000) - # test --compare-threshold with modTime - # should not upload new version of c - b2_tool.should_succeed( - [ - 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'modTime', - '--compare-threshold', '2000', dir_path, b2_sync_point - ] - ) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - ], file_version_summary(file_versions) - ) + # test --compare-threshold with modTime + # should not upload new version of c + b2_tool.should_succeed( + [ + 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'modTime', + '--compare-threshold', '2000', tmp_path, b2_sync_point + ] + ) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal([ + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + ], file_version_summary(file_versions)) - # should upload new version of c - b2_tool.should_succeed( - [ - 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'modTime', - dir_path, b2_sync_point - ] - ) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - ], file_version_summary(file_versions) - ) + # should upload new version of c + b2_tool.should_succeed( + [ + 'sync', '--no-progress', '--keep-days', '10', '--compare-versions', 'modTime', tmp_path, + b2_sync_point + ] + ) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal( + [ + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + ], file_version_summary(file_versions) + ) - # create one more file - write_file(dir_path / 'linktarget', b'hello') - mod_time = str((file_mod_time_millis(dir_path / 'linktarget') - 10) / 1000) + # create one more file + write_file(tmp_path / 'linktarget', b'hello') + mod_time = str((file_mod_time_millis(tmp_path / 'linktarget') - 10) / 1000) - # exclude last created file because of mtime - b2_tool.should_succeed( - [ - 'sync', '--no-progress', '--exclude-if-modified-after', mod_time, dir_path, - b2_sync_point - ] - ) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - ], - file_version_summary(file_versions), - ) + # exclude last created file because of mtime + b2_tool.should_succeed( + ['sync', '--no-progress', '--exclude-if-modified-after', mod_time, tmp_path, b2_sync_point] + ) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal( + [ + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + ], + file_version_summary(file_versions), + ) - # confirm symlink is skipped - os.symlink('linktarget', dir_path / 'alink') + # confirm symlink is skipped + os.symlink('linktarget', tmp_path / 'alink') - b2_tool.should_succeed( - ['sync', '--no-progress', '--exclude-all-symlinks', dir_path, b2_sync_point], - ) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - '+ ' + prefix + 'linktarget', - ], - file_version_summary(file_versions), - ) + b2_tool.should_succeed( + ['sync', '--no-progress', '--exclude-all-symlinks', tmp_path, b2_sync_point], + ) + file_versions = b2_tool.list_file_versions( + 
persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal( + [ + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'linktarget', + ], + file_version_summary(file_versions), + ) - # confirm symlink target is uploaded (with symlink's name) - b2_tool.should_succeed(['sync', '--no-progress', dir_path, b2_sync_point]) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal( - [ - '+ ' + prefix + 'alink', - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - '+ ' + prefix + 'c', - '+ ' + prefix + 'linktarget', - ], - file_version_summary(file_versions), - ) + # confirm symlink target is uploaded (with symlink's name) + b2_tool.should_succeed(['sync', '--no-progress', tmp_path, b2_sync_point]) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal( + [ + '+ ' + prefix + 'alink', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'linktarget', + ], + file_version_summary(file_versions), + ) -def test_sync_down(b2_tool, bucket_name, sample_file): - sync_down_helper(b2_tool, bucket_name, 'sync', sample_file) +def test_sync_down(b2_tool, persistent_bucket_aggregate, sample_file): + sync_down_helper(b2_tool, persistent_bucket_aggregate.bucket_name, 'sync', sample_file) def test_sync_down_no_prefix(b2_tool, bucket_name, sample_file): @@ -1075,7 +1170,7 @@ def test_sync_down_sse_c_no_prefix(b2_tool, bucket_name, sample_file): def sync_down_helper(b2_tool, bucket_name, folder_in_bucket, sample_file, encryption=None): - b2_sync_point = 'b2:%s' % bucket_name + b2_sync_point = f'b2:{bucket_name}' if folder_in_bucket: b2_sync_point += '/' + folder_in_bucket b2_file_prefix = folder_in_bucket + '/' @@ -1161,9 +1256,13 @@ def sync_down_helper(b2_tool, bucket_name, folder_in_bucket, sample_file, encryp ) -def test_sync_copy(bucket_factory, b2_tool, bucket_name, sample_file): +def test_sync_copy(bucket_factory, b2_tool, persistent_bucket_aggregate, sample_file): prepare_and_run_sync_copy_tests( - bucket_factory, b2_tool, bucket_name, 'sync', sample_file=sample_file + bucket_factory, + b2_tool, + persistent_bucket_aggregate.bucket_name, + 'sync', + sample_file=sample_file ) @@ -1258,7 +1357,7 @@ def prepare_and_run_sync_copy_tests( expected_encryption=SSE_NONE, source_encryption=None, ): - b2_sync_point = 'b2:%s' % bucket_name + b2_sync_point = f'b2:{bucket_name}' if folder_in_bucket: b2_sync_point += '/' + folder_in_bucket b2_file_prefix = folder_in_bucket + '/' @@ -1267,7 +1366,7 @@ def prepare_and_run_sync_copy_tests( other_bucket_name = bucket_factory().name - other_b2_sync_point = 'b2:%s' % other_bucket_name + other_b2_sync_point = f'b2:{other_bucket_name}' if folder_in_bucket: other_b2_sync_point += '/' + folder_in_bucket @@ -1391,11 +1490,11 @@ def run_sync_copy_with_basic_checks( raise NotImplementedError(destination_encryption) -def test_sync_long_path(b2_tool, bucket_name): +def test_sync_long_path(tmp_path, b2_tool, persistent_bucket_aggregate): """ test sync with very long path (overcome windows 260 character limit) """ - b2_sync_point = 'b2://' + bucket_name + b2_sync_point = f'b2://{persistent_bucket_aggregate.virtual_bucket_name}' long_path = '/'.join( ( @@ -1406,15 +1505,19 @@ def test_sync_long_path(b2_tool, bucket_name): ) ) - with TempDir() as dir_path: - local_long_path = (dir_path / long_path).resolve() - fixed_local_long_path = Path(fix_windows_path_limit(str(local_long_path))) - 
os.makedirs(fixed_local_long_path.parent) - write_file(fixed_local_long_path, b'asdf') + local_long_path = (tmp_path / long_path).resolve() + fixed_local_long_path = Path(fix_windows_path_limit(str(local_long_path))) + os.makedirs(fixed_local_long_path.parent) + write_file(fixed_local_long_path, b'asdf') - b2_tool.should_succeed(['sync', '--no-progress', '--delete', dir_path, b2_sync_point]) - file_versions = b2_tool.list_file_versions(bucket_name) - should_equal(['+ ' + long_path], file_version_summary(file_versions)) + b2_tool.should_succeed(['sync', '--no-progress', '--delete', str(tmp_path), b2_sync_point]) + file_versions = b2_tool.list_file_versions( + persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + ) + should_equal( + [f'+ {persistent_bucket_aggregate.subfolder}/{long_path}'], + file_version_summary(file_versions) + ) def test_default_sse_b2__update_bucket(b2_tool, bucket_name, schedule_bucket_cleanup): @@ -1462,27 +1565,34 @@ def test_default_sse_b2__create_bucket(b2_tool, schedule_bucket_cleanup): should_equal(second_bucket_default_sse, second_bucket_info['defaultServerSideEncryption']) -def test_sse_b2(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args): +def test_sse_b2(b2_tool, persistent_bucket_aggregate, sample_file, tmp_path, b2_uri_args): + bucket_name = persistent_bucket_aggregate.bucket_name + subfolder = persistent_bucket_aggregate.subfolder b2_tool.should_succeed( [ 'file', 'upload', '--destination-server-side-encryption=SSE-B2', '--quiet', bucket_name, - sample_file, 'encrypted' + sample_file, f'{subfolder}/encrypted' ] ) - b2_tool.should_succeed(['file', 'upload', '--quiet', bucket_name, sample_file, 'not_encrypted']) + b2_tool.should_succeed( + ['file', 'upload', '--quiet', bucket_name, sample_file, f'{subfolder}/not_encrypted'] + ) b2_tool.should_succeed( - ['file', 'download', '--quiet', f'b2://{bucket_name}/encrypted', tmp_path / 'encrypted'] + [ + 'file', 'download', '--quiet', f'b2://{bucket_name}/{subfolder}/encrypted', + tmp_path / 'encrypted' + ] ) b2_tool.should_succeed( [ - 'file', 'download', '--quiet', f'b2://{bucket_name}/not_encrypted', + 'file', 'download', '--quiet', f'b2://{bucket_name}/{subfolder}/not_encrypted', tmp_path / 'not_encrypted' ] ) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, subfolder)] ) should_equal( [{ @@ -1507,18 +1617,18 @@ def test_sse_b2(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args): b2_tool.should_succeed( [ 'file', 'copy-by-id', '--destination-server-side-encryption=SSE-B2', - encrypted_version['fileId'], bucket_name, 'copied_encrypted' + encrypted_version['fileId'], bucket_name, f'{subfolder}/copied_encrypted' ] ) b2_tool.should_succeed( [ 'file', 'copy-by-id', not_encrypted_version['fileId'], bucket_name, - 'copied_not_encrypted' + f'{subfolder}/copied_not_encrypted' ] ) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, subfolder)] ) should_equal( [{ @@ -1542,8 +1652,10 @@ def test_sse_b2(b2_tool, bucket_name, sample_file, tmp_path, b2_uri_args): should_equal({'mode': 'none'}, file_info['serverSideEncryption']) -def test_sse_c(b2_tool, bucket_name, is_running_on_docker, sample_file, tmp_path, b2_uri_args): - +def test_sse_c( + b2_tool, persistent_bucket_aggregate, is_running_on_docker, sample_file, tmp_path, b2_uri_args +): + bucket_name = 
persistent_bucket_aggregate.bucket_name sse_c_key_id = 'user-generated-key-id \nąóźćż\nœøΩ≈ç\nßäöü' if is_running_on_docker: # TODO: fix this once we figure out how to pass env vars with \n in them to docker, docker-compose should work @@ -3123,7 +3235,11 @@ def _assert_file_lock_configuration( assert legal_hold == actual_legal_hold -def test_upload_file__custom_upload_time(b2_tool, bucket_name, sample_file, b2_uri_args): +def test_upload_file__custom_upload_time( + b2_tool, persistent_bucket_aggregate, sample_file, b2_uri_args +): + bucket_name = persistent_bucket_aggregate.bucket_name + subfolder = persistent_bucket_aggregate.subfolder file_data = read_file(sample_file) cut = 12345 cut_printable = '1970-01-01 00:00:12' @@ -3136,7 +3252,7 @@ def test_upload_file__custom_upload_time(b2_tool, bucket_name, sample_file, b2_u '--quiet', bucket_name, sample_file, - 'a', + f'{subfolder}/a', ] succeeded, stdout = b2_tool.run_command(args) if not succeeded: @@ -3144,51 +3260,59 @@ def test_upload_file__custom_upload_time(b2_tool, bucket_name, sample_file, b2_u else: # file_id, action, date, time, size(, replication), name b2_tool.should_succeed( - ['ls', '--long', *b2_uri_args(bucket_name)], '^4_z.* upload {} +{} a'.format( + ['ls', '--long', *b2_uri_args(bucket_name, subfolder)], + '^4_z.* upload {} +{} a'.format( cut_printable, len(file_data), ) ) # file_id, action, date, time, size(, replication), name b2_tool.should_succeed( - ['ls', '--long', '--replication', *b2_uri_args(bucket_name)], + ['ls', '--long', '--replication', *b2_uri_args(bucket_name, subfolder)], f'^4_z.* upload {cut_printable} +{len(file_data)} - a' ) @skip_on_windows -def test_upload_file__stdin_pipe_operator(request, bash_runner, b2_tool, bucket_name): +def test_upload_file__stdin_pipe_operator( + request, bash_runner, b2_tool, persistent_bucket_aggregate +): """Test `file upload` from stdin using pipe operator.""" + bucket_name = persistent_bucket_aggregate.bucket_name + subfolder = persistent_bucket_aggregate.subfolder content = request.node.name run = bash_runner( f'echo -n {content!r} ' f'| ' - f'{" ".join(b2_tool.parse_command(b2_tool.prepare_env()))} file upload {bucket_name} - {request.node.name}.txt' + f'{" ".join(b2_tool.parse_command(b2_tool.prepare_env()))} file upload {bucket_name} - {subfolder}/{request.node.name}.txt' ) assert hashlib.sha1(content.encode()).hexdigest() in run.stdout @skip_on_windows def test_upload_unbound_stream__redirect_operator( - request, bash_runner, b2_tool, bucket_name, is_running_on_docker + request, bash_runner, b2_tool, persistent_bucket_aggregate, is_running_on_docker ): """Test upload-unbound-stream from stdin using redirect operator.""" + bucket_name = persistent_bucket_aggregate.bucket_name + subfolder = persistent_bucket_aggregate.subfolder if is_running_on_docker: pytest.skip('Not supported on Docker') content = request.node.name command = request.config.getoption('--sut') run = bash_runner( - f'{command} upload-unbound-stream {bucket_name} <(echo -n {content}) {request.node.name}.txt' + f'{command} upload-unbound-stream {bucket_name} <(echo -n {content}) {subfolder}/{request.node.name}.txt' ) assert hashlib.sha1(content.encode()).hexdigest() in run.stdout def test_download_file_stdout( - b2_tool, bucket_name, sample_filepath, tmp_path, uploaded_sample_file + b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path, uploaded_sample_file ): assert b2_tool.should_succeed( [ - 'file', 'download', '--quiet', f"b2://{bucket_name}/{uploaded_sample_file['fileName']}", + 'file', 
'download', '--quiet', + f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}", '-' ], ) == sample_filepath.read_text() @@ -3198,7 +3322,7 @@ def test_download_file_stdout( def test_download_file_to_directory( - b2_tool, bucket_name, sample_filepath, tmp_path, uploaded_sample_file + b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path, uploaded_sample_file ): downloads_directory = 'downloads' target_directory = tmp_path / downloads_directory @@ -3211,7 +3335,7 @@ def test_download_file_to_directory( 'file', 'download', '--quiet', - f"b2://{bucket_name}/{uploaded_sample_file['fileName']}", + f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}", str(target_directory), ], ) @@ -3239,15 +3363,19 @@ def test_download_file_to_directory( f'{new_files}, {new_files[0].read_text()}, {sample_file_content}' -def test_cat(b2_tool, bucket_name, sample_filepath, tmp_path, uploaded_sample_file): +def test_cat(b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path, uploaded_sample_file): assert b2_tool.should_succeed( - ['file', 'cat', f"b2://{bucket_name}/{uploaded_sample_file['fileName']}"], + [ + 'file', 'cat', + f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}" + ], ) == sample_filepath.read_text() assert b2_tool.should_succeed(['file', 'cat', f"b2id://{uploaded_sample_file['fileId']}"] ) == sample_filepath.read_text() -def test_header_arguments(b2_tool, bucket_name, sample_filepath, tmp_path): +def test_header_arguments(b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path): + bucket_name = persistent_bucket_aggregate.bucket_name # yapf: disable args = [ '--cache-control', 'max-age=3600', @@ -3277,7 +3405,7 @@ def assert_expected(file_info, expected=expected_file_info): '--no-progress', bucket_name, str(sample_filepath), - 'sample_file', + f'{persistent_bucket_aggregate.subfolder}/sample_file', *args, '--info', 'b2-content-disposition=will-be-overwritten', From 0251d03846cc0e9391967b5cd402b6d29d387544 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 16 Sep 2024 10:17:51 +0000 Subject: [PATCH 03/18] Adjust SSE-C¬ifcation rules tests to use const bucket \w subfolders --- test/integration/test_b2_command_line.py | 72 +++++++++++++----------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py index 9dd43cb5..2f7ca9db 100755 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -1156,8 +1156,8 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ) -def test_sync_down(b2_tool, persistent_bucket_aggregate, sample_file): - sync_down_helper(b2_tool, persistent_bucket_aggregate.bucket_name, 'sync', sample_file) +def test_sync_down(b2_tool, bucket_name, sample_file): + sync_down_helper(b2_tool, bucket_name, 'sync', sample_file) def test_sync_down_no_prefix(b2_tool, bucket_name, sample_file): @@ -1256,13 +1256,9 @@ def sync_down_helper(b2_tool, bucket_name, folder_in_bucket, sample_file, encryp ) -def test_sync_copy(bucket_factory, b2_tool, persistent_bucket_aggregate, sample_file): +def test_sync_copy(bucket_factory, b2_tool, bucket_name, sample_file): prepare_and_run_sync_copy_tests( - bucket_factory, - b2_tool, - persistent_bucket_aggregate.bucket_name, - 'sync', - sample_file=sample_file + bucket_factory, b2_tool, bucket_name, 'sync', sample_file=sample_file ) @@ -1656,6 +1652,7 @@ def test_sse_c( b2_tool, 
persistent_bucket_aggregate, is_running_on_docker, sample_file, tmp_path, b2_uri_args ): bucket_name = persistent_bucket_aggregate.bucket_name + subfolder = persistent_bucket_aggregate.subfolder sse_c_key_id = 'user-generated-key-id \nąóźćż\nœøΩ≈ç\nßäöü' if is_running_on_docker: # TODO: fix this once we figure out how to pass env vars with \n in them to docker, docker-compose should work @@ -1673,7 +1670,7 @@ def test_sse_c( file_version_info = b2_tool.should_succeed_json( [ 'file', 'upload', '--no-progress', '--quiet', '--destination-server-side-encryption', - 'SSE-C', bucket_name, sample_file, 'uploaded_encrypted' + 'SSE-C', bucket_name, sample_file, f'{subfolder}/uploaded_encrypted' ], additional_env={ 'B2_DESTINATION_SSE_C_KEY_B64': base64.b64encode(secret).decode(), @@ -1692,7 +1689,7 @@ def test_sse_c( b2_tool.should_fail( [ - 'file', 'download', '--quiet', f'b2://{bucket_name}/uploaded_encrypted', + 'file', 'download', '--quiet', f'b2://{bucket_name}/{subfolder}/uploaded_encrypted', 'gonna_fail_anyway' ], expected_pattern='ERROR: The object was stored using a form of Server Side Encryption. The ' @@ -1701,7 +1698,7 @@ def test_sse_c( b2_tool.should_fail( [ 'file', 'download', '--quiet', '--source-server-side-encryption', 'SSE-C', - f'b2://{bucket_name}/uploaded_encrypted', 'gonna_fail_anyway' + f'b2://{bucket_name}/{subfolder}/uploaded_encrypted', 'gonna_fail_anyway' ], expected_pattern='ValueError: Using SSE-C requires providing an encryption key via ' 'B2_SOURCE_SSE_C_KEY_B64 env var' @@ -1709,7 +1706,7 @@ def test_sse_c( b2_tool.should_fail( [ 'file', 'download', '--quiet', '--source-server-side-encryption', 'SSE-C', - f'b2://{bucket_name}/uploaded_encrypted', 'gonna_fail_anyway' + f'b2://{bucket_name}/{subfolder}/uploaded_encrypted', 'gonna_fail_anyway' ], expected_pattern='ERROR: Wrong or no SSE-C key provided when reading a file.', additional_env={'B2_SOURCE_SSE_C_KEY_B64': base64.b64encode(os.urandom(32)).decode()} @@ -1723,7 +1720,7 @@ def test_sse_c( '--quiet', '--source-server-side-encryption', 'SSE-C', - f'b2://{bucket_name}/uploaded_encrypted', + f'b2://{bucket_name}/{subfolder}/uploaded_encrypted', dir_path / 'a', ], additional_env={'B2_SOURCE_SSE_C_KEY_B64': base64.b64encode(secret).decode()} @@ -1785,7 +1782,7 @@ def test_sse_c( '--source-server-side-encryption=SSE-C', file_version_info['fileId'], bucket_name, - 'not_encrypted_copied_from_encrypted_metadata_replace', + f'{subfolder}/not_encrypted_copied_from_encrypted_metadata_replace', '--info', 'a=b', '--content-type', @@ -1800,7 +1797,7 @@ def test_sse_c( '--source-server-side-encryption=SSE-C', file_version_info['fileId'], bucket_name, - 'not_encrypted_copied_from_encrypted_metadata_replace_empty', + f'{subfolder}/not_encrypted_copied_from_encrypted_metadata_replace_empty', '--no-info', '--content-type', 'text/plain', @@ -1814,7 +1811,7 @@ def test_sse_c( '--source-server-side-encryption=SSE-C', file_version_info['fileId'], bucket_name, - 'not_encrypted_copied_from_encrypted_metadata_pseudo_copy', + f'{subfolder}/not_encrypted_copied_from_encrypted_metadata_pseudo_copy', '--fetch-metadata', ], additional_env={'B2_SOURCE_SSE_C_KEY_B64': base64.b64encode(secret).decode()} @@ -1827,7 +1824,7 @@ def test_sse_c( '--destination-server-side-encryption=SSE-C', file_version_info['fileId'], bucket_name, - 'encrypted_no_id_copied_from_encrypted', + f'{subfolder}/encrypted_no_id_copied_from_encrypted', '--fetch-metadata', ], additional_env={ @@ -1843,7 +1840,7 @@ def test_sse_c( '--destination-server-side-encryption=SSE-C', 
file_version_info['fileId'], bucket_name, - 'encrypted_with_id_copied_from_encrypted_metadata_replace', + f'{subfolder}/encrypted_with_id_copied_from_encrypted_metadata_replace', '--no-info', '--content-type', 'text/plain', @@ -1862,7 +1859,7 @@ def test_sse_c( '--destination-server-side-encryption=SSE-C', file_version_info['fileId'], bucket_name, - 'encrypted_with_id_copied_from_encrypted_metadata_pseudo_copy', + f'{subfolder}/encrypted_with_id_copied_from_encrypted_metadata_pseudo_copy', '--fetch-metadata', ], additional_env={ @@ -1872,12 +1869,14 @@ def test_sse_c( } ) list_of_files = b2_tool.should_succeed_json( - ['ls', '--json', '--recursive', *b2_uri_args(bucket_name)] + ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, subfolder)] ) + print(list_of_files, flush=True, file=sys.stderr) + should_equal( [ { - 'file_name': 'encrypted_no_id_copied_from_encrypted', + 'file_name': f'{subfolder}/encrypted_no_id_copied_from_encrypted', 'sse_c_key_id': 'missing_key', 'serverSideEncryption': { @@ -1888,8 +1887,10 @@ def test_sse_c( }, }, { - 'file_name': 'encrypted_with_id_copied_from_encrypted_metadata_pseudo_copy', - 'sse_c_key_id': 'another-user-generated-key-id', + 'file_name': + f'{subfolder}/encrypted_with_id_copied_from_encrypted_metadata_pseudo_copy', + 'sse_c_key_id': + 'another-user-generated-key-id', 'serverSideEncryption': { 'algorithm': 'AES256', @@ -1899,8 +1900,10 @@ def test_sse_c( }, }, { - 'file_name': 'encrypted_with_id_copied_from_encrypted_metadata_replace', - 'sse_c_key_id': 'another-user-generated-key-id', + 'file_name': + f'{subfolder}/encrypted_with_id_copied_from_encrypted_metadata_replace', + 'sse_c_key_id': + 'another-user-generated-key-id', 'serverSideEncryption': { 'algorithm': 'AES256', @@ -1910,28 +1913,32 @@ def test_sse_c( }, }, { - 'file_name': 'not_encrypted_copied_from_encrypted_metadata_pseudo_copy', - 'sse_c_key_id': 'missing_key', + 'file_name': + f'{subfolder}/not_encrypted_copied_from_encrypted_metadata_pseudo_copy', + 'sse_c_key_id': + 'missing_key', 'serverSideEncryption': { 'mode': 'none', }, }, { - 'file_name': 'not_encrypted_copied_from_encrypted_metadata_replace', + 'file_name': f'{subfolder}/not_encrypted_copied_from_encrypted_metadata_replace', 'sse_c_key_id': 'missing_key', 'serverSideEncryption': { 'mode': 'none', }, }, { - 'file_name': 'not_encrypted_copied_from_encrypted_metadata_replace_empty', - 'sse_c_key_id': 'missing_key', + 'file_name': + f'{subfolder}/not_encrypted_copied_from_encrypted_metadata_replace_empty', + 'sse_c_key_id': + 'missing_key', 'serverSideEncryption': { 'mode': 'none', }, }, { - 'file_name': 'uploaded_encrypted', + 'file_name': f'{subfolder}/uploaded_encrypted', 'sse_c_key_id': sse_c_key_id, 'serverSideEncryption': { @@ -3437,7 +3444,8 @@ def assert_expected(file_info, expected=expected_file_info): assert re.search(r'Expires: *Thu, 01 Dec 2050 16:00:00 GMT', download_output) -def test_notification_rules(b2_tool, bucket_name): +def test_notification_rules(b2_tool, persistent_bucket_aggregate): + bucket_name = persistent_bucket_aggregate.bucket_name auth_dict = b2_tool.should_succeed_json(['account', 'get']) if 'writeBucketNotifications' not in auth_dict['allowed']['capabilities']: pytest.skip('Test account does not have writeBucketNotifications capability') From b3c1b1178ca9b0adcb01137bb5610714e1e387bd Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 16 Sep 2024 10:20:20 +0000 Subject: [PATCH 04/18] Remove account_info_file dependency in persistent bucket creation --- test/integration/conftest.py | 
4 ++-- test/integration/persistent_bucket.py | 12 +++++------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 4f076613..94c2ce67 100755 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -412,8 +412,8 @@ def b2_uri_args(apiver_int): # -- Persistent bucket fixtures -- @pytest.fixture -def persistent_bucket(b2_api, account_info_file) -> Bucket: - return get_or_create_persistent_bucket(b2_api, account_info_file) +def persistent_bucket(b2_api) -> Bucket: + return get_or_create_persistent_bucket(b2_api) @pytest.fixture diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py index 3a53907d..79896e62 100644 --- a/test/integration/persistent_bucket.py +++ b/test/integration/persistent_bucket.py @@ -12,11 +12,10 @@ import sys from dataclasses import dataclass from functools import cached_property -from pathlib import Path from test.integration.helpers import BUCKET_NAME_LENGTH, Api import backoff -from b2sdk.v2 import Bucket, SqliteAccountInfo +from b2sdk.v2 import Bucket from b2sdk.v2.exception import NonExistentBucket PERSISTENT_BUCKET_NAME_PREFIX = "constst" @@ -53,7 +52,7 @@ def cleanup_persistent_bucket(b2_api: Api): delete_all_files(bucket) -def get_persistent_bucket_name(b2_api: Api, account_info_file: Path) -> str: +def get_persistent_bucket_name(b2_api: Api) -> str: if "CI" in os.environ: # CI environment repo_id = os.environ.get("GITHUB_REPOSITORY_ID") @@ -62,14 +61,13 @@ def get_persistent_bucket_name(b2_api: Api, account_info_file: Path) -> str: bucket_hash = hashlib.sha256(repo_id.encode()).hexdigest() else: # Local development - account_info = SqliteAccountInfo(file_name=account_info_file) - bucket_hash = hashlib.sha256(account_info.get_account_id().encode()).hexdigest() + bucket_hash = hashlib.sha256(b2_api.account_id.encode()).hexdigest() return f"{PERSISTENT_BUCKET_NAME_PREFIX}-{bucket_hash}" [:BUCKET_NAME_LENGTH] -def get_or_create_persistent_bucket(b2_api: Api, account_info_file: Path) -> Bucket: - bucket_name = get_persistent_bucket_name(b2_api, account_info_file) +def get_or_create_persistent_bucket(b2_api: Api) -> Bucket: + bucket_name = get_persistent_bucket_name(b2_api) try: bucket = b2_api.api.get_bucket_by_name(bucket_name) except NonExistentBucket: From a171c1c0adcc277dd909308b65f9a6ffb33dab82 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 16 Sep 2024 16:24:26 +0000 Subject: [PATCH 05/18] Clean up --- test/integration/persistent_bucket.py | 3 +-- test/integration/test_b2_command_line.py | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py index 79896e62..37f3623c 100644 --- a/test/integration/persistent_bucket.py +++ b/test/integration/persistent_bucket.py @@ -9,7 +9,6 @@ ###################################################################### import hashlib import os -import sys from dataclasses import dataclass from functools import cached_property from test.integration.helpers import BUCKET_NAME_LENGTH, Api @@ -48,7 +47,7 @@ def cleanup_persistent_bucket(b2_api: Api): all_buckets = b2_api.api.list_buckets() for bucket in all_buckets: if bucket.name.startswith(PERSISTENT_BUCKET_NAME_PREFIX): - print(f"Deleting all files in bucket {bucket.name}", flush=True, file=sys.stderr) + print(f"Deleting all files in bucket {bucket.name}") delete_all_files(bucket) diff --git a/test/integration/test_b2_command_line.py 
b/test/integration/test_b2_command_line.py index 2f7ca9db..51e8757b 100755 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -1871,7 +1871,6 @@ def test_sse_c( list_of_files = b2_tool.should_succeed_json( ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, subfolder)] ) - print(list_of_files, flush=True, file=sys.stderr) should_equal( [ From d1b65f56e0ab9b1882d55748785387118b76392d Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 16 Sep 2024 16:26:01 +0000 Subject: [PATCH 06/18] Refactor persistent bucket cleanup in tests: manual clear, remove auto-teardown, add error handling --- test/integration/cleanup_buckets.py | 10 ++++++++-- test/integration/conftest.py | 14 +++++--------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/test/integration/cleanup_buckets.py b/test/integration/cleanup_buckets.py index 51d21652..84935d1c 100644 --- a/test/integration/cleanup_buckets.py +++ b/test/integration/cleanup_buckets.py @@ -7,10 +7,16 @@ # License https://www.backblaze.com/using_b2_code.html # ###################################################################### +from .persistent_bucket import get_or_create_persistent_bucket def test_cleanup_buckets(b2_api): # this is not a test, but it is intended to be called # via pytest because it reuses fixtures which have everything - # set up - pass # b2_api calls b2_api.clean_buckets() in its finalizer + # set up. + # The persistent bucket is cleared manually now and not + # when tests tear down, as otherwise we'd lose the main benefit + # of a persistent bucket, whose identity is shared across tests. + persistent_bucket = get_or_create_persistent_bucket(b2_api) + b2_api.clean_bucket(persistent_bucket) + b2_api.api.list_buckets() diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 94c2ce67..43f7d4b4 100755 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -17,11 +17,13 @@ import sys import tempfile import uuid +from contextlib import suppress from os import environ, path from tempfile import TemporaryDirectory import pytest from b2sdk.v2 import B2_ACCOUNT_INFO_ENV_VAR, XDG_CONFIG_HOME_ENV_VAR, Bucket +from b2sdk.v2.exception import NonExistentBucket from b2._internal.version_listing import ( CLI_VERSIONS, @@ -437,12 +439,6 @@ def cleanup_persistent_bucket_subfolders( ): yield # Clean up all files in the persistent bucket after each test - bucket = b2_api.api.get_bucket_by_name(persistent_bucket_aggregate.bucket_name) - delete_files(bucket, persistent_bucket_aggregate.subfolder) - - -# @pytest.fixture(scope="session", autouse=True) -# def final_cleanup_persistent_buckets(b2_api, worker_id): -# yield -# if worker_id == "gw0": -# cleanup_persistent_bucket(b2_api) + with suppress(NonExistentBucket): + bucket = b2_api.api.get_bucket_by_name(persistent_bucket_aggregate.bucket_name) + delete_files(bucket, persistent_bucket_aggregate.subfolder) \ No newline at end of file From efda8b526d93182b906122c86e5d3d7b4fc19c9e Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 16 Sep 2024 16:36:18 +0000 Subject: [PATCH 07/18] Add changelog --- changelog.d/+persistent_bucket.added.md | 1 + changelog.d/+persistent_bucket_aggregate.added.md | 1 + changelog.d/+persistent_bucket_utils.added.md | 1 + changelog.d/+update_integration_tests.changed.md | 1 + 4 files changed, 4 insertions(+) create mode 100644 changelog.d/+persistent_bucket.added.md create mode 100644 changelog.d/+persistent_bucket_aggregate.added.md create mode 100644 
changelog.d/+persistent_bucket_utils.added.md create mode 100644 changelog.d/+update_integration_tests.changed.md diff --git a/changelog.d/+persistent_bucket.added.md b/changelog.d/+persistent_bucket.added.md new file mode 100644 index 00000000..af7e3e91 --- /dev/null +++ b/changelog.d/+persistent_bucket.added.md @@ -0,0 +1 @@ +Add persistent bucket fixtures for integration tests diff --git a/changelog.d/+persistent_bucket_aggregate.added.md b/changelog.d/+persistent_bucket_aggregate.added.md new file mode 100644 index 00000000..42cb9e4c --- /dev/null +++ b/changelog.d/+persistent_bucket_aggregate.added.md @@ -0,0 +1 @@ +Introduce PersistentBucketAggregate class to manage bucket name and subfolder diff --git a/changelog.d/+persistent_bucket_utils.added.md b/changelog.d/+persistent_bucket_utils.added.md new file mode 100644 index 00000000..5523f8a4 --- /dev/null +++ b/changelog.d/+persistent_bucket_utils.added.md @@ -0,0 +1 @@ +Add utility functions for managing persistent buckets diff --git a/changelog.d/+update_integration_tests.changed.md b/changelog.d/+update_integration_tests.changed.md new file mode 100644 index 00000000..9e3e9d26 --- /dev/null +++ b/changelog.d/+update_integration_tests.changed.md @@ -0,0 +1 @@ +Update integration tests to use persistent buckets From 1a9dba70747c257edb5032274ad56f1299c94c9b Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 16 Sep 2024 16:48:28 +0000 Subject: [PATCH 08/18] Add full-stops --- changelog.d/+persistent_bucket.added.md | 2 +- changelog.d/+persistent_bucket_aggregate.added.md | 2 +- changelog.d/+persistent_bucket_utils.added.md | 2 +- changelog.d/+update_integration_tests.changed.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/changelog.d/+persistent_bucket.added.md b/changelog.d/+persistent_bucket.added.md index af7e3e91..1c9b6bf7 100644 --- a/changelog.d/+persistent_bucket.added.md +++ b/changelog.d/+persistent_bucket.added.md @@ -1 +1 @@ -Add persistent bucket fixtures for integration tests +Add persistent bucket fixtures for integration tests. diff --git a/changelog.d/+persistent_bucket_aggregate.added.md b/changelog.d/+persistent_bucket_aggregate.added.md index 42cb9e4c..72c0e5fb 100644 --- a/changelog.d/+persistent_bucket_aggregate.added.md +++ b/changelog.d/+persistent_bucket_aggregate.added.md @@ -1 +1 @@ -Introduce PersistentBucketAggregate class to manage bucket name and subfolder +Introduce PersistentBucketAggregate class to manage bucket name and subfolder. diff --git a/changelog.d/+persistent_bucket_utils.added.md b/changelog.d/+persistent_bucket_utils.added.md index 5523f8a4..99d33bf3 100644 --- a/changelog.d/+persistent_bucket_utils.added.md +++ b/changelog.d/+persistent_bucket_utils.added.md @@ -1 +1 @@ -Add utility functions for managing persistent buckets +Add utility functions for managing persistent buckets. diff --git a/changelog.d/+update_integration_tests.changed.md b/changelog.d/+update_integration_tests.changed.md index 9e3e9d26..a039ded5 100644 --- a/changelog.d/+update_integration_tests.changed.md +++ b/changelog.d/+update_integration_tests.changed.md @@ -1 +1 @@ -Update integration tests to use persistent buckets +Update integration tests to use persistent buckets. 
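As a rough illustration of how the fixtures introduced so far are meant to be consumed by an integration test (the test function name below is hypothetical; the fixture names and CLI arguments follow the conventions used elsewhere in this suite, and this sketch is not part of any patch in the series):

    # Hypothetical consumer of the persistent-bucket fixtures from conftest.py.
    # Each test writes under its own unique subfolder, so many tests can share
    # one long-lived bucket instead of creating and deleting a bucket per test.
    def test_upload_into_persistent_bucket(b2_tool, persistent_bucket_aggregate, sample_file):
        b2_tool.should_succeed(
            [
                'file', 'upload', '--quiet',
                persistent_bucket_aggregate.bucket_name,
                str(sample_file),
                f'{persistent_bucket_aggregate.subfolder}/sample_file',
            ]
        )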
From 7d18983d1f2a7ca2960f1665208ae925557951b6 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Tue, 17 Sep 2024 11:19:06 +0000 Subject: [PATCH 09/18] Format --- test/integration/conftest.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 43f7d4b4..fe42f85a 100755 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -425,20 +425,18 @@ def unique_subfolder(): @pytest.fixture -def persistent_bucket_aggregate(persistent_bucket, unique_subfolder) -> PersistentBucketAggregate: +def persistent_bucket_aggregate( + persistent_bucket, unique_subfolder, b2_api +) -> PersistentBucketAggregate: """ Since all consumers of the `bucket_name` fixture expect a new bucket to be created, we need to mirror this behavior by appending a unique subfolder to the persistent bucket name. """ - yield PersistentBucketAggregate(persistent_bucket.name, unique_subfolder) - - -@pytest.fixture(autouse=True) -def cleanup_persistent_bucket_subfolders( - persistent_bucket_aggregate: PersistentBucketAggregate, b2_api: Api -): - yield + persistent_bucket_aggregate = PersistentBucketAggregate( + persistent_bucket.name, unique_subfolder + ) + yield persistent_bucket_aggregate # Clean up all files in the persistent bucket after each test with suppress(NonExistentBucket): bucket = b2_api.api.get_bucket_by_name(persistent_bucket_aggregate.bucket_name) - delete_files(bucket, persistent_bucket_aggregate.subfolder) \ No newline at end of file + delete_files(bucket, persistent_bucket_aggregate.subfolder) From 2c05f31c6a9fb628f51799960ddf98dc1002fe2d Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Tue, 17 Sep 2024 11:19:40 +0000 Subject: [PATCH 10/18] Retry on duplicate bucket in persistent bucket get_or_create --- test/integration/persistent_bucket.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py index 37f3623c..fc9b5273 100644 --- a/test/integration/persistent_bucket.py +++ b/test/integration/persistent_bucket.py @@ -15,7 +15,7 @@ import backoff from b2sdk.v2 import Bucket -from b2sdk.v2.exception import NonExistentBucket +from b2sdk.v2.exception import DuplicateBucketName, NonExistentBucket PERSISTENT_BUCKET_NAME_PREFIX = "constst" @@ -65,6 +65,12 @@ def get_persistent_bucket_name(b2_api: Api) -> str: return f"{PERSISTENT_BUCKET_NAME_PREFIX}-{bucket_hash}" [:BUCKET_NAME_LENGTH] +@backoff.on_exception( + backoff.expo, + DuplicateBucketName, + max_tries=3, + jitter=backoff.full_jitter, +) def get_or_create_persistent_bucket(b2_api: Api) -> Bucket: bucket_name = get_persistent_bucket_name(b2_api) try: From 01b8a69b13d5507149ba36e55eb1b98a3ea3eabf Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Thu, 19 Sep 2024 14:13:39 +0000 Subject: [PATCH 11/18] Improve changelog --- changelog.d/+persistent_bucket.added.md | 1 - changelog.d/+persistent_bucket_aggregate.added.md | 1 - changelog.d/+persistent_bucket_utils.added.md | 1 - changelog.d/+test_with_persistent_bucket.changed.md | 1 + changelog.d/+update_integration_tests.changed.md | 1 - 5 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 changelog.d/+persistent_bucket.added.md delete mode 100644 changelog.d/+persistent_bucket_aggregate.added.md delete mode 100644 changelog.d/+persistent_bucket_utils.added.md create mode 100644 changelog.d/+test_with_persistent_bucket.changed.md delete mode 100644 
changelog.d/+update_integration_tests.changed.md diff --git a/changelog.d/+persistent_bucket.added.md b/changelog.d/+persistent_bucket.added.md deleted file mode 100644 index 1c9b6bf7..00000000 --- a/changelog.d/+persistent_bucket.added.md +++ /dev/null @@ -1 +0,0 @@ -Add persistent bucket fixtures for integration tests. diff --git a/changelog.d/+persistent_bucket_aggregate.added.md b/changelog.d/+persistent_bucket_aggregate.added.md deleted file mode 100644 index 72c0e5fb..00000000 --- a/changelog.d/+persistent_bucket_aggregate.added.md +++ /dev/null @@ -1 +0,0 @@ -Introduce PersistentBucketAggregate class to manage bucket name and subfolder. diff --git a/changelog.d/+persistent_bucket_utils.added.md b/changelog.d/+persistent_bucket_utils.added.md deleted file mode 100644 index 99d33bf3..00000000 --- a/changelog.d/+persistent_bucket_utils.added.md +++ /dev/null @@ -1 +0,0 @@ -Add utility functions for managing persistent buckets. diff --git a/changelog.d/+test_with_persistent_bucket.changed.md b/changelog.d/+test_with_persistent_bucket.changed.md new file mode 100644 index 00000000..39419d1b --- /dev/null +++ b/changelog.d/+test_with_persistent_bucket.changed.md @@ -0,0 +1 @@ +Improve internal testing infrastructure by updating integration tests to use persistent buckets. \ No newline at end of file diff --git a/changelog.d/+update_integration_tests.changed.md b/changelog.d/+update_integration_tests.changed.md deleted file mode 100644 index a039ded5..00000000 --- a/changelog.d/+update_integration_tests.changed.md +++ /dev/null @@ -1 +0,0 @@ -Update integration tests to use persistent buckets. From b5c5c1c6bd0e211843db5f04cd0bba90a668afe0 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Thu, 19 Sep 2024 14:14:38 +0000 Subject: [PATCH 12/18] Don't clean up the persistent bucket's subfolder after each test case --- test/integration/cleanup_buckets.py | 2 +- test/integration/conftest.py | 22 +++---------------- test/integration/persistent_bucket.py | 31 ++++----------------------- 3 files changed, 8 insertions(+), 47 deletions(-) diff --git a/test/integration/cleanup_buckets.py b/test/integration/cleanup_buckets.py index 84935d1c..921a0c06 100644 --- a/test/integration/cleanup_buckets.py +++ b/test/integration/cleanup_buckets.py @@ -14,9 +14,9 @@ def test_cleanup_buckets(b2_api): # this is not a test, but it is intended to be called # via pytest because it reuses fixtures which have everything # set up. + pass # The persistent bucket is cleared manually now and not # when tests tear down, as otherwise we'd lose the main benefit # of a persistent bucket, whose identity is shared across tests. 
persistent_bucket = get_or_create_persistent_bucket(b2_api) b2_api.clean_bucket(persistent_bucket) - b2_api.api.list_buckets() diff --git a/test/integration/conftest.py b/test/integration/conftest.py index fe42f85a..bc4ef771 100755 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -17,13 +17,11 @@ import sys import tempfile import uuid -from contextlib import suppress from os import environ, path from tempfile import TemporaryDirectory import pytest from b2sdk.v2 import B2_ACCOUNT_INFO_ENV_VAR, XDG_CONFIG_HOME_ENV_VAR, Bucket -from b2sdk.v2.exception import NonExistentBucket from b2._internal.version_listing import ( CLI_VERSIONS, @@ -36,7 +34,6 @@ from .helpers import NODE_DESCRIPTION, RNG_SEED, Api, CommandLine, bucket_name_part, random_token from .persistent_bucket import ( PersistentBucketAggregate, - delete_files, get_or_create_persistent_bucket, ) @@ -413,11 +410,6 @@ def b2_uri_args(apiver_int): # -- Persistent bucket fixtures -- -@pytest.fixture -def persistent_bucket(b2_api) -> Bucket: - return get_or_create_persistent_bucket(b2_api) - - @pytest.fixture def unique_subfolder(): subfolder = f"test-{uuid.uuid4().hex[:8]}" @@ -425,18 +417,10 @@ def unique_subfolder(): @pytest.fixture -def persistent_bucket_aggregate( - persistent_bucket, unique_subfolder, b2_api -) -> PersistentBucketAggregate: +def persistent_bucket(unique_subfolder, b2_api) -> PersistentBucketAggregate: """ Since all consumers of the `bucket_name` fixture expect a new bucket to be created, we need to mirror this behavior by appending a unique subfolder to the persistent bucket name. """ - persistent_bucket_aggregate = PersistentBucketAggregate( - persistent_bucket.name, unique_subfolder - ) - yield persistent_bucket_aggregate - # Clean up all files in the persistent bucket after each test - with suppress(NonExistentBucket): - bucket = b2_api.api.get_bucket_by_name(persistent_bucket_aggregate.bucket_name) - delete_files(bucket, persistent_bucket_aggregate.subfolder) + persistent_bucket = get_or_create_persistent_bucket(b2_api) + yield PersistentBucketAggregate(persistent_bucket.name, unique_subfolder) diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py index fc9b5273..15a231e3 100644 --- a/test/integration/persistent_bucket.py +++ b/test/integration/persistent_bucket.py @@ -30,38 +30,15 @@ def virtual_bucket_name(self): return f"{self.bucket_name}/{self.subfolder}" -@backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=10) -def delete_all_files(bucket: Bucket): - all_items = list(bucket.ls(recursive=True)) - for item, _ in all_items: - bucket.delete_file_version(item.id_, item.file_name) - - @backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=10) def delete_files(bucket: Bucket, subfolder: str): for file_version, _ in bucket.ls(recursive=True, folder_to_list=subfolder): bucket.delete_file_version(file_version.id_, file_version.file_name) -def cleanup_persistent_bucket(b2_api: Api): - all_buckets = b2_api.api.list_buckets() - for bucket in all_buckets: - if bucket.name.startswith(PERSISTENT_BUCKET_NAME_PREFIX): - print(f"Deleting all files in bucket {bucket.name}") - delete_all_files(bucket) - - def get_persistent_bucket_name(b2_api: Api) -> str: - if "CI" in os.environ: - # CI environment - repo_id = os.environ.get("GITHUB_REPOSITORY_ID") - if not repo_id: - raise ValueError("GITHUB_REPOSITORY_ID is not set") - bucket_hash = hashlib.sha256(repo_id.encode()).hexdigest() - else: - # Local development - bucket_hash = 
hashlib.sha256(b2_api.account_id.encode()).hexdigest() - + bucket_base = os.environ.get("GITHUB_REPOSITORY_ID", b2_api.account_id) + bucket_hash = hashlib.sha256(bucket_base.encode()).hexdigest() return f"{PERSISTENT_BUCKET_NAME_PREFIX}-{bucket_hash}" [:BUCKET_NAME_LENGTH] @@ -82,11 +59,11 @@ def get_or_create_persistent_bucket(b2_api: Api) -> Bucket: lifecycle_rules=[ { "daysFromHidingToDeleting": 1, - "daysFromUploadingToHiding": 14, + "daysFromUploadingToHiding": 1, "fileNamePrefix": "", } ], ) # add the new bucket name to the list of bucket names b2_api.bucket_name_log.append(bucket_name) - return bucket + return bucket \ No newline at end of file From a3b7485529b73a84e42a95f32d8302eee0b9d756 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Thu, 19 Sep 2024 14:16:56 +0000 Subject: [PATCH 13/18] Rename persistent bucket fixture --- test/integration/test_b2_command_line.py | 179 ++++++++++------------- 1 file changed, 76 insertions(+), 103 deletions(-) diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py index 51e8757b..20636ab9 100755 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -271,23 +271,21 @@ def test_command_with_env_vars_reusing_existing_account_info( @pytest.fixture -def uploaded_sample_file(b2_tool, persistent_bucket_aggregate, sample_filepath): +def uploaded_sample_file(b2_tool, persistent_bucket, sample_filepath): return b2_tool.should_succeed_json( [ - 'file', 'upload', '--quiet', persistent_bucket_aggregate.bucket_name, - str(sample_filepath), 'sample_file' + 'file', 'upload', '--quiet', persistent_bucket.bucket_name, + str(sample_filepath), f'{persistent_bucket.subfolder}/sample_file' ] ) -def test_download( - b2_tool, persistent_bucket_aggregate, sample_filepath, uploaded_sample_file, tmp_path -): +def test_download(b2_tool, persistent_bucket, sample_filepath, uploaded_sample_file, tmp_path): output_a = tmp_path / 'a' b2_tool.should_succeed( [ 'file', 'download', '--quiet', - f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}", + f"b2://{persistent_bucket.bucket_name}/{uploaded_sample_file['fileName']}", str(output_a) ] ) @@ -301,11 +299,9 @@ def test_download( assert output_b.read_text() == sample_filepath.read_text() -def test_basic( - b2_tool, persistent_bucket_aggregate, sample_file, tmp_path, b2_uri_args, apiver_int -): - bucket_name = persistent_bucket_aggregate.bucket_name - subfolder = f"{persistent_bucket_aggregate.subfolder}/" +def test_basic(b2_tool, persistent_bucket, sample_file, tmp_path, b2_uri_args, apiver_int): + bucket_name = persistent_bucket.bucket_name + subfolder = f"{persistent_bucket.subfolder}/" file_mod_time_str = str(file_mod_time_millis(sample_file)) file_data = read_file(sample_file) @@ -385,9 +381,7 @@ def test_basic( ], [f['fileName'] for f in list_of_files] ) - b2_tool.should_succeed( - ['file', 'unhide', f'b2://{persistent_bucket_aggregate.virtual_bucket_name}/c'] - ) + b2_tool.should_succeed(['file', 'unhide', f'b2://{persistent_bucket.virtual_bucket_name}/c']) list_of_files = b2_tool.should_succeed_json( ['ls', '--json', '--recursive', *b2_uri_args(bucket_name, f'{subfolder}')] @@ -509,9 +503,9 @@ def test_basic( b2_tool.should_succeed(['file', 'url', f"b2id://{second_c_version['fileId']}"]) b2_tool.should_succeed( - ['file', 'url', f"b2://{persistent_bucket_aggregate.virtual_bucket_name}/any-file-name"], + ['file', 'url', f"b2://{persistent_bucket.virtual_bucket_name}/any-file-name"], 
'^https://.*/file/{}/{}\r?$'.format( - persistent_bucket_aggregate.virtual_bucket_name, + persistent_bucket.virtual_bucket_name, 'any-file-name', ), ) # \r? is for Windows, as $ doesn't match \r\n @@ -526,13 +520,13 @@ def test_ls_b2id(b2_tool, uploaded_sample_file): @pytest.mark.apiver(from_ver=4) -def test_rm_b2id(b2_tool, persistent_bucket_aggregate, uploaded_sample_file): +def test_rm_b2id(b2_tool, persistent_bucket, uploaded_sample_file): # remove the file by id b2_tool.should_succeed(['rm', f"b2id://{uploaded_sample_file['fileId']}"]) # check that the file is gone b2_tool.should_succeed( - ['ls', f'b2://{persistent_bucket_aggregate.bucket_name}'], + ['ls', f'b2://{persistent_bucket.bucket_name}'], expected_pattern='^$', ) @@ -592,7 +586,7 @@ def test_debug_logs(b2_tool, is_running_on_docker, tmp_path): assert re.search(log_file_regex, log), log -def test_bucket(b2_tool, persistent_bucket_aggregate): +def test_bucket(b2_tool, persistent_bucket): rule = """{ "daysFromHidingToDeleting": 1, "daysFromUploadingToHiding": null, @@ -600,7 +594,7 @@ def test_bucket(b2_tool, persistent_bucket_aggregate): }""" output = b2_tool.should_succeed_json( [ - 'bucket', 'update', '--lifecycle-rule', rule, persistent_bucket_aggregate.bucket_name, + 'bucket', 'update', '--lifecycle-rule', rule, persistent_bucket.bucket_name, 'allPublic', *b2_tool.get_bucket_info_args() ], ) @@ -621,16 +615,9 @@ def test_bucket(b2_tool, persistent_bucket_aggregate): ] -def test_key_restrictions( - b2_tool, persistent_bucket_aggregate, sample_file, bucket_factory, b2_uri_args -): +def test_key_restrictions(b2_tool, bucket_name, sample_file, bucket_factory, b2_uri_args): # A single file for rm to fail on. - b2_tool.should_succeed( - [ - 'file', 'upload', '--no-progress', persistent_bucket_aggregate.bucket_name, sample_file, - 'test' - ] - ) + b2_tool.should_succeed(['file', 'upload', '--no-progress', bucket_name, sample_file, 'test']) key_one_name = 'clt-testKey-01' + random_hex(6) created_key_stdout = b2_tool.should_succeed( @@ -647,7 +634,7 @@ def test_key_restrictions( ['account', 'authorize', '--environment', b2_tool.realm, key_one_id, key_one], ) - b2_tool.should_succeed(['bucket', 'get', persistent_bucket_aggregate.bucket_name],) + b2_tool.should_succeed(['bucket', 'get', bucket_name],) second_bucket_name = bucket_factory().name b2_tool.should_succeed(['bucket', 'get', second_bucket_name],) @@ -657,7 +644,7 @@ def test_key_restrictions( 'key', 'create', '--bucket', - persistent_bucket_aggregate.bucket_name, + bucket_name, key_two_name, 'listFiles,listBuckets,readFiles', ] @@ -672,7 +659,7 @@ def test_key_restrictions( [ 'create-key', '--bucket', - persistent_bucket_aggregate.bucket_name, + bucket_name, key_three_name, 'listFiles,listBuckets,readFiles', ], @@ -683,8 +670,8 @@ def test_key_restrictions( b2_tool.should_succeed( ['account', 'authorize', '--environment', b2_tool.realm, key_two_id, key_two], ) - b2_tool.should_succeed(['bucket', 'get', persistent_bucket_aggregate.bucket_name],) - b2_tool.should_succeed(['ls', *b2_uri_args(persistent_bucket_aggregate.bucket_name)],) + b2_tool.should_succeed(['bucket', 'get', bucket_name],) + b2_tool.should_succeed(['ls', *b2_uri_args(bucket_name)],) b2_tool.should_succeed( ['account', 'authorize', '--environment', b2_tool.realm, key_three_id, key_three], @@ -695,21 +682,18 @@ def test_key_restrictions( failed_bucket_err = r'Deletion of file "test" \([^\)]+\) failed: unauthorized for ' \ r'application key with capabilities ' \ 
r"'(.*listFiles.*|.*listBuckets.*|.*readFiles.*){3}', " \ - r"restricted to bucket '%s' \(unauthorized\)" % persistent_bucket_aggregate.bucket_name + r"restricted to bucket '%s' \(unauthorized\)" % bucket_name b2_tool.should_fail( - [ - 'rm', '--recursive', '--no-progress', - *b2_uri_args(persistent_bucket_aggregate.bucket_name) - ], failed_bucket_err + ['rm', '--recursive', '--no-progress', *b2_uri_args(bucket_name)], failed_bucket_err ) - failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + persistent_bucket_aggregate.bucket_name + failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name b2_tool.should_fail(['bucket', 'get', second_bucket_name], failed_bucket_err) - failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + persistent_bucket_aggregate.bucket_name + failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name b2_tool.should_fail(['ls', *b2_uri_args(second_bucket_name)], failed_list_files_err) - failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + persistent_bucket_aggregate.bucket_name + failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name b2_tool.should_fail(['rm', *b2_uri_args(second_bucket_name)], failed_list_files_err) # reauthorize with more capabilities for clean up @@ -894,21 +878,19 @@ def encryption_summary(sse_dict, file_info): "dir_, encryption", [('sync', None), ('sync', SSE_B2_AES), ('sync', SSE_C_AES), ('', None)], ) -def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir_, encryption): - # persistent_bucket_aggregate.subfolder = persistent_bucket_aggregate.subfolder + random_hex(6) +def test_sync_up(tmp_path, b2_tool, persistent_bucket, apiver_int, dir_, encryption): + # persistent_bucket.subfolder = persistent_bucket.subfolder + random_hex(6) - sync_point_parts = [ - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder - ] + sync_point_parts = [persistent_bucket.bucket_name, persistent_bucket.subfolder] if dir_: sync_point_parts.append(dir_) - prefix = f'{persistent_bucket_aggregate.subfolder}/{dir_}/' + prefix = f'{persistent_bucket.subfolder}/{dir_}/' else: - prefix = persistent_bucket_aggregate.subfolder + '/' + prefix = persistent_bucket.subfolder + '/' b2_sync_point = 'b2:' + '/'.join(sync_point_parts) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal([], file_version_summary(file_versions)) @@ -919,7 +901,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir # simulate action (nothing should be uploaded) b2_tool.should_succeed(['sync', '--no-progress', '--dry-run', tmp_path, b2_sync_point]) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal([], file_version_summary(file_versions)) @@ -976,7 +958,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir assert re.search(r'd[\'"]? 
could not be accessed', stdout) assert status == (1 if apiver_int >= 4 else 0) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( @@ -1009,7 +991,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir assert re.search(r'd[\'"]? could not be accessed', stdout) assert status == (1 if apiver_int >= 4 else 0) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( [ @@ -1026,7 +1008,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir b2_tool.should_succeed(['sync', '--no-progress', '--delete', tmp_path, b2_sync_point]) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal([ '+ ' + prefix + 'c', @@ -1043,7 +1025,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ] ) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal([ '+ ' + prefix + 'c', @@ -1057,7 +1039,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ] ) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal([ '+ ' + prefix + 'c', @@ -1075,7 +1057,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ] ) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal([ '+ ' + prefix + 'c', @@ -1090,7 +1072,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ] ) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( [ @@ -1109,7 +1091,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ['sync', '--no-progress', '--exclude-if-modified-after', mod_time, tmp_path, b2_sync_point] ) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( [ @@ -1127,7 +1109,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir ['sync', '--no-progress', '--exclude-all-symlinks', tmp_path, b2_sync_point], ) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( [ @@ -1142,7 +1124,7 @@ def test_sync_up(tmp_path, b2_tool, persistent_bucket_aggregate, apiver_int, dir # confirm symlink target is uploaded (with symlink's name) b2_tool.should_succeed(['sync', '--no-progress', tmp_path, b2_sync_point]) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, 
persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( [ @@ -1486,11 +1468,11 @@ def run_sync_copy_with_basic_checks( raise NotImplementedError(destination_encryption) -def test_sync_long_path(tmp_path, b2_tool, persistent_bucket_aggregate): +def test_sync_long_path(tmp_path, b2_tool, persistent_bucket): """ test sync with very long path (overcome windows 260 character limit) """ - b2_sync_point = f'b2://{persistent_bucket_aggregate.virtual_bucket_name}' + b2_sync_point = f'b2://{persistent_bucket.virtual_bucket_name}' long_path = '/'.join( ( @@ -1508,11 +1490,10 @@ def test_sync_long_path(tmp_path, b2_tool, persistent_bucket_aggregate): b2_tool.should_succeed(['sync', '--no-progress', '--delete', str(tmp_path), b2_sync_point]) file_versions = b2_tool.list_file_versions( - persistent_bucket_aggregate.bucket_name, persistent_bucket_aggregate.subfolder + persistent_bucket.bucket_name, persistent_bucket.subfolder ) should_equal( - [f'+ {persistent_bucket_aggregate.subfolder}/{long_path}'], - file_version_summary(file_versions) + [f'+ {persistent_bucket.subfolder}/{long_path}'], file_version_summary(file_versions) ) @@ -1561,9 +1542,9 @@ def test_default_sse_b2__create_bucket(b2_tool, schedule_bucket_cleanup): should_equal(second_bucket_default_sse, second_bucket_info['defaultServerSideEncryption']) -def test_sse_b2(b2_tool, persistent_bucket_aggregate, sample_file, tmp_path, b2_uri_args): - bucket_name = persistent_bucket_aggregate.bucket_name - subfolder = persistent_bucket_aggregate.subfolder +def test_sse_b2(b2_tool, persistent_bucket, sample_file, tmp_path, b2_uri_args): + bucket_name = persistent_bucket.bucket_name + subfolder = persistent_bucket.subfolder b2_tool.should_succeed( [ 'file', 'upload', '--destination-server-side-encryption=SSE-B2', '--quiet', bucket_name, @@ -1649,10 +1630,10 @@ def test_sse_b2(b2_tool, persistent_bucket_aggregate, sample_file, tmp_path, b2_ def test_sse_c( - b2_tool, persistent_bucket_aggregate, is_running_on_docker, sample_file, tmp_path, b2_uri_args + b2_tool, persistent_bucket, is_running_on_docker, sample_file, tmp_path, b2_uri_args ): - bucket_name = persistent_bucket_aggregate.bucket_name - subfolder = persistent_bucket_aggregate.subfolder + bucket_name = persistent_bucket.bucket_name + subfolder = persistent_bucket.subfolder sse_c_key_id = 'user-generated-key-id \nąóźćż\nœøΩ≈ç\nßäöü' if is_running_on_docker: # TODO: fix this once we figure out how to pass env vars with \n in them to docker, docker-compose should work @@ -3241,11 +3222,9 @@ def _assert_file_lock_configuration( assert legal_hold == actual_legal_hold -def test_upload_file__custom_upload_time( - b2_tool, persistent_bucket_aggregate, sample_file, b2_uri_args -): - bucket_name = persistent_bucket_aggregate.bucket_name - subfolder = persistent_bucket_aggregate.subfolder +def test_upload_file__custom_upload_time(b2_tool, persistent_bucket, sample_file, b2_uri_args): + bucket_name = persistent_bucket.bucket_name + subfolder = persistent_bucket.subfolder file_data = read_file(sample_file) cut = 12345 cut_printable = '1970-01-01 00:00:12' @@ -3280,12 +3259,10 @@ def test_upload_file__custom_upload_time( @skip_on_windows -def test_upload_file__stdin_pipe_operator( - request, bash_runner, b2_tool, persistent_bucket_aggregate -): +def test_upload_file__stdin_pipe_operator(request, bash_runner, b2_tool, persistent_bucket): """Test `file upload` from stdin using pipe operator.""" - bucket_name = persistent_bucket_aggregate.bucket_name - 
subfolder = persistent_bucket_aggregate.subfolder + bucket_name = persistent_bucket.bucket_name + subfolder = persistent_bucket.subfolder content = request.node.name run = bash_runner( f'echo -n {content!r} ' @@ -3297,11 +3274,11 @@ def test_upload_file__stdin_pipe_operator( @skip_on_windows def test_upload_unbound_stream__redirect_operator( - request, bash_runner, b2_tool, persistent_bucket_aggregate, is_running_on_docker + request, bash_runner, b2_tool, persistent_bucket, is_running_on_docker ): """Test upload-unbound-stream from stdin using redirect operator.""" - bucket_name = persistent_bucket_aggregate.bucket_name - subfolder = persistent_bucket_aggregate.subfolder + bucket_name = persistent_bucket.bucket_name + subfolder = persistent_bucket.subfolder if is_running_on_docker: pytest.skip('Not supported on Docker') content = request.node.name @@ -3313,13 +3290,12 @@ def test_upload_unbound_stream__redirect_operator( def test_download_file_stdout( - b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path, uploaded_sample_file + b2_tool, persistent_bucket, sample_filepath, tmp_path, uploaded_sample_file ): assert b2_tool.should_succeed( [ 'file', 'download', '--quiet', - f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}", - '-' + f"b2://{persistent_bucket.bucket_name}/{uploaded_sample_file['fileName']}", '-' ], ) == sample_filepath.read_text() assert b2_tool.should_succeed( @@ -3328,11 +3304,12 @@ def test_download_file_stdout( def test_download_file_to_directory( - b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path, uploaded_sample_file + b2_tool, persistent_bucket, sample_filepath, tmp_path, uploaded_sample_file ): - downloads_directory = 'downloads' + downloads_directory = 'downloads/' target_directory = tmp_path / downloads_directory target_directory.mkdir() + (target_directory / persistent_bucket.subfolder).mkdir() filename_as_path = pathlib.Path(uploaded_sample_file['fileName']) sample_file_content = sample_filepath.read_text() @@ -3341,7 +3318,7 @@ def test_download_file_to_directory( 'file', 'download', '--quiet', - f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}", + f"b2://{persistent_bucket.bucket_name}/{uploaded_sample_file['fileName']}", str(target_directory), ], ) @@ -3369,19 +3346,16 @@ def test_download_file_to_directory( f'{new_files}, {new_files[0].read_text()}, {sample_file_content}' -def test_cat(b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path, uploaded_sample_file): +def test_cat(b2_tool, persistent_bucket, sample_filepath, tmp_path, uploaded_sample_file): assert b2_tool.should_succeed( - [ - 'file', 'cat', - f"b2://{persistent_bucket_aggregate.bucket_name}/{uploaded_sample_file['fileName']}" - ], + ['file', 'cat', f"b2://{persistent_bucket.bucket_name}/{uploaded_sample_file['fileName']}"], ) == sample_filepath.read_text() assert b2_tool.should_succeed(['file', 'cat', f"b2id://{uploaded_sample_file['fileId']}"] ) == sample_filepath.read_text() -def test_header_arguments(b2_tool, persistent_bucket_aggregate, sample_filepath, tmp_path): - bucket_name = persistent_bucket_aggregate.bucket_name +def test_header_arguments(b2_tool, persistent_bucket, sample_filepath, tmp_path): + bucket_name = persistent_bucket.bucket_name # yapf: disable args = [ '--cache-control', 'max-age=3600', @@ -3411,7 +3385,7 @@ def assert_expected(file_info, expected=expected_file_info): '--no-progress', bucket_name, str(sample_filepath), - 
f'{persistent_bucket_aggregate.subfolder}/sample_file', + f'{persistent_bucket.subfolder}/sample_file', *args, '--info', 'b2-content-disposition=will-be-overwritten', @@ -3428,7 +3402,7 @@ def assert_expected(file_info, expected=expected_file_info): copied_version = b2_tool.should_succeed_json( [ 'file', 'copy-by-id', '--quiet', *args, '--content-type', 'text/plain', - file_version['fileId'], bucket_name, 'copied_file' + file_version['fileId'], bucket_name, f'{persistent_bucket.subfolder}/copied_file' ] ) assert_expected(copied_version['fileInfo']) @@ -3443,8 +3417,7 @@ def assert_expected(file_info, expected=expected_file_info): assert re.search(r'Expires: *Thu, 01 Dec 2050 16:00:00 GMT', download_output) -def test_notification_rules(b2_tool, persistent_bucket_aggregate): - bucket_name = persistent_bucket_aggregate.bucket_name +def test_notification_rules(b2_tool, bucket_name): auth_dict = b2_tool.should_succeed_json(['account', 'get']) if 'writeBucketNotifications' not in auth_dict['allowed']['capabilities']: pytest.skip('Test account does not have writeBucketNotifications capability') From c4413d45cefd03ba8eb5c56fb780920c154e76b9 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Thu, 19 Sep 2024 16:31:04 +0000 Subject: [PATCH 14/18] Delete forgotten pass stmt --- test/integration/cleanup_buckets.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/cleanup_buckets.py b/test/integration/cleanup_buckets.py index 921a0c06..edd2d77f 100644 --- a/test/integration/cleanup_buckets.py +++ b/test/integration/cleanup_buckets.py @@ -14,7 +14,6 @@ def test_cleanup_buckets(b2_api): # this is not a test, but it is intended to be called # via pytest because it reuses fixtures which have everything # set up. - pass # The persistent bucket is cleared manually now and not # when tests tear down, as otherwise we'd lose the main benefit # of a persistent bucket, whose identity is shared across tests. 
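A small sketch of what the renamed `persistent_bucket` fixture yields per test; the concrete values below are made up, but `virtual_bucket_name` is simply the `bucket_name/subfolder` join defined on PersistentBucketAggregate:

    # Illustrative values only; the real bucket name is derived from a SHA-256
    # digest and the subfolder from uuid4 in the unique_subfolder fixture.
    aggregate = PersistentBucketAggregate(
        bucket_name="constst-0123abcd",
        subfolder="test-1a2b3c4d",
    )
    assert aggregate.virtual_bucket_name == "constst-0123abcd/test-1a2b3c4d"
    # Tests then address objects as b2://<bucket>/<subfolder>/<file>, for example:
    uri = f"b2://{aggregate.virtual_bucket_name}/sample_file"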
From fc2a907d73299d867e432c77f9251554c4593d20 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 23 Sep 2024 12:21:04 +0000 Subject: [PATCH 15/18] Change changelog category --- ....changed.md => +test_with_persistent_bucket.infrastructure.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{+test_with_persistent_bucket.changed.md => +test_with_persistent_bucket.infrastructure.md} (100%) diff --git a/changelog.d/+test_with_persistent_bucket.changed.md b/changelog.d/+test_with_persistent_bucket.infrastructure.md similarity index 100% rename from changelog.d/+test_with_persistent_bucket.changed.md rename to changelog.d/+test_with_persistent_bucket.infrastructure.md From 6c844fe5b39788cf2acb280fdc05804bebb25991 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 23 Sep 2024 12:22:38 +0000 Subject: [PATCH 16/18] Don't clean up persistent bucket on teardown --- test/integration/cleanup_buckets.py | 7 +------ test/integration/persistent_bucket.py | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/test/integration/cleanup_buckets.py b/test/integration/cleanup_buckets.py index edd2d77f..8e4ae2f0 100644 --- a/test/integration/cleanup_buckets.py +++ b/test/integration/cleanup_buckets.py @@ -7,15 +7,10 @@ # License https://www.backblaze.com/using_b2_code.html # ###################################################################### -from .persistent_bucket import get_or_create_persistent_bucket def test_cleanup_buckets(b2_api): # this is not a test, but it is intended to be called # via pytest because it reuses fixtures which have everything # set up. - # The persistent bucket is cleared manually now and not - # when tests tear down, as otherwise we'd lose the main benefit - # of a persistent bucket, whose identity is shared across tests. - persistent_bucket = get_or_create_persistent_bucket(b2_api) - b2_api.clean_bucket(persistent_bucket) + pass \ No newline at end of file diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py index 15a231e3..0a1ea196 100644 --- a/test/integration/persistent_bucket.py +++ b/test/integration/persistent_bucket.py @@ -37,7 +37,7 @@ def delete_files(bucket: Bucket, subfolder: str): def get_persistent_bucket_name(b2_api: Api) -> str: - bucket_base = os.environ.get("GITHUB_REPOSITORY_ID", b2_api.account_id) + bucket_base = os.environ.get("GITHUB_REPOSITORY_ID", b2_api.api.get_account_id()) bucket_hash = hashlib.sha256(bucket_base.encode()).hexdigest() return f"{PERSISTENT_BUCKET_NAME_PREFIX}-{bucket_hash}" [:BUCKET_NAME_LENGTH] From 6de2bf79cc25fca6cad30bdb358d294edcf3566e Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Tue, 24 Sep 2024 13:04:37 +0000 Subject: [PATCH 17/18] Revert changes to cleanup_buckets.py --- test/integration/cleanup_buckets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/cleanup_buckets.py b/test/integration/cleanup_buckets.py index 8e4ae2f0..51d21652 100644 --- a/test/integration/cleanup_buckets.py +++ b/test/integration/cleanup_buckets.py @@ -12,5 +12,5 @@ def test_cleanup_buckets(b2_api): # this is not a test, but it is intended to be called # via pytest because it reuses fixtures which have everything - # set up.
- pass \ No newline at end of file + # set up + pass # b2_api calls b2_api.clean_buckets() in its finalizer From 0c99247394d97686e8b853abb38dda011d1730d0 Mon Sep 17 00:00:00 2001 From: kris-konina-reef Date: Mon, 30 Sep 2024 16:49:13 +0200 Subject: [PATCH 18/18] Clean dead code --- test/integration/persistent_bucket.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/integration/persistent_bucket.py b/test/integration/persistent_bucket.py index 0a1ea196..b67c45ba 100644 --- a/test/integration/persistent_bucket.py +++ b/test/integration/persistent_bucket.py @@ -30,12 +30,6 @@ def virtual_bucket_name(self): return f"{self.bucket_name}/{self.subfolder}" -@backoff.on_exception(backoff.expo, Exception, max_tries=3, max_time=10) -def delete_files(bucket: Bucket, subfolder: str): - for file_version, _ in bucket.ls(recursive=True, folder_to_list=subfolder): - bucket.delete_file_version(file_version.id_, file_version.file_name) - - def get_persistent_bucket_name(b2_api: Api) -> str: bucket_base = os.environ.get("GITHUB_REPOSITORY_ID", b2_api.api.get_account_id()) bucket_hash = hashlib.sha256(bucket_base.encode()).hexdigest()
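Taken together, the series settles on a deterministic bucket name shared by all test runs against the same repository (in CI) or account (locally). A minimal standalone sketch of that naming scheme follows; the BUCKET_NAME_LENGTH value and the example account id are assumptions, and the real constant is imported from test/integration/helpers.py:

    import hashlib
    import os

    PERSISTENT_BUCKET_NAME_PREFIX = "constst"
    BUCKET_NAME_LENGTH = 50  # assumed here for illustration; defined in helpers.py

    def persistent_bucket_name(account_id: str) -> str:
        # CI runs key the bucket off the repository id, local runs off the account id,
        # so every run in the same context resolves to the same bucket.
        base = os.environ.get("GITHUB_REPOSITORY_ID", account_id)
        digest = hashlib.sha256(base.encode()).hexdigest()
        return f"{PERSISTENT_BUCKET_NAME_PREFIX}-{digest}"[:BUCKET_NAME_LENGTH]

    # e.g. persistent_bucket_name("000111222333") == "constst-" + the first 42 hex chars of the digest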