diff --git a/CMakeLists.txt b/CMakeLists.txt
index 47c289c4..d9bf009d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -33,7 +33,7 @@ if (NOT CMAKE_CONFIGURATION_TYPES AND NOT CMAKE_BUILD_TYPE)
   message(STATUS "Setting unspecified CMAKE_BUILD_TYPE to '${CMAKE_BUILD_TYPE}'")
 endif()
 
-IRODS_MACRO_CHECK_DEPENDENCY_SET_FULLPATH_ADD_TO_IRODS_PACKAGE_DEPENDENCIES_LIST(S3 libs3e4674774-0)
+IRODS_MACRO_CHECK_DEPENDENCY_SET_FULLPATH_ADD_TO_IRODS_PACKAGE_DEPENDENCIES_LIST(S3 libs3c0e278d2-0)
 string(REPLACE ";" ", " IRODS_PACKAGE_DEPENDENCIES_STRING "${IRODS_PACKAGE_DEPENDENCIES_LIST}")
 
 set(CMAKE_CXX_COMPILER ${IRODS_EXTERNALS_FULLPATH_CLANG}/bin/clang++)
diff --git a/irods_consortium_continuous_integration_build_hook.py b/irods_consortium_continuous_integration_build_hook.py
index ca61e1b1..71160737 100644
--- a/irods_consortium_continuous_integration_build_hook.py
+++ b/irods_consortium_continuous_integration_build_hook.py
@@ -9,26 +9,53 @@ import irods_python_ci_utilities
 
+def update_local_package_repositories():
+    # Updating via dnf or yum actually upgrades packages, so don't do anything in those cases (for now).
+    dispatch_map = {
+        'Ubuntu': ['sudo', 'apt-get', 'update'],
+        'Centos': None,
+        'Centos linux': None,
+        'Almalinux': None,
+        'Opensuse ': None,
+        'Debian gnu_linux': ['sudo', 'apt-get', 'update']
+    }
+    try:
+        cmd = dispatch_map[irods_python_ci_utilities.get_distribution()]
+        if cmd:
+            irods_python_ci_utilities.subprocess_get_output(cmd, check_rc=True)
+    except KeyError:
+        irods_python_ci_utilities.raise_not_implemented_for_distribution()
+
+
 def add_cmake_to_front_of_path():
     cmake_path = '/opt/irods-externals/cmake3.11.4-0/bin'
     os.environ['PATH'] = os.pathsep.join([cmake_path, os.environ['PATH']])
 
 
 def install_building_dependencies(externals_directory):
-    externals_list = ['irods-externals-cmake3.11.4-0',
-                      'irods-externals-avro1.9.0-0',
-                      'irods-externals-boost1.67.0-0',
-                      'irods-externals-clang-runtime6.0-0',
-                      'irods-externals-clang6.0-0',
-                      'irods-externals-cppzmq4.2.3-0',
-                      'irods-externals-json3.7.3-0',
-                      'irods-externals-libarchive3.3.2-1',
-                      'irods-externals-libs3e4197a5e-0',
-                      'irods-externals-zeromq4-14.1.6-0']
+    # The externals_list needs to include all dependencies, not the minimum set required for this plugin. If custom
+    # externals are being supplied via externals_directory, only the externals packages which exist in that directory
+    # will be installed.
+    externals_list = [
+        'irods-externals-avro1.9.0-0',
+        'irods-externals-boost1.67.0-0',
+        'irods-externals-catch22.3.0-0',
+        'irods-externals-clang-runtime6.0-0',
+        'irods-externals-clang6.0-0',
+        'irods-externals-cmake3.11.4-0',
+        'irods-externals-cppzmq4.2.3-0',
+        'irods-externals-fmt6.1.2-1',
+        'irods-externals-json3.7.3-0',
+        'irods-externals-libarchive3.3.2-1',
+        'irods-externals-libs3c0e278d2-0',
+        'irods-externals-nanodbc2.13.0-1',
+        'irods-externals-spdlog1.5.0-1',
+        'irods-externals-zeromq4-14.1.6-0'
+    ]
     if externals_directory == 'None' or externals_directory is None:
         irods_python_ci_utilities.install_irods_core_dev_repository()
         irods_python_ci_utilities.install_os_packages(externals_list)
     else:
+        # Make sure the local package repositories are up to date so package dependencies can also be installed.
+        update_local_package_repositories()
         package_suffix = irods_python_ci_utilities.get_package_suffix()
         os_specific_directory = irods_python_ci_utilities.append_os_specific_directory(externals_directory)
         externals = []
@@ -39,12 +66,7 @@ def install_building_dependencies(externals_directory):
     install_os_specific_dependencies()
 
 def install_os_specific_dependencies_apt():
-    if irods_python_ci_utilities.get_distribution() == 'Ubuntu': # cmake from externals requires newer libstdc++ on ub12
-        irods_python_ci_utilities.subprocess_get_output(['sudo', 'apt-get', 'update'], check_rc=True)
-        if irods_python_ci_utilities.get_distribution_version_major() == '12':
-            irods_python_ci_utilities.install_os_packages(['python-software-properties'])
-            irods_python_ci_utilities.subprocess_get_output(['sudo', 'add-apt-repository', '-y', 'ppa:ubuntu-toolchain-r/test'], check_rc=True)
-            irods_python_ci_utilities.install_os_packages(['libstdc++6'])
+    update_local_package_repositories()
     irods_python_ci_utilities.install_os_packages(['make', 'libssl-dev', 'libxml2-dev', 'libcurl4-gnutls-dev', 'gcc'])
 
 def install_os_specific_dependencies_yum():
diff --git a/irods_consortium_continuous_integration_test_hook.py b/irods_consortium_continuous_integration_test_hook.py
index b14e7e09..723ea462 100644
--- a/irods_consortium_continuous_integration_test_hook.py
+++ b/irods_consortium_continuous_integration_test_hook.py
@@ -3,6 +3,7 @@
 import glob
 import optparse
 import os
+import pwd
 import random
 import shutil
 import stat
@@ -27,11 +28,15 @@ def download_and_start_minio_server():
     root_username = ''.join(random.choice(string.ascii_letters) for i in list(range(10)))
     root_password = ''.join(random.choice(string.ascii_letters) for i in list(range(10)))
 
+    keypair_path = '/var/lib/irods/minio.keypair'
-    with open('/var/lib/irods/minio.keypair', 'w') as f:
+    with open(keypair_path, 'w') as f:
         f.write('%s\n' % root_username)
         f.write('%s\n' % root_password)
 
+    irods_user_info = pwd.getpwnam('irods')
+    os.chown(keypair_path, irods_user_info.pw_uid, irods_user_info.pw_gid)
+
     os.environ['MINIO_ROOT_USER'] = root_username
     os.environ['MINIO_ROOT_PASSWORD'] = root_password
diff --git a/packaging/resource_suite_s3_cache.py b/packaging/resource_suite_s3_cache.py
index 22301e86..7a5485c1 100644
--- a/packaging/resource_suite_s3_cache.py
+++ b/packaging/resource_suite_s3_cache.py
@@ -598,6 +598,281 @@ def test_multiple_s3_endpoints_replication_issue1858(self):
         self.admin.assert_icommand('iadmin rmresc s3archive2_1858')
 
+    def test_itouch_nonexistent_file__issue_6479(self):
+        replica_number_in_s3 = 1
+        filename = 'test_itouch_nonexistent_file__issue_6479'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+
+        try:
+            # Just itouch and ensure that the data object is created successfully.
+            self.user0.assert_icommand(['itouch', logical_path])
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, 0))
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+            # debugging
+            self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Trim the replica in cache so that we know we are working with the S3 replica.
+            self.user0.assert_icommand(['itrim', '-N1', '-n0', logical_path], 'STDOUT')
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, 0))
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+            # Ensure that the replica actually exists. A new replica will be made on the cache resource.
+            self.user0.assert_icommand(['iget', logical_path, '-'])
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, 2))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_istream_nonexistent_file__issue_6479(self):
+        replica_number_in_s3 = 1
+        filename = 'test_istream_nonexistent_file__issue_6479'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        content = 'streamin and screamin'
+
+        try:
+            # istream to a new data object and ensure that it is created successfully.
+            self.user0.assert_icommand(['istream', 'write', logical_path], input=content)
+
+            # debugging
+            self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, 0))
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+            # Trim the replica in cache so that we know we are working with the S3 replica.
+            self.user0.assert_icommand(['itrim', '-N1', '-n0', logical_path], 'STDOUT')
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, 0))
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+            # Ensure that the replica actually contains the contents streamed into it. A new replica will be made on
+            # the cache resource.
+            self.user0.assert_icommand(['iget', logical_path, '-'], 'STDOUT', content)
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+            self.assertTrue(lib.replica_exists(self.user0, logical_path, 2))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_iput_with_invalid_secret_key_and_overwrite__issue_6154(self):
+        replica_number_in_s3 = 1
+        filename = 'test_iput_with_invalid_secret_key__issue_6154'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+
+        try:
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Put the physical file, which should fail, leaving a data object with a stale replica. The main
+                # purpose of this test is to ensure that the system is in a state from which it can recover.
+                self.user0.assert_icommand(['iput', physical_path, logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Now overwrite the data object with wild success. This is here to ensure that things are back to normal.
+            self.user0.assert_icommand(['iput', '-f', physical_path, logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_iput_with_invalid_secret_key_and_remove__issue_6154(self):
+        replica_number_in_s3 = 1
+        filename = 'test_iput_with_invalid_secret_key__issue_6154'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+
+        try:
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Put the physical file, which should fail, leaving a data object with a stale replica. The main
+                # purpose of this test is to ensure that the system is in a state from which it can recover.
+                self.user0.assert_icommand(['iput', physical_path, logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+                # Attempt to remove the data object, which fails due to the invalid secret key.
+                self.user0.assert_icommand(['irm', '-f', logical_path], 'STDERR', 'S3_FILE_UNLINK_ERR')
+                self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Attempt to remove the data object, which succeeds because the S3 object doesn't exist anyway.
+            self.user0.assert_icommand(['irm', '-f', logical_path])
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, 0))
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_iput_and_replicate_with_invalid_secret_key__issue_6154(self):
+        replica_number_in_s3 = 2
+        filename = 'test_iput_and_replicate_with_invalid_secret_key__issue_6154'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+        test_resc = 'test_resc'
+
+        try:
+            lib.create_ufs_resource(test_resc, self.admin)
+
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            # Put the physical file to the test resource. The test will replicate to S3.
+            self.user0.assert_icommand(['iput', '-R', test_resc, physical_path, logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Replicate the data object to the compound resource hierarchy. The replica in the cache should be good
+                # and the replica in the archive should be stale due to the invalid secret key.
+                self.user0.assert_icommand(['irepl', logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 1))
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Trim the cache replica so that we can try the replication again with a good secret key.
+            self.user0.assert_icommand(['itrim', '-N1', '-n1', logical_path], 'STDOUT')
+            self.user0.assert_icommand(['itrim', '-N1', '-n', str(replica_number_in_s3), logical_path], 'STDOUT')
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, 1))
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+            # Now replicate with a good set of S3 keys and watch for success.
+            self.user0.assert_icommand(['irepl', logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 1))
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in range(0, 3):
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+            lib.remove_resource(test_resc, self.admin)
+
+
+    def test_iput_and_icp_with_invalid_secret_key__issue_6154(self):
+        replica_number_in_s3 = 1
+        filename = 'test_iput_and_icp_with_invalid_secret_key__issue_6154'
+        original_logical_path = os.path.join(self.user0.session_collection, filename + '_orig')
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+        test_resc = 'test_resc'
+
+        try:
+            lib.create_ufs_resource(test_resc, self.admin)
+
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            # Put the physical file to the test resource. The test will copy to S3.
+            self.user0.assert_icommand(['iput', '-R', test_resc, physical_path, original_logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, os.path.basename(original_logical_path), 0))
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Copy the physical file, which should fail, leaving a data object with a stale replica. The main
+                # purpose of this test is to ensure that the system is in a state from which it can recover.
+                self.user0.assert_icommand(['icp', original_logical_path, logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Now overwrite the data object with wild success. This is here to ensure that things are back to normal.
+            self.user0.assert_icommand(['icp', '-f', original_logical_path, logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+        finally:
+            self.user0.run_icommand(['irm', '-f', original_logical_path])
+            lib.remove_resource(test_resc, self.admin)
+
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
 class Test_S3_Cache_Glacier_Base(session.make_sessions_mixin([('otherrods', 'rods')], [('alice', 'apass'), ('bobby', 'bpass')])):
 
     def __init__(self, *args, **kwargs):
diff --git a/packaging/resource_suite_s3_nocache.py b/packaging/resource_suite_s3_nocache.py
index 7ec65a7c..142c4d1f 100644
--- a/packaging/resource_suite_s3_nocache.py
+++ b/packaging/resource_suite_s3_nocache.py
@@ -1964,6 +1964,241 @@ def test_missing_keyfile(self):
         self.admin.assert_icommand("iadmin rmresc s3resc1", 'EMPTY')
 
+    def test_itouch_nonexistent_file__issue_6479(self):
+        replica_number_in_s3 = 0
+        filename = 'test_itouch_nonexistent_file__issue_6479'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+
+        try:
+            # Just itouch and ensure that the data object is created successfully.
+            self.user0.assert_icommand(['itouch', logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+            # Ensure that the replica actually exists.
+            self.user0.assert_icommand(['iget', logical_path, '-'])
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            self.admin.run_icommand([
+                'iadmin', 'modrepl',
+                'logical_path', logical_path,
+                'replica_number', str(replica_number_in_s3),
+                'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_istream_nonexistent_file__issue_6479(self):
+        replica_number_in_s3 = 0
+        filename = 'test_istream_nonexistent_file__issue_6479'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        content = 'streamin and screamin'
+
+        try:
+            # istream to a new data object and ensure that it is created successfully.
+            self.user0.assert_icommand(['istream', 'write', logical_path], input=content)
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+            # Ensure that the replica actually contains the contents streamed into it.
+            self.user0.assert_icommand(['iget', logical_path, '-'], 'STDOUT', content)
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            self.admin.run_icommand([
+                'iadmin', 'modrepl',
+                'logical_path', logical_path,
+                'replica_number', str(replica_number_in_s3),
+                'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_iput_with_invalid_secret_key_and_overwrite__issue_6154(self):
+        replica_number_in_s3 = 0
+        filename = 'test_iput_with_invalid_secret_key__issue_6154'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+
+        try:
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Put the physical file, which should fail, leaving a data object with a single stale replica. The main
+                # purpose of this test is to ensure that the system is in a state from which it can recover.
+                self.user0.assert_icommand(['iput', physical_path, logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Now overwrite the data object with wild success. This is here to ensure that things are back to normal.
+            self.user0.assert_icommand(['iput', '-f', physical_path, logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            self.admin.run_icommand([
+                'iadmin', 'modrepl',
+                'logical_path', logical_path,
+                'replica_number', str(replica_number_in_s3),
+                'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_iput_with_invalid_secret_key_and_remove__issue_6154(self):
+        replica_number_in_s3 = 0
+        filename = 'test_iput_with_invalid_secret_key__issue_6154'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+
+        try:
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Put the physical file, which should fail, leaving a data object with a single stale replica. The main
+                # purpose of this test is to ensure that the system is in a state from which it can recover.
+                self.user0.assert_icommand(['iput', physical_path, logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+                # Attempt to remove the data object, which fails due to the invalid secret key.
+                self.user0.assert_icommand(['irm', '-f', logical_path], 'STDERR', 'S3_FILE_UNLINK_ERR')
+                self.assertTrue(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Attempt to remove the data object, which succeeds because the S3 object doesn't exist anyway.
+            self.user0.assert_icommand(['irm', '-f', logical_path])
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, 0))
+            self.assertFalse(lib.replica_exists(self.user0, logical_path, replica_number_in_s3))
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            self.admin.run_icommand([
+                'iadmin', 'modrepl',
+                'logical_path', logical_path,
+                'replica_number', str(replica_number_in_s3),
+                'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
+    def test_iput_and_replicate_with_invalid_secret_key__issue_6154(self):
+        replica_number_in_s3 = 1
+        filename = 'test_iput_and_replicate_with_invalid_secret_key__issue_6154'
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+        test_resc = 'test_resc'
+
+        try:
+            lib.create_ufs_resource(test_resc, self.admin)
+
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            # Put the physical file to the test resource. The test will replicate to S3.
+            self.user0.assert_icommand(['iput', '-R', test_resc, physical_path, logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, 0))
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Replicate the data object to the S3 resource. The replica on the test resource should be good
+                # and the replica on the S3 resource should be stale due to the invalid secret key.
+                self.user0.assert_icommand(['irepl', logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Now replicate with a good set of S3 keys and watch for success.
+            self.user0.assert_icommand(['irepl', logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+            # debugging
+            self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+        finally:
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+            lib.remove_resource(test_resc, self.admin)
+
+
+    def test_iput_and_icp_with_invalid_secret_key__issue_6154(self):
+        replica_number_in_s3 = 0
+        filename = 'test_iput_and_icp_with_invalid_secret_key__issue_6154'
+        original_logical_path = os.path.join(self.user0.session_collection, filename + '_orig')
+        logical_path = os.path.join(self.user0.session_collection, filename)
+        physical_path = os.path.join(self.user0.local_session_dir, filename)
+        file_size_in_bytes = 10
+        access_key = 'nopenopenopenope'
+        secret_key = 'wrongwrongwrong!'
+        test_resc = 'test_resc'
+
+        try:
+            lib.create_ufs_resource(test_resc, self.admin)
+
+            lib.make_file(physical_path, file_size_in_bytes)
+
+            # Put the physical file to the test resource. The test will copy to S3.
+            self.user0.assert_icommand(['iput', '-R', test_resc, physical_path, original_logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, os.path.basename(original_logical_path), 0))
+
+            with lib.file_backed_up(self.keypairfile):
+                # Invalidate the existing keypairfile so that the S3 resource cannot communicate with the S3 backend.
+                with open(self.keypairfile, 'w') as f:
+                    f.write('{}\n{}'.format(access_key, secret_key))
+
+                # Copy the physical file, which should fail, leaving a data object with a stale replica. The main
+                # purpose of this test is to ensure that the system is in a state from which it can recover.
+                self.user0.assert_icommand(['icp', original_logical_path, logical_path], 'STDERR', 'S3_PUT_ERROR')
+                self.assertEqual(str(0), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+                # debugging
+                self.user0.assert_icommand(['ils', '-L', os.path.dirname(logical_path)], 'STDOUT', filename)
+
+            # Now overwrite the data object with wild success. This is here to ensure that things are back to normal.
+            self.user0.assert_icommand(['icp', '-f', original_logical_path, logical_path])
+            self.assertEqual(str(1), lib.get_replica_status(self.user0, filename, replica_number_in_s3))
+
+        finally:
+            self.user0.run_icommand(['irm', '-f', original_logical_path])
+            lib.remove_resource(test_resc, self.admin)
+
+            # Set the replica status here so that we can remove the object even if it is stuck in the locked status.
+            for replica_number in [0, 1]:
+                self.admin.run_icommand([
+                    'iadmin', 'modrepl',
+                    'logical_path', logical_path,
+                    'replica_number', str(replica_number),
+                    'DATA_REPL_STATUS', '0'])
+            self.user0.run_icommand(['irm', '-f', logical_path])
+
+
 class Test_S3_NoCache_Glacier_Base(session.make_sessions_mixin([('otherrods', 'rods')], [('alice', 'apass'), ('bobby', 'bpass')])):
 
     def __init__(self, *args, **kwargs):
diff --git a/s3/libirods_s3.cpp b/s3/libirods_s3.cpp
index 4b150094..098e63da 100644
--- a/s3/libirods_s3.cpp
+++ b/s3/libirods_s3.cpp
@@ -1669,7 +1669,7 @@ irods::error s3PutCopyFile(
             if(data.status >= 0) {
                 msg += fmt::format(" - \"{}\"", S3_get_status_name((S3Status)data.status));
             }
-            ret = ERROR(S3_PUT_ERROR, msg);
+            result = ERROR(S3_PUT_ERROR, msg);
         }
 
         // Clear up the S3PutProperties, if it exists
diff --git a/s3/s3_operations.cpp b/s3/s3_operations.cpp
index f1852dd1..02500b4b 100644
--- a/s3/s3_operations.cpp
+++ b/s3/s3_operations.cpp
@@ -934,7 +934,9 @@ namespace irods_s3 {
        // to be created
        if (!data.dstream_ptr) {
            char buff[1];
-           s3_file_write_operation(_ctx, buff, 0);
+           if (const auto err = s3_file_write_operation(_ctx, buff, 0); !err.ok()) {
+               return PASS(err);
+           }
            data = fd_data.get(fd);
        }