diff --git a/README.md b/README.md index 8681938..0118335 100644 --- a/README.md +++ b/README.md @@ -84,3 +84,10 @@ To put data to a Spectra S3 appliance you have to do it inside the context of wh [An example of getting data with the Python SDK](samples/gettingData.py) [An example of how give objects on the server a different name than what is on the filesystem, and how to delete objects by folder](samples/renaming.py) + +## Creating a New Release +Update the version of the SDK before creating a new release. The format is `<major>.<minor>.<patch>`, where the +`<major>.<minor>` numbers must match the version of BP. The `<patch>` is an incrementing number that increments with +each SDK release for a given major/minor release. + +The release number is specified in `setup.py`. diff --git a/ds3/ds3.py b/ds3/ds3.py index 5d9e6bc..d5b200c 100644 --- a/ds3/ds3.py +++ b/ds3/ds3.py @@ -330,8 +330,10 @@ def __init__(self): 'DefaultVerifyDataPriorToImport': None, 'Id': None, 'InstanceId': None, + 'IomCacheLimitationPercent': None, 'IomEnabled': None, 'LastHeartbeat': None, + 'MaxAggregatedBlobsPerChunk': None, 'PartiallyVerifyLastPercentOfTapes': None, 'UnavailableMediaPolicy': None, 'UnavailablePoolMaxJobRetryInMins': None, @@ -3169,7 +3171,7 @@ def __init__(self, full_details=None): class ModifyDataPathBackendSpectraS3Request(AbstractRequest): - def __init__(self, activated=None, allow_new_job_requests=None, auto_activate_timeout_in_mins=None, auto_inspect=None, cache_available_retry_after_in_seconds=None, default_verify_data_after_import=None, default_verify_data_prior_to_import=None, iom_enabled=None, partially_verify_last_percent_of_tapes=None, unavailable_media_policy=None, unavailable_pool_max_job_retry_in_mins=None, unavailable_tape_partition_max_job_retry_in_mins=None): + def __init__(self, activated=None, allow_new_job_requests=None, auto_activate_timeout_in_mins=None, auto_inspect=None, cache_available_retry_after_in_seconds=None, default_verify_data_after_import=None, 
default_verify_data_prior_to_import=None, iom_cache_limitation_percent=None, iom_enabled=None, max_aggregated_blobs_per_chunk=None, partially_verify_last_percent_of_tapes=None, unavailable_media_policy=None, unavailable_pool_max_job_retry_in_mins=None, unavailable_tape_partition_max_job_retry_in_mins=None): super(ModifyDataPathBackendSpectraS3Request, self).__init__() if activated is not None: self.query_params['activated'] = activated @@ -3185,8 +3187,12 @@ def __init__(self, activated=None, allow_new_job_requests=None, auto_activate_ti self.query_params['default_verify_data_after_import'] = default_verify_data_after_import if default_verify_data_prior_to_import is not None: self.query_params['default_verify_data_prior_to_import'] = default_verify_data_prior_to_import + if iom_cache_limitation_percent is not None: + self.query_params['iom_cache_limitation_percent'] = iom_cache_limitation_percent if iom_enabled is not None: self.query_params['iom_enabled'] = iom_enabled + if max_aggregated_blobs_per_chunk is not None: + self.query_params['max_aggregated_blobs_per_chunk'] = max_aggregated_blobs_per_chunk if partially_verify_last_percent_of_tapes is not None: self.query_params['partially_verify_last_percent_of_tapes'] = partially_verify_last_percent_of_tapes if unavailable_media_policy is not None: diff --git a/setup.py b/setup.py index dbd325a..9a817c2 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ from distutils.core import setup setup(name='DS3 SDK', - version='5.3.0', + version='5.4.0', description='Python3 SDK and CLI for Spectra S3', author_email='developer@spectralogic.com', packages=['ds3'])