diff --git a/datalab/datalab_session/data_operations/data_operation.py b/datalab/datalab_session/data_operations/data_operation.py
index 042535b..a9aa37a 100644
--- a/datalab/datalab_session/data_operations/data_operation.py
+++ b/datalab/datalab_session/data_operations/data_operation.py
@@ -50,7 +50,7 @@ def wizard_description():
         """
     @abstractmethod
-    def operate(self, cache_key, input_files):
+    def operate(self):
         """
         The method that performs the data operation. It should periodically update the
         percent completion during its operation. It should set the output and status
         into the cache when done.
diff --git a/datalab/datalab_session/data_operations/long.py b/datalab/datalab_session/data_operations/long.py
index 184e769..7f8d77c 100644
--- a/datalab/datalab_session/data_operations/long.py
+++ b/datalab/datalab_session/data_operations/long.py
@@ -36,7 +36,7 @@ def wizard_description():
             }
         }
 
-    def operate(self, cache_key, input_files):
+    def operate(self):
         num_files = len(self.input_data.get('input_files', []))
         per_image_timeout = ceil(float(self.input_data.get('duration', 60.0)) / num_files)
         for i, file in enumerate(self.input_data.get('input_files', [])):
diff --git a/datalab/datalab_session/data_operations/median.py b/datalab/datalab_session/data_operations/median.py
index 82b494c..e113c8d 100644
--- a/datalab/datalab_session/data_operations/median.py
+++ b/datalab/datalab_session/data_operations/median.py
@@ -38,23 +38,25 @@ def wizard_description():
             }
         }
 
-    def operate(self, cache_key, input_files):
+    def operate(self):
 
-        log.info(f'Executing median operation on {len(input_files)} files')
+        input = self.input_data.get('input_files', [])
 
-        image_data_list = self.get_fits_npdata(input_files, percent=40.0, cur_percent=0.0)
+        log.info(f'Executing median operation on {len(input)} files')
+
+        image_data_list = self.get_fits_npdata(input, percent=0.4, cur_percent=0.0)
 
         stacked_data = stack_arrays(image_data_list)
 
         # using the numpy library's median method
         median = np.median(stacked_data, axis=2)
 
-        hdu_list = create_fits(cache_key, median)
+        hdu_list = create_fits(self.cache_key, median)
 
-        output = self.create_and_store_fits(hdu_list, percent=60.0, cur_percent=40.0)
+        output = self.create_and_store_fits(hdu_list, percent=0.6, cur_percent=0.4)
 
         output = {'output_files': output}
 
         log.info(f'Median operation output: {output}')
-        self.set_percent_completion(1)
+        self.set_percent_completion(1.0)
         self.set_output(output)
diff --git a/datalab/datalab_session/data_operations/noop.py b/datalab/datalab_session/data_operations/noop.py
index 172a2f5..9905ef4 100644
--- a/datalab/datalab_session/data_operations/noop.py
+++ b/datalab/datalab_session/data_operations/noop.py
@@ -40,7 +40,7 @@ def wizard_description():
            }
        }
 
-    def operate(self, cache_key, input_files):
+    def operate(self):
        print("No-op triggered!")
        output = {
            'output_files': self.input_data.get('input_files', [])
diff --git a/datalab/datalab_session/tasks.py b/datalab/datalab_session/tasks.py
index 0b8b7f3..18411ca 100644
--- a/datalab/datalab_session/tasks.py
+++ b/datalab/datalab_session/tasks.py
@@ -16,7 +16,4 @@ def execute_data_operation(data_operation_name: str, input_data: dict):
     if operation_class is None:
         raise NotImplementedError("Operation not implemented!")
     else:
-        operation = operation_class(input_data)
-        cache_key = operation.generate_cache_key()
-
-        operation.operate(cache_key, input_data.get('input_files', []))
+        operation_class(input_data).operate()