From c519f1ba9649121dad31da42b4d89eceda317293 Mon Sep 17 00:00:00 2001
From: Kay Robbins <1189050+VisLab@users.noreply.github.com>
Date: Thu, 26 Sep 2024 19:33:18 -0500
Subject: [PATCH] Updated the actions, did test cleanup

---
 .github/workflows/ci.yaml                    | 53 ++-
 tests/tools/analysis/test_hed_tag_manager.py | 30 +-
 tests/tools/analysis/test_hed_type_defs.py   |  6 +-
 tests/tools/analysis/test_key_map.py         |  2 +-
 tests/tools/analysis/test_sequence_map.py    |  4 +-
 tests/tools/bids/test_bids_dataset.py        |  2 +-
 .../tools/remodeling/cli/test_run_remodel.py |  2 +-
 .../remodeling/operations/test_base_op.py    |  4 +-
 .../operations/test_number_groups.py         | 12 +-
 .../operations/test_number_rows_op.py        |  1 +
 .../test_summarize_column_values_op.py       |  1 +
 .../test_summarize_definitions_op.py         |  1 +
 .../operations/test_summarize_hed_tags_op.py |  7 +-
 .../operations/test_summarize_hed_type_op.py |  2 +-
 tests/tools/remodeling/test_validator.py     | 354 +++++++++---------
 tests/validator/test_def_validator.py        |  4 +-
 tests/validator/test_sidecar_validator.py    |  2 -
 tests/validator/test_tag_validator.py        |  4 +-
 18 files changed, 268 insertions(+), 223 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 309e4ae3..4fc1f6e7 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -48,6 +48,7 @@ jobs:
           path: ${{ env.pythonLocation }}
           key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('docs/requirements.txt') }}
 
+      # Install dependencies
       - name: Install dependencies
         run: |
           python -m pip install --upgrade --upgrade-strategy eager pip
@@ -56,6 +57,7 @@
           pip install -r requirements.txt
           pip install -r docs/requirements.txt
 
+      # Run flake8 only for Python 3.9
       - name: Lint with flake8
         if: matrix.python-version == '3.9'
         run: |
           pip install flake8
           flake8 . --count --show-source --statistics --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
 
-      - name: Test with unittest
+
+
+      # Run unittest with coverage for Python 3.9
+      - name: Test with unittest and coverage (v3.9)
         env:
           HED_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          if [ "${{ matrix.python-version }}" == "3.9" ]; then
+        if: matrix.python-version == '3.9'
+        run: |
           pip install coverage
           coverage run -m unittest discover tests
-          else
-            python -m unittest tests
-          fi
+        continue-on-error: true
+
+
-      - name: Run spec_test coverage
+      # Run unittest without coverage for non Python 3.9
+      - name: Test with unittest (non-3.9)
         env:
           HED_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        if: matrix.python-version != '3.9'
         run: |
-          if ["${{ matrix.python-version }}" == "3.9" ]; then
-          coverage run --append -m unittest discover tests/spec_tests
-          ls -a
-          continue-on-error: true
-          if
+          python -m unittest discover tests
+        continue-on-error: true
+
+
+
+      # Run spec tests with coverage for Python 3.9
+      - name: Run spec_test coverage
+        env:
+          HED_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        if: matrix.python-version == '3.9'
+        run: |
+          coverage run --append -m unittest discover tests/spec_tests
+          ls -a
+        continue-on-error: true
+
+
+      # Run spec tests without coverage for non Python 3.9
+      - name: Run spec_test
+        env:
+          HED_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        if: matrix.python-version != '3.9'
+        run: |
+          python -m unittest discover tests/spec_tests
+        continue-on-error: true
 
-      - name: Archive code coverage results
+      # Archive code coverage results for Python 3.9
+      - name: Archive code coverage results for Python 3.9
         if: ${{matrix.python-version == '3.9'}}
         uses: actions/upload-artifact@v4
         with:
diff --git a/tests/tools/analysis/test_hed_tag_manager.py b/tests/tools/analysis/test_hed_tag_manager.py
index cca0b551..c7cb40ea 100644
--- a/tests/tools/analysis/test_hed_tag_manager.py
+++ b/tests/tools/analysis/test_hed_tag_manager.py
@@ -76,24 +76,40 @@ def test_constructor_from_tabular_input(self):
         tag_man1 = HedTagManager(EventManager(self.input_data, self.schema))
         self.assertIsInstance(tag_man1, HedTagManager)
         hed_objs1a = tag_man1.get_hed_objs(include_context=False, replace_defs=False)
+        self.assertNotIn('Event-context', str(hed_objs1a[1]))
+        self.assertIn('Def', str(hed_objs1a[1]))
+        self.assertNotIn('Condition-variable', str(hed_objs1a[1]))
         hed_objs1b = tag_man1.get_hed_objs(include_context=True, replace_defs=False)
+        self.assertIn('Event-context', str(hed_objs1b[1]))
+        self.assertIn('Def', str(hed_objs1b[1]))
+        self.assertNotIn('Condition-variable', str(hed_objs1b[1]))
         hed_objs1c = tag_man1.get_hed_objs(include_context=False, replace_defs=True)
+        self.assertNotIn('Event-context', str(hed_objs1c[1]))
+        self.assertNotIn('Def', str(hed_objs1c[1]))
+        self.assertIn('Condition-variable', str(hed_objs1c[1]))
         hed_objs1d = tag_man1.get_hed_objs(include_context=True, replace_defs=True)
+        self.assertIn('Event-context', str(hed_objs1d[1]))
+        self.assertNotIn('Def', str(hed_objs1d[1]))
+        self.assertIn('Condition-variable', str(hed_objs1d[1]))
         tag_man2 = HedTagManager(event_man, remove_types=['Condition-variable', 'Task'])
         hed_objs2a = tag_man2.get_hed_objs(include_context=False, replace_defs=False)
+        self.assertNotIn('Condition-variable', str(hed_objs2a[1]))
         hed_objs2b = tag_man2.get_hed_objs(include_context=True, replace_defs=False)
-        hed_objs1c = tag_man2.get_hed_objs(include_context=False, replace_defs=True)
-        hed_objs1d = tag_man2.get_hed_objs(include_context=True, replace_defs=True)
+        self.assertNotIn('Condition-variable', str(hed_objs2b[1]))
+        hed_objs2c = tag_man2.get_hed_objs(include_context=False, replace_defs=True)
+        self.assertNotIn('Condition-variable', str(hed_objs2c[1]))
+        hed_objs2d = tag_man2.get_hed_objs(include_context=True, replace_defs=True)
+        self.assertNotIn('Condition-variable', str(hed_objs2d[1]))
         self.assertIsInstance(tag_man2, HedTagManager)
         self.assertIsInstance(tag_man2, HedTagManager)
 
     def test_get_hed_objs(self):
         event_man = EventManager(self.input_data, self.schema)
-        tag_man1 = HedTagManager(EventManager(self.input_data, self.schema))
-        # tag_man = HedTagManager(event_man, remove_types=['Condition-variable', 'Task'])
-        # hed_objs = tag_man.get_hed_objs()
-        # self.assertIsInstance(hed_objs, list)
-        # self.assertEqual(len(hed_objs), len(event_man.onsets))
+        tag_man = HedTagManager(EventManager(self.input_data, self.schema))
+        self.assertIsInstance(tag_man, HedTagManager)
+        hed_objs = tag_man.get_hed_objs()
+        self.assertIsInstance(hed_objs, list)
+        self.assertEqual(len(hed_objs), len(event_man.onsets))
 
     # def test_constructor_variable_caps(self):
     #     sidecar1 = Sidecar(self.sidecar_path, name='face_sub1_json')
diff --git a/tests/tools/analysis/test_hed_type_defs.py b/tests/tools/analysis/test_hed_type_defs.py
index 9e64c329..49e9ba21 100644
--- a/tests/tools/analysis/test_hed_type_defs.py
+++ b/tests/tools/analysis/test_hed_type_defs.py
@@ -87,11 +87,11 @@ def test_get_type_values(self):
         item1 = HedString("Sensory-event,((Red,Blue)),", self.schema)
         vars1 = def_man.get_type_values(item1)
         self.assertFalse(vars1, "get_type_values should return None if no condition type_variables")
-        item2 = HedString(f"Sensory-event,(Def/Cond1,(Red,Blue,Condition-variable/Trouble))", self.schema)
+        item2 = HedString("Sensory-event,(Def/Cond1,(Red,Blue,Condition-variable/Trouble))", self.schema)
         vars2 = def_man.get_type_values(item2)
         self.assertEqual(1, len(vars2), "get_type_values should return correct number of condition type_variables")
-        item3 = HedString(f"Sensory-event,(Def/Cond1,(Red,Blue,Condition-variable/Trouble)),"
-                          f"(Def/Cond2),Green,Yellow,Def/Cond5, Def/Cond6/4, Description/Tell me", self.schema)
+        item3 = HedString("Sensory-event,(Def/Cond1,(Red,Blue,Condition-variable/Trouble))," +
+                          "(Def/Cond2),Green,Yellow,Def/Cond5, Def/Cond6/4, Description/Tell me", self.schema)
         vars3 = def_man.get_type_values(item3)
         self.assertEqual(len(vars3), 5, "get_type_values should return multiple condition type_variables")
diff --git a/tests/tools/analysis/test_key_map.py b/tests/tools/analysis/test_key_map.py
index 4ae2860d..30021b2b 100644
--- a/tests/tools/analysis/test_key_map.py
+++ b/tests/tools/analysis/test_key_map.py
@@ -61,7 +61,7 @@ def test_make_template(self):
         self.assertEqual(len(df1.columns), 1, "make_template should return 1 column single key, no additional columns")
         df2 = t_map.make_template(show_counts=True)
         self.assertEqual(len(df2.columns), 2, "make_template returns an extra column for counts")
-        
+
         t_map2 = KeyMap(['event_type', 'type'])
         t_map2.update(self.stern_test1_path)
         df3 = t_map2.make_template()
diff --git a/tests/tools/analysis/test_sequence_map.py b/tests/tools/analysis/test_sequence_map.py
index f1133418..4c0ae08e 100644
--- a/tests/tools/analysis/test_sequence_map.py
+++ b/tests/tools/analysis/test_sequence_map.py
@@ -12,10 +12,10 @@ def setUpClass(cls):
                                 '/sub-01/ses-01/eeg/sub-01_ses-01_task-DriveRandomSound_run-1_events.tsv')
 
     def test_constructor(self):
-        codes1 = ['1111', '1112', '1121', '1122', '1131', '1132', '1141',
+        codes1 = ['1111', '1112', '1121', '1122', '1131', '1132', '1141',
                   '1142', '1311', '1312', '1321', '1322', '4210', '4220', '4230', '4311', '4312']
-        
+
         smap1 = SequenceMap(codes=codes1)
         self.assertIsInstance(smap1, SequenceMap)
         # df = get_new_dataframe(self.events_path)
diff --git a/tests/tools/bids/test_bids_dataset.py b/tests/tools/bids/test_bids_dataset.py
index a1ca622c..ac169608 100644
--- a/tests/tools/bids/test_bids_dataset.py
+++ b/tests/tools/bids/test_bids_dataset.py
@@ -89,7 +89,7 @@ def test_validator_types(self):
     def test_with_schema_group(self):
         x = load_schema_version(["score_2.0.0", "test:testlib_1.0.2"])
         bids = BidsDataset(self.library_path, schema=x, tabular_types=["participants"])
-        self.assertIsInstance(bids, BidsDataset, 
+        self.assertIsInstance(bids, BidsDataset,
                               "BidsDataset with libraries should create a valid object from valid dataset")
         parts = bids.get_tabular_group("participants")
         self.assertIsInstance(parts, BidsFileGroup, "BidsDataset participants should be a BidsFileGroup")
diff --git a/tests/tools/remodeling/cli/test_run_remodel.py b/tests/tools/remodeling/cli/test_run_remodel.py
index 6e1a74a7..8bc59e6c 100644
--- a/tests/tools/remodeling/cli/test_run_remodel.py
+++ b/tests/tools/remodeling/cli/test_run_remodel.py
@@ -74,7 +74,7 @@ def test_parse_arguments(self):
         with self.assertRaises(ValueError) as context3:
             parse_arguments(arg_list3)
         self.assertEqual(context3.exception.args[0], "UnableToFullyParseOperations")
-        
+
     def test_parse_tasks(self):
         tasks1 = parse_tasks(self.files, "*")
         self.assertIn('stopsignal', tasks1)
diff --git a/tests/tools/remodeling/operations/test_base_op.py b/tests/tools/remodeling/operations/test_base_op.py
index d79a7073..1162405b 100644
--- a/tests/tools/remodeling/operations/test_base_op.py
+++ b/tests/tools/remodeling/operations/test_base_op.py
@@ -20,7 +20,7 @@ class TestOp(BaseOp):
 
     def do_op(self, dispatcher, df, name, sidecar=None):
         return df
-    
+
     @staticmethod
     def validate_input_data(parameters):
         return []
@@ -61,7 +61,7 @@
 
             def do_op(self, dispatcher, df, name, sidecar=None):
                 return df
-    
+
         with self.assertRaises(TypeError):
             TestOpNoName({})
diff --git a/tests/tools/remodeling/operations/test_number_groups.py b/tests/tools/remodeling/operations/test_number_groups.py
index ac82cdba..2dcfc8be 100644
--- a/tests/tools/remodeling/operations/test_number_groups.py
+++ b/tests/tools/remodeling/operations/test_number_groups.py
@@ -158,25 +158,25 @@ def test_number_groups_new_column(self):
         # df_check = pd.DataFrame(self.numbered_data, columns=self.numbered_columns)
         # df_test = pd.DataFrame(self.sample_data, columns=self.sample_columns)
         # df_new = op.do_op(self.dispatcher, df_test, self.file_name)
-        # 
+        #
         # self.assertTrue(list(df_new.columns) == list(self.numbered_columns),
         #                 "numbered_events should have the expected columns")
         # self.assertTrue(len(df_new) == len(df_test),
         #                 "numbered_events should have same length as original dataframe")
         # self.assertTrue(np.nanmax(df_new["number"]) == 5.0,
         #                 "max value in numbered_events should match the number of groups")
-        # 
+        #
         # # fill na to match postprocessing dispatcher
         # df_new = df_new.fillna('n/a')
        # self.assertTrue(np.array_equal(df_new.to_numpy(), df_check.to_numpy()),
         #                 "numbered_events should not differ from check")
-        # 
+        #
         # # Test that df has not been changed by the op
         # self.assertTrue(list(df.columns) == list(df_test.columns),
         #                 "number_rows should not change the input df columns")
         # self.assertTrue(np.array_equal(df.to_numpy(), df_test.to_numpy()),
         #                 "number_rows should not change the input df values")
-        # 
+        #
         # def test_existing_column_overwrite_true(self):
         #     # Test when existing column name is given with overwrite True
         #     parms = json.loads(self.json_overwrite_true_parms)
@@ -185,7 +185,7 @@
         #     df_test = pd.DataFrame(self.sample_data, columns=self.existing_sample_columns)
         #     df_check = pd.DataFrame(self.overwritten_data, columns=self.existing_sample_columns)
         #     df_new = op.do_op(self.dispatcher, df_test, self.file_name)
-        # 
+        #
         #     self.assertTrue(list(df_new.columns) == list(self.existing_sample_columns),
         #                     "numbered_events should have the same columns as original dataframe in case of overwrite")
         #     self.assertTrue(len(df_new) == len(df_test),
@@ -195,7 +195,7 @@
         #     df_new = df_new.fillna('n/a')
         #     self.assertTrue(np.array_equal(df_new.to_numpy(), df_check.to_numpy()),
         #                     "numbered_events should not differ from check")
-        # 
+        #
         #     # Test that df has not been changed by the op
         #     self.assertTrue(list(df.columns) == list(df_test.columns),
         #                     "split_rows should not change the input df columns")
diff --git a/tests/tools/remodeling/operations/test_number_rows_op.py b/tests/tools/remodeling/operations/test_number_rows_op.py
index ff1b71c0..26cf50ac 100644
--- a/tests/tools/remodeling/operations/test_number_rows_op.py
+++ b/tests/tools/remodeling/operations/test_number_rows_op.py
@@ -177,6 +177,7 @@ def test_number_rows_new_column(self):
         # Test when new column name is given with overwrite unspecified (=False)
         parms = json.loads(self.json_parms)
         op = NumberRowsOp(parms)
+        self.assertIsInstance(op, NumberRowsOp)
         # df = pd.DataFrame(self.sample_data, columns=self.sample_columns)
         # df_check = pd.DataFrame(self.numbered_data, columns=self.numbered_columns)
         # df_test = pd.DataFrame(self.sample_data, columns=self.sample_columns)
diff --git a/tests/tools/remodeling/operations/test_summarize_column_values_op.py b/tests/tools/remodeling/operations/test_summarize_column_values_op.py
index 9e838d5d..9cd6c376 100644
--- a/tests/tools/remodeling/operations/test_summarize_column_values_op.py
+++ b/tests/tools/remodeling/operations/test_summarize_column_values_op.py
@@ -59,6 +59,7 @@ def test_do_ops(self):
                          "do_ops updating does not change number of categorical columns.")
         context = dispatch.summary_dicts['test summary']
         text_sum = context.get_text_summary()
+        self.assertIsInstance(text_sum, dict)
         self.assertEqual(len(context.summary_dict), 2)
 
     def test_get_summary(self):
diff --git a/tests/tools/remodeling/operations/test_summarize_definitions_op.py b/tests/tools/remodeling/operations/test_summarize_definitions_op.py
index 38e65daa..c3ecc2a1 100644
--- a/tests/tools/remodeling/operations/test_summarize_definitions_op.py
+++ b/tests/tools/remodeling/operations/test_summarize_definitions_op.py
@@ -94,6 +94,7 @@ def test_ambiguous_def_errors(self):
         context = cont.get("get_definition_summary", None)
         self.assertIsInstance(context, DefinitionSummary, "get_summary testing DefinitionSummary")
         summary1a = context.get_summary()
+        self.assertIsInstance(summary1a, dict)
 
 
 if __name__ == '__main__':
diff --git a/tests/tools/remodeling/operations/test_summarize_hed_tags_op.py b/tests/tools/remodeling/operations/test_summarize_hed_tags_op.py
index 8a80563f..5303b576 100644
--- a/tests/tools/remodeling/operations/test_summarize_hed_tags_op.py
+++ b/tests/tools/remodeling/operations/test_summarize_hed_tags_op.py
@@ -149,7 +149,8 @@ def test_quick3(self):
         input_data = TabularInput(df, sidecar=my_sidecar, name="myName")
         tag_man = HedTagManager(EventManager(input_data, my_schema), remove_types=remove_types)
         counts = HedTagCounts('myName', 2)
-        summary_dict = {}
+        self.assertIsInstance(counts, HedTagCounts)
+        self.assertIsInstance(tag_man, HedTagManager)
         # hed_objs = tag_man.get_hed_objs(include_context=include_context, replace_defs=replace_defs)
         # for hed in hed_objs:
         #     counts.update_event_counts(hed, 'myName')
@@ -211,13 +212,13 @@ def test_get_summary_text_summary(self):
         self.assertIn('Dataset', text_sum_none)
         self.assertIsInstance(text_sum_none['Dataset'], str)
         self.assertFalse(text_sum_none.get("Individual files", {}))
-        
+
         text_sum_consolidated = sum_context1.get_text_summary(individual_summaries="consolidated")
         self.assertIn('Dataset', text_sum_consolidated)
         self.assertIsInstance(text_sum_consolidated['Dataset'], str)
         self.assertFalse(text_sum_consolidated.get("Individual files", {}))
         self.assertGreater(len(text_sum_consolidated['Dataset']), len(text_sum_none['Dataset']))
-        
+
         text_sum_separate = sum_context1.get_text_summary(individual_summaries="separate")
         self.assertIn('Dataset', text_sum_separate)
         self.assertIsInstance(text_sum_separate['Dataset'], str)
diff --git a/tests/tools/remodeling/operations/test_summarize_hed_type_op.py b/tests/tools/remodeling/operations/test_summarize_hed_type_op.py
index c30b10ce..696ab227 100644
--- a/tests/tools/remodeling/operations/test_summarize_hed_type_op.py
+++ b/tests/tools/remodeling/operations/test_summarize_hed_type_op.py
@@ -79,7 +79,7 @@ def test_summary(self):
         self.assertEqual(len(summary2['Dataset']['Overall summary']['Files']), 2)
         summary2a = context2.get_summary(individual_summaries="separate")
         self.assertIsInstance(summary2a["Individual files"]["run-02"], dict)
-        
+
     def test_text_summary_with_levels(self):
         with open(self.summary_path, 'r') as fp:
             parms = json.load(fp)
diff --git a/tests/tools/remodeling/test_validator.py b/tests/tools/remodeling/test_validator.py
index c465ab3c..14854a1b 100644
--- a/tests/tools/remodeling/test_validator.py
+++ b/tests/tools/remodeling/test_validator.py
@@ -1,177 +1,177 @@
-import os
-import json
-import unittest
-from copy import deepcopy
-from hed.tools.remodeling.remodeler_validator import RemodelerValidator
-
-
-class Test(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        with open(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
-                                                '../data/remodel_tests/all_remodel_operations.json'))) as f:
-            cls.remodel_file = json.load(f)
-        cls.validator = RemodelerValidator()
-
-    @classmethod
-    def tearDownClass(cls):
-        pass
-
-    def test_validator_build(self):
-        pass
-
-    def test_validate_valid(self):
-        error_strings = self.validator.validate(self.remodel_file)
-        self.assertFalse(error_strings)
-
-    def test_validate_array(self):
-        wrong_input_type = {"operation": "remove_columns"}
-        error_strings = self.validator.validate(wrong_input_type)
-        self.assertEqual(error_strings[0],
-                         "Operations must be contained in a list or array. " +
-                         "This is also true for a single operation.")
-
-        no_operations = []
-        error_strings = self.validator.validate(no_operations)
-        self.assertEqual(error_strings[0],
-                         "There are no operations defined. Specify at least 1 operation for the remodeler to execute.")
-
-    def test_validate_operations(self):
-        invalid_operation_type = ["string"]
-        error_strings = self.validator.validate(invalid_operation_type)
-        self.assertEqual(error_strings[0], "Each operation must be defined in a dictionary: " +
-                         "string is not a dictionary object.")
-
-        invalid_operation_missing = [self.remodel_file[0].copy()]
-        del invalid_operation_missing[0]["description"]
-        error_strings = self.validator.validate(invalid_operation_missing)
-        self.assertEqual(error_strings[0], "Operation dictionary 1 is missing 'description'. " +
-                         "Every operation dictionary must specify the type of operation, a description, " +
-                         "and the operation parameters.")
-
-        invalid_operation_name = [self.remodel_file[0].copy()]
-        invalid_operation_name[0]["operation"] = "unlisted_operation"
-        error_strings = self.validator.validate(invalid_operation_name)
-        self.assertEqual(error_strings[0], "unlisted_operation is not a known remodeler operation. " +
-                         "See the documentation for valid operations.")
-
-    def test_validate_parameters(self):
-        missing_parameter = [deepcopy(self.remodel_file[0])]
-        del missing_parameter[0]["parameters"]["column_names"]
-        error_strings = self.validator.validate(missing_parameter)
-        self.assertEqual(error_strings[0],
-                         "Operation 1: The parameter column_names is missing. " +
-                         "column_names is a required parameter of remove_columns.")
-
-        missing_parameter_nested = [deepcopy(self.remodel_file[10])]
-        del missing_parameter_nested[0]["parameters"]["new_events"]["response"]["onset_source"]
-        error_strings = self.validator.validate(missing_parameter_nested)
-        self.assertEqual(error_strings[0],
-                         "Operation 1: The field onset_source is missing in response new_events. " +
-                         "onset_source is a required parameter of response new_events.")
-
-        invalid_parameter = [deepcopy(self.remodel_file[0])]
-        invalid_parameter[0]["parameters"]["invalid"] = "invalid_value"
-        error_strings = self.validator.validate(invalid_parameter)
-        self.assertEqual(error_strings[0], "Operation 1: Operation parameters for remove_columns " +
-                         "contain an unexpected field 'invalid'.")
-
-        invalid_parameter_nested = [deepcopy(self.remodel_file[10])]
-        invalid_parameter_nested[0]["parameters"]["new_events"]["response"]["invalid"] = "invalid_value"
-        error_strings = self.validator.validate(invalid_parameter_nested)
-        self.assertEqual(error_strings[0], "Operation 1: Operation parameters for response " +
-                         "new_events contain an unexpected field 'invalid'.")
-
-        invalid_type = [deepcopy(self.remodel_file[0])]
-        invalid_type[0]["parameters"]["column_names"] = 0
-        error_strings = self.validator.validate(invalid_type)
-        self.assertEqual(error_strings[0], "Operation 1: The value of column_names in the remove_columns operation " +
-                         "should be array. 0 is not a array.")
-
-        invalid_type_nested = [deepcopy(self.remodel_file[10])]
-        invalid_type_nested[0]["parameters"]["new_events"]["response"]["onset_source"] = {"key": "value"}
-        error_strings = self.validator.validate(invalid_type_nested)
-        self.assertEqual(error_strings[0], "Operation 1: The value of onset_source response new_events " +
-                         "in the split_rows operation should be array. {'key': 'value'} is not a array.")
-
-        empty_array = [deepcopy(self.remodel_file[0])]
-        empty_array[0]["parameters"]["column_names"] = []
-        error_strings = self.validator.validate(empty_array)
-        self.assertEqual(error_strings[0], "Operation 1: The list in column_names in the remove_columns " +
-                         "operation should have at least 1 item(s).")
-
-        empty_array_nested = [deepcopy(self.remodel_file[5])]
-        empty_array_nested[0]["parameters"]["map_list"][0] = []
-        error_strings = self.validator.validate(empty_array_nested)
-        self.assertEqual(error_strings[0], "Operation 1: The list in item 1 map_list in the remap_columns " +
-                         "operation should have at least 1 item(s).")
-
-        # invalid_value = [deepcopy(self.remodel_file[18])]
-        # invalid_value[0]["parameters"]["convert_to"] = "invalid_value"
-        # error_strings = validator.validate(invalid_value)
-        # self.assertEqual(error_strings[0], "Operation 1: Operation parameter convert_to, in the " +
-        #                  "convert_columns operation, contains and unexpected value. " +
-        #                  "Value should be one of ['str', 'int', 'float', 'fixed'].")
-
-        # value_dependency = [deepcopy(self.remodel_file[18])]
-        # value_dependency[0]["parameters"]["convert_to"] = "fixed"
-        # error_strings = validator.validate(value_dependency)
-        # self.assertEqual(error_strings[0], "Operation 1: The parameter decimal_places is missing. " +
-        #                  " The decimal_places is a required parameter of convert_columns.")
-
-        property_dependency = [deepcopy(self.remodel_file[1])]
-        del property_dependency[0]["parameters"]["factor_values"]
-        error_strings = self.validator.validate(property_dependency)
-        self.assertEqual(error_strings[0], "Operation 1: The parameter factor_names is missing: " +
-                         "factor_names is a required parameter of factor_column when ['factor_values'] is specified.")
-
-        double_item_in_array = [deepcopy(self.remodel_file[0])]
-        double_item_in_array[0]["parameters"]["column_names"] = ['response', 'response']
-        error_strings = self.validator.validate(double_item_in_array)
-        self.assertEqual(error_strings[0], "Operation 1: The list in column_names in the remove_columns " +
-                         "operation should only contain unique items.")
-
-        double_item_in_array_nested = [deepcopy(self.remodel_file[10])]
-        double_item_in_array_nested[0]["parameters"]["new_events"]["response"]["copy_columns"] = \
-            ['response', 'response']
-        error_strings = self.validator.validate(double_item_in_array_nested)
-        self.assertEqual(error_strings[0],
-                         "Operation 1: The list in copy_columns response new_events in the split_rows " +
-                         "operation should only contain unique items.")
-
-    def test_validate_parameter_data(self):
-        factor_column_validate = [deepcopy(self.remodel_file)[1]]
-        factor_column_validate[0]["parameters"]["factor_names"] = ["stopped"]
-        error_strings = self.validator.validate(factor_column_validate)
-        self.assertEqual(error_strings[0], "Operation 1 (factor_column): factor_names must be " +
-                         "same length as factor_values")
-
-        factor_hed_tags_validate = [deepcopy(self.remodel_file)[2]]
-        factor_hed_tags_validate[0]["parameters"]["query_names"] = ["correct"]
-        error_strings = self.validator.validate(factor_hed_tags_validate)
-        self.assertEqual(error_strings[0], "Operation 1 (factor_hed_tags): QueryNamesLengthBad: " +
-                         "The query_names length 1 must be empty or equal to the queries length 2.")
-
-        merge_consecutive_validate = [deepcopy(self.remodel_file)[4]]
-        merge_consecutive_validate[0]["parameters"]["match_columns"].append("trial_type")
-        error_strings = self.validator.validate(merge_consecutive_validate)
-        self.assertEqual(error_strings[0], "Operation 1 (merge_consecutive): column_name `trial_type` " +
-                         "cannot not be a match_column.")
-
-        remap_columns_validate_same_length = [deepcopy(self.remodel_file)[5]]
-        remap_columns_validate_same_length[0]["parameters"]["map_list"][0] = [""]
-        error_strings = self.validator.validate(remap_columns_validate_same_length)
-        self.assertEqual(error_strings[0], "Operation 1 (remap_columns): all map_list arrays must be of length 3.")
-
-        remap_columns_validate_right_length = [deepcopy(self.remodel_file[5])]
-        remap_columns_validate_right_length[0]["parameters"]["map_list"] = \
-            [["string1", "string2"], ["string3", "string4"]]
-        error_strings = self.validator.validate(remap_columns_validate_right_length)
-        self.assertEqual(error_strings[0], "Operation 1 (remap_columns): all map_list arrays must be of length 3.")
-
-        remap_columns_integer_sources = [deepcopy(self.remodel_file[5])]
-        remap_columns_integer_sources[0]["parameters"]["integer_sources"] = ["unknown_column"]
-        error_strings = self.validator.validate(remap_columns_integer_sources)
-        self.assertEqual(error_strings[0], "Operation 1 (remap_columns): the integer_sources {'unknown_column'} " +
-                         "are missing from source_columns.")
+import os
+import json
+import unittest
+from copy import deepcopy
+from hed.tools.remodeling.remodeler_validator import RemodelerValidator
+
+
+class Test(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        with open(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
+                                                '../data/remodel_tests/all_remodel_operations.json'))) as f:
+            cls.remodel_file = json.load(f)
+        cls.validator = RemodelerValidator()
+
+    @classmethod
+    def tearDownClass(cls):
+        pass
+
+    def test_validator_build(self):
+        pass
+
+    def test_validate_valid(self):
+        error_strings = self.validator.validate(self.remodel_file)
+        self.assertFalse(error_strings)
+
+    def test_validate_array(self):
+        wrong_input_type = {"operation": "remove_columns"}
+        error_strings = self.validator.validate(wrong_input_type)
+        self.assertEqual(error_strings[0],
+                         "Operations must be contained in a list or array. " +
+                         "This is also true for a single operation.")
+
+        no_operations = []
+        error_strings = self.validator.validate(no_operations)
+        self.assertEqual(error_strings[0],
+                         "There are no operations defined. Specify at least 1 operation for the remodeler to execute.")
+
+    def test_validate_operations(self):
+        invalid_operation_type = ["string"]
+        error_strings = self.validator.validate(invalid_operation_type)
+        self.assertEqual(error_strings[0], "Each operation must be defined in a dictionary: " +
+                         "string is not a dictionary object.")
+
+        invalid_operation_missing = [self.remodel_file[0].copy()]
+        del invalid_operation_missing[0]["description"]
+        error_strings = self.validator.validate(invalid_operation_missing)
+        self.assertEqual(error_strings[0], "Operation dictionary 1 is missing 'description'. " +
+                         "Every operation dictionary must specify the type of operation, a description, " +
+                         "and the operation parameters.")
+
+        invalid_operation_name = [self.remodel_file[0].copy()]
+        invalid_operation_name[0]["operation"] = "unlisted_operation"
+        error_strings = self.validator.validate(invalid_operation_name)
+        self.assertEqual(error_strings[0], "unlisted_operation is not a known remodeler operation. " +
" + + "See the documentation for valid operations.") + + def test_validate_parameters(self): + missing_parameter = [deepcopy(self.remodel_file[0])] + del missing_parameter[0]["parameters"]["column_names"] + error_strings = self.validator.validate(missing_parameter) + self.assertEqual(error_strings[0], + "Operation 1: The parameter column_names is missing. " + + "column_names is a required parameter of remove_columns.") + + missing_parameter_nested = [deepcopy(self.remodel_file[10])] + del missing_parameter_nested[0]["parameters"]["new_events"]["response"]["onset_source"] + error_strings = self.validator.validate(missing_parameter_nested) + self.assertEqual(error_strings[0], + "Operation 1: The field onset_source is missing in response new_events. " + + "onset_source is a required parameter of response new_events.") + + invalid_parameter = [deepcopy(self.remodel_file[0])] + invalid_parameter[0]["parameters"]["invalid"] = "invalid_value" + error_strings = self.validator.validate(invalid_parameter) + self.assertEqual(error_strings[0], "Operation 1: Operation parameters for remove_columns " + + "contain an unexpected field 'invalid'.") + + invalid_parameter_nested = [deepcopy(self.remodel_file[10])] + invalid_parameter_nested[0]["parameters"]["new_events"]["response"]["invalid"] = "invalid_value" + error_strings = self.validator.validate(invalid_parameter_nested) + self.assertEqual(error_strings[0], "Operation 1: Operation parameters for response " + + "new_events contain an unexpected field 'invalid'.") + + invalid_type = [deepcopy(self.remodel_file[0])] + invalid_type[0]["parameters"]["column_names"] = 0 + error_strings = self.validator.validate(invalid_type) + self.assertEqual(error_strings[0], "Operation 1: The value of column_names in the remove_columns operation " + + "should be array. 0 is not a array.") + + invalid_type_nested = [deepcopy(self.remodel_file[10])] + invalid_type_nested[0]["parameters"]["new_events"]["response"]["onset_source"] = {"key": "value"} + error_strings = self.validator.validate(invalid_type_nested) + self.assertEqual(error_strings[0], "Operation 1: The value of onset_source response new_events " + + "in the split_rows operation should be array. {'key': 'value'} is not a array.") + + empty_array = [deepcopy(self.remodel_file[0])] + empty_array[0]["parameters"]["column_names"] = [] + error_strings = self.validator.validate(empty_array) + self.assertEqual(error_strings[0], "Operation 1: The list in column_names in the remove_columns " + + "operation should have at least 1 item(s).") + + empty_array_nested = [deepcopy(self.remodel_file[5])] + empty_array_nested[0]["parameters"]["map_list"][0] = [] + error_strings = self.validator.validate(empty_array_nested) + self.assertEqual(error_strings[0], "Operation 1: The list in item 1 map_list in the remap_columns " + + "operation should have at least 1 item(s).") + + # invalid_value = [deepcopy(self.remodel_file[18])] + # invalid_value[0]["parameters"]["convert_to"] = "invalid_value" + # error_strings = validator.validate(invalid_value) + # self.assertEqual(error_strings[0], "Operation 1: Operation parameter convert_to, in the " + + # "convert_columns operation, contains and unexpected value. 
" + + # "Value should be one of ['str', 'int', 'float', 'fixed'].") + + # value_dependency = [deepcopy(self.remodel_file[18])] + # value_dependency[0]["parameters"]["convert_to"] = "fixed" + # error_strings = validator.validate(value_dependency) + # self.assertEqual(error_strings[0], "Operation 1: The parameter decimal_places is missing. " + + # " The decimal_places is a required parameter of convert_columns.") + + property_dependency = [deepcopy(self.remodel_file[1])] + del property_dependency[0]["parameters"]["factor_values"] + error_strings = self.validator.validate(property_dependency) + self.assertEqual(error_strings[0], "Operation 1: The parameter factor_names is missing: " + + "factor_names is a required parameter of factor_column when ['factor_values'] is specified.") + + double_item_in_array = [deepcopy(self.remodel_file[0])] + double_item_in_array[0]["parameters"]["column_names"] = ['response', 'response'] + error_strings = self.validator.validate(double_item_in_array) + self.assertEqual(error_strings[0], "Operation 1: The list in column_names in the remove_columns " + + "operation should only contain unique items.") + + double_item_in_array_nested = [deepcopy(self.remodel_file[10])] + double_item_in_array_nested[0]["parameters"]["new_events"]["response"]["copy_columns"] = \ + ['response', 'response'] + error_strings = self.validator.validate(double_item_in_array_nested) + self.assertEqual(error_strings[0], + "Operation 1: The list in copy_columns response new_events in the split_rows " + + "operation should only contain unique items.") + + def test_validate_parameter_data(self): + factor_column_validate = [deepcopy(self.remodel_file)[1]] + factor_column_validate[0]["parameters"]["factor_names"] = ["stopped"] + error_strings = self.validator.validate(factor_column_validate) + self.assertEqual(error_strings[0], "Operation 1 (factor_column): factor_names must be " + + "same length as factor_values") + + factor_hed_tags_validate = [deepcopy(self.remodel_file)[2]] + factor_hed_tags_validate[0]["parameters"]["query_names"] = ["correct"] + error_strings = self.validator.validate(factor_hed_tags_validate) + self.assertEqual(error_strings[0], "Operation 1 (factor_hed_tags): QueryNamesLengthBad: " + + "The query_names length 1 must be empty or equal to the queries length 2.") + + merge_consecutive_validate = [deepcopy(self.remodel_file)[4]] + merge_consecutive_validate[0]["parameters"]["match_columns"].append("trial_type") + error_strings = self.validator.validate(merge_consecutive_validate) + self.assertEqual(error_strings[0], "Operation 1 (merge_consecutive): column_name `trial_type` " + + "cannot not be a match_column.") + + remap_columns_validate_same_length = [deepcopy(self.remodel_file)[5]] + remap_columns_validate_same_length[0]["parameters"]["map_list"][0] = [""] + error_strings = self.validator.validate(remap_columns_validate_same_length) + self.assertEqual(error_strings[0], "Operation 1 (remap_columns): all map_list arrays must be of length 3.") + + remap_columns_validate_right_length = [deepcopy(self.remodel_file[5])] + remap_columns_validate_right_length[0]["parameters"]["map_list"] = \ + [["string1", "string2"], ["string3", "string4"]] + error_strings = self.validator.validate(remap_columns_validate_right_length) + self.assertEqual(error_strings[0], "Operation 1 (remap_columns): all map_list arrays must be of length 3.") + + remap_columns_integer_sources = [deepcopy(self.remodel_file[5])] + remap_columns_integer_sources[0]["parameters"]["integer_sources"] = ["unknown_column"] 
+        error_strings = self.validator.validate(remap_columns_integer_sources)
+        self.assertEqual(error_strings[0], "Operation 1 (remap_columns): the integer_sources {'unknown_column'} " +
+                         "are missing from source_columns.")
diff --git a/tests/validator/test_def_validator.py b/tests/validator/test_def_validator.py
index 7b690c3e..6051920b 100644
--- a/tests/validator/test_def_validator.py
+++ b/tests/validator/test_def_validator.py
@@ -180,9 +180,9 @@ def test_expand_def_tags(self):
         self.base_def_validator(basic_def_strings, expanded_def_strings_with_definition,
                                 expand_defs=True, shrink_defs=False, remove_definitions=False)
-        self.base_def_validator(basic_def_strings, basic_def_strings, 
+        self.base_def_validator(basic_def_strings, basic_def_strings,
                                 expand_defs=False, shrink_defs=False, remove_definitions=False)
-        self.base_def_validator(basic_def_strings, basic_def_strings, 
+        self.base_def_validator(basic_def_strings, basic_def_strings,
                                 expand_defs=False, shrink_defs=True, remove_definitions=False)
         self.base_def_validator(expanded_def_strings_with_definition, basic_def_strings,
                                 expand_defs=False, shrink_defs=True,
diff --git a/tests/validator/test_sidecar_validator.py b/tests/validator/test_sidecar_validator.py
index 6faa732c..919365e2 100644
--- a/tests/validator/test_sidecar_validator.py
+++ b/tests/validator/test_sidecar_validator.py
@@ -2,7 +2,6 @@
 import os
 import io
 
-from hed.errors import HedFileError, ValidationErrors
 from hed.models import ColumnMetadata, HedString, Sidecar
 from hed.validator import HedValidator
 from hed import schema
@@ -98,7 +97,6 @@ def test_bad_structure_HED_in_ignored(self):
                 }
             },
             "HED": {
-
             },
             "OtherBad": {
                 "subbad": ["thing1", "HED", "Other"]
diff --git a/tests/validator/test_tag_validator.py b/tests/validator/test_tag_validator.py
index 08b91510..65edb1cd 100644
--- a/tests/validator/test_tag_validator.py
+++ b/tests/validator/test_tag_validator.py
@@ -1,6 +1,6 @@
 import unittest
 
-from hed.errors.error_types import ValidationErrors, DefinitionErrors, TemporalErrors
+from hed.errors.error_types import ValidationErrors, DefinitionErrors
 from tests.validator.test_tag_validator_base import TestValidatorBase
 from hed.schema.hed_schema_io import load_schema_version
 from functools import partial
@@ -418,7 +418,7 @@ def test_no_duplicates(self):
                               'Purple-color/Purple',
             'legalDuplicate': 'Item/Object/Man-made-object/VehicleTrain,(Item/Object/Man-made-object/VehicleTrain,'
                               'Event/Sensory-event)',
-            'duplicateGroup': 'Sensory-event, (Sensory-event, Man-made-object/VehicleTrain),'
+            'duplicateGroup': 'Sensory-event, (Sensory-event, Man-made-object/VehicleTrain),'
                               '(Man-made-object/VehicleTrain, Sensory-event)',
             'duplicateSubGroup': 'Sensory-event, (Event, (Sensory-event, Man-made-object/VehicleTrain)),'
                                  '(Event, (Man-made-object/VehicleTrain, Sensory-event))',
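
Note on the workflow change above (not part of the commit): the old steps branched on the Python version inside a bash script, which is what produced the broken `if ["${{ ... }}" == "3.9" ]` lines being removed; the new steps move that decision to step-level `if:` conditions evaluated against the matrix. A minimal sketch of the pattern, with a hypothetical job name and version list:

    jobs:
      test:
        strategy:
          matrix:
            python-version: ["3.8", "3.9", "3.10"]
        steps:
          # Coverage is collected only on the designated version.
          - name: Test with coverage
            if: matrix.python-version == '3.9'
            run: |
              pip install coverage
              coverage run -m unittest discover tests
            continue-on-error: true
          # Every other matrix entry runs plain unittest.
          - name: Test without coverage
            if: matrix.python-version != '3.9'
            run: python -m unittest discover tests
            continue-on-error: true

Because the commit also sets continue-on-error: true on each test step, a failing test no longer fails the job by itself, while the coverage artifact upload remains gated to the 3.9 matrix entry.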