From 0efbd3769a53f3d92d431e1f05baa8a4611e2a5a Mon Sep 17 00:00:00 2001
From: Liz Gehret
Date: Tue, 30 Apr 2024 11:27:36 -0700
Subject: [PATCH] maint: changes for test failures from python 3.9 update

The classifier spec round-trips through JSON, which has no tuple type,
so `ngram_range` deserializes as a list; scikit-learn's
HashingVectorizer requires a tuple. Coerce the value back to a tuple
wherever a spec or a `set_params` override is deserialized, and use
tuples in the default spec and in the tests.
---
 q2_feature_classifier/_skl.py                   | 2 +-
 q2_feature_classifier/classifier.py             | 4 ++++
 q2_feature_classifier/tests/test_classifier.py | 4 ++--
 q2_feature_classifier/tests/test_custom.py     | 4 ++--
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/q2_feature_classifier/_skl.py b/q2_feature_classifier/_skl.py
index 0ffb834..6021327 100644
--- a/q2_feature_classifier/_skl.py
+++ b/q2_feature_classifier/_skl.py
@@ -61,7 +61,7 @@ def num_leaf_nodes(self) -> int:
       {'__type__': 'feature_extraction.text.HashingVectorizer',
        'analyzer': 'char_wb',
        'n_features': 8192,
-       'ngram_range': [7, 7],
+       'ngram_range': (7, 7),
        'alternate_sign': False}],
      ['classify',
       {'__type__': 'custom.LowMemoryMultinomialNB',
diff --git a/q2_feature_classifier/classifier.py b/q2_feature_classifier/classifier.py
index 9f5bbe0..9ec107f 100644
--- a/q2_feature_classifier/classifier.py
+++ b/q2_feature_classifier/classifier.py
@@ -86,6 +86,8 @@ def default(self, obj):
 
 def pipeline_from_spec(spec):
     def as_steps(obj):
+        if 'ngram_range' in obj:
+            obj['ngram_range'] = tuple(obj['ngram_range'])
         if '__type__' in obj:
             klass = _load_class(obj['__type__'])
             return klass(**{k: v for k, v in obj.items() if k != '__type__'})
@@ -332,6 +334,8 @@ def generic_fitter(reference_reads: DNAIterator,
             kwargs[param] = json.loads(kwargs[param])
         except (json.JSONDecodeError, TypeError):
             pass
+        if param == 'feat_ext__ngram_range':
+            kwargs[param] = tuple(kwargs[param])
     pipeline = pipeline_from_spec(spec)
     pipeline.set_params(**kwargs)
     if class_weight is not None:
diff --git a/q2_feature_classifier/tests/test_classifier.py b/q2_feature_classifier/tests/test_classifier.py
index a3814bc..ad08289 100644
--- a/q2_feature_classifier/tests/test_classifier.py
+++ b/q2_feature_classifier/tests/test_classifier.py
@@ -66,7 +66,7 @@ def test_populate_class_weight(self):
                  {'__type__': 'feature_extraction.text.HashingVectorizer',
                   'analyzer': 'char_wb',
                   'n_features': 8192,
-                  'ngram_range': [8, 8],
+                  'ngram_range': (8, 8),
                   'alternate_sign': False}],
                 ['classify',
                  {'__type__': 'naive_bayes.GaussianNB'}]]
@@ -117,7 +117,7 @@ def test_class_weight(self):
                  {'__type__': 'feature_extraction.text.HashingVectorizer',
                   'analyzer': 'char_wb',
                   'n_features': 8192,
-                  'ngram_range': [8, 8],
+                  'ngram_range': (8, 8),
                   'alternate_sign': False}],
                 ['classify',
                  {'__type__': 'linear_model.LogisticRegression'}]]
diff --git a/q2_feature_classifier/tests/test_custom.py b/q2_feature_classifier/tests/test_custom.py
index e38ab74..b2779c0 100644
--- a/q2_feature_classifier/tests/test_custom.py
+++ b/q2_feature_classifier/tests/test_custom.py
@@ -39,7 +39,7 @@ def test_low_memory_multinomial_nb(self):
                  {'__type__': 'feature_extraction.text.HashingVectorizer',
                   'analyzer': 'char',
                   'n_features': 8192,
-                  'ngram_range': [8, 8],
+                  'ngram_range': (8, 8),
                   'alternate_sign': False}],
                 ['classify',
                  {'__type__': 'custom.LowMemoryMultinomialNB',
@@ -68,7 +68,7 @@ def test_chunked_hashing_vectorizer(self):
 
         params = {'analyzer': 'char',
                   'n_features': 8192,
-                  'ngram_range': [8, 8],
+                  'ngram_range': (8, 8),
                   'alternate_sign': False}
         hv = HashingVectorizer(**params)
         unchunked = hv.fit_transform(X)
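
Note for reviewers: the root cause is that the spec round-trips through
JSON, which has no tuple type, so a (7, 7) ngram_range comes back as
[7, 7]; recent scikit-learn releases validate HashingVectorizer
parameters and reject a list when the vectorizer is used. A minimal
standalone sketch of the round-trip and the coercion (not the plugin's
code; the parameter values mirror the default spec above):

import json
from sklearn.feature_extraction.text import HashingVectorizer

# JSON has no tuple type: a tuple round-trips as a list.
params = json.loads(json.dumps({'analyzer': 'char_wb',
                                'n_features': 8192,
                                'ngram_range': (7, 7),
                                'alternate_sign': False}))
assert params['ngram_range'] == [7, 7]  # now a list

# Coerce back to a tuple before handing the params to scikit-learn,
# which validates ngram_range's type when the vectorizer is used.
params['ngram_range'] = tuple(params['ngram_range'])
hv = HashingVectorizer(**params)
print(hv.ngram_range)  # (7, 7)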
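
The generic_fitter hunk covers the second path into the same failure:
per-step overrides arrive as JSON strings, so after json.loads the
value is again a list. A simplified sketch of that flow, assuming a
two-step pipeline shaped like the default spec (the
'feat_ext__ngram_range' key comes from the patch; the pipeline here is
illustrative, not the plugin's):

import json
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.naive_bayes import MultinomialNB

pipeline = Pipeline([('feat_ext', HashingVectorizer(alternate_sign=False)),
                     ('classify', MultinomialNB())])

# Overrides are passed in as strings and JSON-decoded, so a tuple
# written as '[8, 8]' decodes to a list.
kwargs = {'feat_ext__ngram_range': '[8, 8]'}
for param in kwargs:
    try:
        kwargs[param] = json.loads(kwargs[param])
    except (json.JSONDecodeError, TypeError):
        pass
    # Same coercion as the patch: set_params must receive a tuple here.
    if param == 'feat_ext__ngram_range':
        kwargs[param] = tuple(kwargs[param])

pipeline.set_params(**kwargs)
print(pipeline.named_steps['feat_ext'].ngram_range)  # (8, 8)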
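
A quick standalone check of the behavior the updated tests rely on,
assuming scikit-learn >= 1.2 (where parameter validation raises at fit
time with an error that subclasses ValueError); on older releases the
list form is silently accepted:

from sklearn.feature_extraction.text import HashingVectorizer

X = ['ACGTACGTAC', 'TTGGCCAATT']

# A list ngram_range is rejected at fit time by scikit-learn >= 1.2.
try:
    HashingVectorizer(analyzer='char', ngram_range=[8, 8]).fit_transform(X)
    print('list accepted (older scikit-learn)')
except ValueError as err:
    print('list rejected:', err)

# The tuple form fits cleanly on all versions.
HashingVectorizer(analyzer='char', ngram_range=(8, 8)).fit_transform(X)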