diff --git a/Makefile b/Makefile
index 56b633f1..09b0fda5 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ SHELL = /bin/bash
 .PHONY: style
 style:
 	black .
-	flake8 .
+	flake8 . --pytest-parametrize-names-type=csv
 	python -m isort .
 	rm -f .coverage
 	rm -f .coverage.*
diff --git a/pyproject.toml b/pyproject.toml
index f2c74a04..605f6a98 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,6 +56,7 @@ dev = [ # Install wtih pip install .[dev] or pip install -e '.[dev]' in zsh
     "coverage>=7.2.3",
     "flake8>=6.0.0",
     "pytest<=7.4.4",
+    "flake8-pytest-style>=1.3.2",
     "pytest-cov>=4.0.0",
     "pytest-mock==3.10.0",
     "pre-commit>=3.2.0",
diff --git a/src/metrics/base.py b/src/metrics/base.py
index 13d13552..61eb6bd9 100644
--- a/src/metrics/base.py
+++ b/src/metrics/base.py
@@ -40,7 +40,7 @@ def reset(self, *args, **kwargs):
         raise NotImplementedError
 
     @abstractmethod
-    def load_state_dict(self, state_dict: dict, *args, **kwargs):
+    def load_state_dict(self, state_dict: dict):
         """
         Used to load the metric state.
         """
diff --git a/src/metrics/functional.py b/src/metrics/functional.py
deleted file mode 100644
index c6a30995..00000000
--- a/src/metrics/functional.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-
-WORK IN PROGRESS!!!
-"""
-
-import warnings
-from typing import Optional, Union
-
-import torch
-
-from src.utils.cache import ExplanationsCache as EC
-from src.utils.explanations import (
-    BatchedCachedExplanations,
-    TensorExplanations,
-)
-
-
-def function_example(
-    model: torch.nn.Module,
-    train_dataset: torch.utils.data.Dataset,
-    top_k: int = 1,
-    explanations: Union[str, torch.Tensor, TensorExplanations, BatchedCachedExplanations] = "./",
-    batch_size: Optional[int] = 8,
-    device="cpu",
-    **kwargs,
-):
-    """
-    I've copied the existing code from the memory-less metric version here, that can be reused in the future here.
-    It will not be called "function_example" in the future. There will be many reusable functions, but every metric
-    will get a functional version here.
-
-    :param model:
-    :param train_dataset:
-    :param top_k:
-    :param explanations:
-    :param batch_size:
-    :param device:
-    :param kwargs:
-    :return:
-    """
-    if isinstance(explanations, str):
-        explanations = EC.load(path=explanations, device=device)
-        if explanations.batch_size != batch_size:
-            warnings.warn(
-                "Batch size mismatch between loaded explanations and passed batch size. The inferred batch "
-                "size will be used instead."
-            )
-            batch_size = explanations[0]
-    elif isinstance(explanations, torch.Tensor):
-        explanations = TensorExplanations(explanations, batch_size=batch_size, device=device)
diff --git a/src/metrics/localization/identical_class.py b/src/metrics/localization/identical_class.py
index 63e06638..fbd14034 100644
--- a/src/metrics/localization/identical_class.py
+++ b/src/metrics/localization/identical_class.py
@@ -8,11 +8,11 @@ def __init__(
         self,
         model: torch.nn.Module,
         train_dataset: torch.utils.data.Dataset,
-        device,
+        device: str,
         *args,
         **kwargs,
     ):
-        super().__init__(model, train_dataset, device, *args, **kwargs)
+        super().__init__(model=model, train_dataset=train_dataset, device=device, *args, **kwargs)
         self.scores = []
 
     def update(self, test_labels: torch.Tensor, explanations: torch.Tensor):
@@ -27,8 +27,8 @@ def update(self, test_labels: torch.Tensor, explanations: torch.Tensor):
 
         top_one_xpl_indices = explanations.argmax(dim=1)
         top_one_xpl_targets = torch.stack([self.train_dataset[i][1] for i in top_one_xpl_indices])
-        score = (test_labels == top_one_xpl_targets) * 1.0
-        self.scores.append(score)
+        scores = (test_labels == top_one_xpl_targets) * 1.0
+        self.scores.append(scores)
 
     def compute(self):
         """
diff --git a/src/metrics/randomization/model_randomization.py b/src/metrics/randomization/model_randomization.py
index 90d5a669..e8070f8e 100644
--- a/src/metrics/randomization/model_randomization.py
+++ b/src/metrics/randomization/model_randomization.py
@@ -64,16 +64,17 @@ def __init__(
             train_dataset=self.train_dataset,
         )
 
-        self.results = {"rank_correlations": []}
+        self.results = {"scores": []}
 
+        # TODO: create a validation utility function
         if isinstance(correlation_fn, str) and correlation_fn in correlation_functions:
-            self.correlation_measure = correlation_functions.get(correlation_fn)
+            self.corr_measure = correlation_functions.get(correlation_fn)
         elif callable(correlation_fn):
-            self.correlation_measure = correlation_fn
+            self.corr_measure = correlation_fn
         else:
             raise ValueError(
                 f"Invalid correlation function: expected one of {list(correlation_functions.keys())} or"
-                f"a Callable, but got {self.correlation_measure}."
+                f"a Callable, but got {self.corr_measure}."
             )
 
     def update(
@@ -82,37 +83,39 @@ def update(
         explanations: torch.Tensor,
         explanation_targets: torch.Tensor,
     ):
-        device = "cuda" if torch.cuda.is_available() else "cpu"
         rand_explanations = self.explain_fn(
-            model=self.rand_model, test_tensor=test_data, explanation_targets=explanation_targets, device=device
+            model=self.rand_model, test_tensor=test_data, explanation_targets=explanation_targets, device=self.device
         )
-        corrs = self.correlation_measure(explanations, rand_explanations)
-        self.results["rank_correlations"].append(corrs)
+        corrs = self.corr_measure(explanations, rand_explanations)
+        self.results["scores"].append(corrs)
 
     def compute(self):
-        return torch.cat(self.results["rank_correlations"]).mean()
+        return torch.cat(self.results["scores"]).mean()
 
     def reset(self):
-        self.results = {"rank_correlations": []}
+        self.results = {"scores": []}
         self.generator.manual_seed(self.seed)
         self.rand_model = self._randomize_model(self.model)
 
     def state_dict(self):
         state_dict = {
             "results_dict": self.results,
-            "random_model_state_dict": self.model.state_dict(),
-            "seed": self.seed,
-            "generator_state": self.generator.get_state(),
-            "explain_fn": self.explain_fn,
+            "rnd_model": self.model.state_dict(),
+            # Note to Galip: I suggest removing this, because those are explicitly passed
+            # as init arguments and this is an unexpected side effect if we overwrite them.
+            # Plus, we only ever use seed to randomize the model once.
+            # "seed": self.seed,
+            # "generator_state": self.generator.get_state(),
+            # "explain_fn": self.explain_fn,
         }
         return state_dict
 
     def load_state_dict(self, state_dict: dict):
         self.results = state_dict["results_dict"]
-        self.seed = state_dict["seed"]
-        self.explain_fn = state_dict["explain_fn"]
-        self.rand_model.load_state_dict(state_dict["random_model_state_dict"])
-        self.generator.set_state(state_dict["generator_state"])
+        self.rand_model.load_state_dict(state_dict["rnd_model"])
+        # self.seed = state_dict["seed"]
+        # self.explain_fn = state_dict["explain_fn"]
+        # self.generator.set_state(state_dict["generator_state"])
 
     def _randomize_model(self, model: torch.nn.Module):
         rand_model = copy.deepcopy(model)
diff --git a/src/utils/datasets/utils.py b/src/utils/datasets/utils.py
index 6f9102fb..4043adc4 100644
--- a/src/utils/datasets/utils.py
+++ b/src/utils/datasets/utils.py
@@ -32,7 +32,7 @@ def load_datasets(dataset_name, dataset_type, **kwparams):
     elif dataset_type == "mark":
         ds = MarkDataset(ds, only_train=only_train)
         evalds = MarkDataset(evalds, only_train=only_train)
-    assert ds is not None and evalds is not None
+    # assert ds is not None and evalds is not None
     return ds, evalds
 
 
diff --git a/tests/conftest.py b/tests/conftest.py
index 349c79f5..7f95f81c 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -14,24 +14,24 @@
 RANDOM_SEED = 42
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_dataset():
     x = torch.stack([torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2)])
     y = torch.tensor([0, 1, 0]).long()
     return torch.utils.data.TensorDataset(x, y)
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_rand_tensor():
     return torch.rand(10, 10).float()
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_rand_test_predictions():
     return torch.randint(0, 10, (10000,))
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_mnist_model():
     """Load a pre-trained LeNet classification model (architecture at quantus/helpers/models)."""
     model = LeNet()
@@ -39,13 +39,13 @@
     return model
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_init_mnist_model():
     """Load a not trained LeNet classification model (architecture at quantus/helpers/models)."""
     return LeNet()
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_mnist_dataset():
     """Load a batch of MNIST digits: inputs and outputs to use for testing."""
     x_batch = (
@@ -58,7 +58,7 @@
     return dataset
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_mnist_dataloader():
     """Load a batch of MNIST digits: inputs and outputs to use for testing."""
     x_batch = (
@@ -72,26 +72,26 @@
     return dataloader
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_mnist_test_samples_1():
     return torch.load("tests/assets/mnist_test_suite_1/test_dataset.pt")
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_mnist_test_labels_1():
     return torch.load("tests/assets/mnist_test_suite_1/test_labels.pt")
 
 
-@pytest.fixture()
+@pytest.fixture
 def load_mnist_explanations_1():
     return torch.load("tests/assets/mnist_test_suite_1/mnist_SimilarityInfluence_tda.pt")
 
 
-@pytest.fixture()
+@pytest.fixture
 def torch_cross_entropy_loss_object():
     return torch.nn.CrossEntropyLoss()
 
 
-@pytest.fixture()
+@pytest.fixture
 def torch_sgd_optimizer():
     return functools.partial(torch.optim.SGD, lr=0.01, momentum=0.9)
diff --git a/tests/explainers/test_aggregators.py b/tests/explainers/test_aggregators.py
index 0be36b35..d166ca27 100644
--- a/tests/explainers/test_aggregators.py
+++ b/tests/explainers/test_aggregators.py
@@ -6,17 +6,15 @@
 
 @pytest.mark.aggregators
 @pytest.mark.parametrize(
-    "test_id, dataset, explanations",
+    "test_id, explanations",
     [
         (
             "mnist",
-            "load_mnist_dataset",
             "load_mnist_explanations_1",
         ),
     ],
 )
-def test_sum_aggregator(test_id, dataset, explanations, request):
-    dataset = request.getfixturevalue(dataset)
+def test_sum_aggregator(test_id, explanations, request):
     explanations = request.getfixturevalue(explanations)
     aggregator = SumAggregator()
     aggregator.update(explanations)
@@ -26,17 +24,15 @@
 
 @pytest.mark.aggregators
 @pytest.mark.parametrize(
-    "test_id, dataset, explanations",
+    "test_id, explanations",
     [
         (
             "mnist",
-            "load_mnist_dataset",
             "load_mnist_explanations_1",
         ),
     ],
 )
-def test_abs_aggregator(test_id, dataset, explanations, request):
-    dataset = request.getfixturevalue(dataset)
+def test_abs_aggregator(test_id, explanations, request):
     explanations = request.getfixturevalue(explanations)
     aggregator = AbsSumAggregator()
     aggregator.update(explanations)
diff --git a/tests/explainers/test_base_explainer.py b/tests/explainers/test_base_explainer.py
index 113f0f7e..d4978c06 100644
--- a/tests/explainers/test_base_explainer.py
+++ b/tests/explainers/test_base_explainer.py
@@ -10,19 +10,21 @@
 
 @pytest.mark.explainers
 @pytest.mark.parametrize(
-    "test_id, model, dataset, method_kwargs",
+    "test_id, model, dataset, explanations, method_kwargs",
     [
         (
             "mnist",
             "load_mnist_model",
             "load_mnist_dataset",
+            "load_mnist_explanations_1",
             {"layers": "relu_4", "similarity_metric": cosine_similarity},
         ),
     ],
 )
-def test_base_explain_self_influence(test_id, model, dataset, method_kwargs, mocker, request):
+def test_base_explain_self_influence(test_id, model, dataset, explanations, method_kwargs, mocker, request):
     model = request.getfixturevalue(model)
     dataset = request.getfixturevalue(dataset)
+    explanations = request.getfixturevalue(explanations)
     BaseExplainer.__abstractmethods__ = set()
 
     explainer = BaseExplainer(
@@ -34,9 +36,9 @@ def test_base_explain_self_influence(test_id, model, dataset, method_kwargs, moc
         **method_kwargs,
     )
 
-    # Patch the method
+    # Patch the method, because BaseExplainer has an abstract explain method.
     def mock_explain(test: torch.Tensor, targets: Optional[Union[List[int], torch.Tensor]] = None):
-        return torch.ones((test.shape[0], dataset.__len__()))
+        return explanations
 
     mocker.patch.object(explainer, "explain", wraps=mock_explain)
 
diff --git a/tests/explainers/test_explainers.py b/tests/explainers/test_explainers.py
deleted file mode 100644
index 3fb3805b..00000000
--- a/tests/explainers/test_explainers.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import os
-
-import pytest
-import torch
-
-from src.explainers.wrappers.captum_influence import (
-    CaptumSimilarity,
-    captum_similarity_explain,
-)
-from src.utils.functions.similarities import cosine_similarity
-
-
-@pytest.mark.explainers
-@pytest.mark.parametrize(
-    "test_id, model, dataset, test_tensor, test_labels, method_kwargs, explanations",
-    [
-        (
-            "mnist",
-            "load_mnist_model",
-            "load_mnist_dataset",
-            "load_mnist_test_samples_1",
-            "load_mnist_test_labels_1",
-            {"layers": "relu_4", "similarity_metric": cosine_similarity},
-            "load_mnist_explanations_1",
-        ),
-    ],
-)
-def test_explain_functional(test_id, model, dataset, test_tensor, test_labels, method_kwargs, explanations, request):
-    model = request.getfixturevalue(model)
-    dataset = request.getfixturevalue(dataset)
-    test_tensor = request.getfixturevalue(test_tensor)
-    test_labels = request.getfixturevalue(test_labels)
-    explanations_exp = request.getfixturevalue(explanations)
-    explanations = captum_similarity_explain(
-        model,
-        "test_id",
-        os.path.join("./cache", "test_id"),
-        test_tensor,
-        test_labels,
-        dataset,
-        device="cpu",
-        init_kwargs=method_kwargs,
-    )
-    assert torch.allclose(explanations, explanations_exp), "Training data attributions are not as expected"
-
-
-@pytest.mark.explainers
-@pytest.mark.parametrize(
-    "test_id, model, dataset, explanations, test_tensor, test_labels, method_kwargs",
-    [
-        (
-            "mnist",
-            "load_mnist_model",
-            "load_mnist_dataset",
-            "load_mnist_explanations_1",
-            "load_mnist_test_samples_1",
-            "load_mnist_test_labels_1",
-            {"layers": "relu_4", "similarity_metric": cosine_similarity},
-        ),
-    ],
-)
-def test_explain_stateful(test_id, model, dataset, explanations, test_tensor, test_labels, method_kwargs, request):
-    model = request.getfixturevalue(model)
-    dataset = request.getfixturevalue(dataset)
-    test_tensor = request.getfixturevalue(test_tensor)
-    test_labels = request.getfixturevalue(test_labels)
-    explanations_exp = request.getfixturevalue(explanations)
-    explainer = CaptumSimilarity(
-        model=model,
-        model_id="test_id",
-        cache_dir=os.path.join("./cache", "test_id"),
-        train_dataset=dataset,
-        device="cpu",
-        **method_kwargs,
-    )
-    explanations = explainer.explain(test_tensor)
-    assert torch.allclose(explanations, explanations_exp), "Training data attributions are not as expected"
diff --git a/tests/explainers/test_self_influence.py b/tests/explainers/test_self_influence.py
deleted file mode 100644
index 931f3109..00000000
--- a/tests/explainers/test_self_influence.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-import shutil
-from collections import OrderedDict
-
-import pytest
-import torch
-from torch.utils.data import TensorDataset
-
-from src.explainers.wrappers.captum_influence import (
-    CaptumSimilarity,
-    captum_similarity_self_influence,
-)
-from src.utils.functions.similarities import dot_product_similarity
-
-
-@pytest.mark.self_influence
-@pytest.mark.parametrize(
-    "test_id, init_kwargs",
-    [
-        (
-            "random_data",
-            {"layers": "identity", "similarity_metric": dot_product_similarity},
-        ),
-    ],
-)
-def test_self_influence(test_id, init_kwargs, request):
-    model = torch.nn.Sequential(OrderedDict([("identity", torch.nn.Identity())]))
-
-    torch.random.manual_seed(42)
-    X = torch.randn(100, 200)
-    y = torch.randint(0, 10, (100,))
-    rand_dataset = TensorDataset(X, y)
-
-    self_influence_rank_functional = captum_similarity_self_influence(
-        model=model,
-        model_id="0",
-        cache_dir="temp_captum",
-        train_dataset=rand_dataset,
-        init_kwargs=init_kwargs,
-        device="cpu",
-    )
-
-    explainer_obj = CaptumSimilarity(
-        model=model,
-        model_id="1",
-        cache_dir="temp_captum2",
-        train_dataset=rand_dataset,
-        device="cpu",
-        **init_kwargs,
-    )
-    self_influence_rank_stateful = explainer_obj.self_influence()
-
-    if os.path.isdir("temp_captum2"):
-        shutil.rmtree(os.path.join(os.getcwd(), "temp_captum2"))
-    if os.path.isdir("temp_captum"):
-        shutil.rmtree(os.path.join(os.getcwd(), "temp_captum"))
-
-    assert torch.allclose(self_influence_rank_functional, torch.linalg.norm(X, dim=-1).argsort())
-    assert torch.allclose(self_influence_rank_functional, self_influence_rank_stateful)
diff --git a/tests/explainers/wrappers/__init__.py b/tests/explainers/wrappers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/explainers/wrappers/test_captum_influence.py b/tests/explainers/wrappers/test_captum_influence.py
new file mode 100644
index 00000000..e88c353d
--- /dev/null
+++ b/tests/explainers/wrappers/test_captum_influence.py
@@ -0,0 +1,151 @@
+import os
+import shutil
+from collections import OrderedDict
+
+import pytest
+import torch
+from torch.utils.data import TensorDataset
+
+from src.explainers.wrappers.captum_influence import (
+    CaptumSimilarity,
+    captum_similarity_explain,
+    captum_similarity_self_influence,
+)
+from src.utils.functions.similarities import (
+    cosine_similarity,
+    dot_product_similarity,
+)
+
+
+@pytest.mark.self_influence
+@pytest.mark.parametrize(
+    "test_id, init_kwargs",
+    [
+        (
+            "random_data",
+            {"layers": "identity", "similarity_metric": dot_product_similarity},
+        ),
+    ],
+)
+# TODO: I think a good naming convention is "test_..." or "test_...".
+def test_self_influence(test_id, init_kwargs, request):
+    # TODO: this should be a fixture.
+    model = torch.nn.Sequential(OrderedDict([("identity", torch.nn.Identity())]))
+
+    # TODO: those should be fixtures. We (most of the time) don't generate random data in tests.
+    torch.random.manual_seed(42)
+    X = torch.randn(100, 200)
+    y = torch.randint(0, 10, (100,))
+    rand_dataset = TensorDataset(X, y)
+
+    # TODO: One test should test one thing. This is test 1, ....
+    self_influence_rank_functional = captum_similarity_self_influence(
+        model=model,
+        model_id="0",
+        cache_dir="temp_captum",
+        train_dataset=rand_dataset,
+        init_kwargs=init_kwargs,
+        device="cpu",
+    )
+
+    # TODO: ...this is test 2, unless we want to compare that the outputs are the same.
+    # TODO: If we want to test that the outputs are the same, we should have a separate test for that.
+    explainer_obj = CaptumSimilarity(
+        model=model,
+        model_id="1",
+        cache_dir="temp_captum2",
+        train_dataset=rand_dataset,
+        device="cpu",
+        **init_kwargs,
+    )
+
+    # TODO: self_influence is defined in BaseExplainer - there is a test in test_base_explainer for that.
+    # TODO: here we then specifically test self_influence for CaptumSimilarity and should make it explicit in the name.
+    self_influence_rank_stateful = explainer_obj.self_influence()
+
+    # TODO: we check "temp_captum2" but then remove os.path.join(os.getcwd(), "temp_captum2")?
+    # TODO: is there a reason to fear that the "temp_captum2" folder is not in os.getcwd()?
+    if os.path.isdir("temp_captum2"):
+        shutil.rmtree(os.path.join(os.getcwd(), "temp_captum2"))
+    if os.path.isdir("temp_captum"):
+        shutil.rmtree(os.path.join(os.getcwd(), "temp_captum"))
+
+    # TODO: what if we pass a non-identity model? Then we don't expect torch.linalg.norm(X, dim=-1).argsort()
+    # TODO: let's put expectations in the parametrisation of tests. We want to test different scenarios,
+    #  and not some super-specific case. This specific case definitely can be tested as well.
+    assert torch.allclose(self_influence_rank_functional, torch.linalg.norm(X, dim=-1).argsort())
+    # TODO: I think it is best to stick to a single assertion per test (source: Google)
+    assert torch.allclose(self_influence_rank_functional, self_influence_rank_stateful)
+
+
+@pytest.mark.explainers
+@pytest.mark.parametrize(
+    "test_id, model, dataset, explanations, test_tensor, test_labels, method_kwargs",
+    [
+        (
+            "mnist",
+            "load_mnist_model",
+            "load_mnist_dataset",
+            "load_mnist_explanations_1",
+            "load_mnist_test_samples_1",
+            "load_mnist_test_labels_1",
+            {"layers": "relu_4", "similarity_metric": cosine_similarity},
+        ),
+    ],
+)
+# TODO: I think a good naming convention is "test_..." or "test_...".
+# TODO: I would call it test_captum_similarity, because it is a test for the CaptumSimilarity class.
+# TODO: We could also make the explainer type (e.g. CaptumSimilarity) a param, then it would be test_explainer or something.
+def test_explain_stateful(test_id, model, dataset, explanations, test_tensor, test_labels, method_kwargs, request):
+    model = request.getfixturevalue(model)
+    dataset = request.getfixturevalue(dataset)
+    test_tensor = request.getfixturevalue(test_tensor)
+    test_labels = request.getfixturevalue(test_labels)
+    explanations_exp = request.getfixturevalue(explanations)
+
+    explainer = CaptumSimilarity(
+        model=model,
+        model_id="test_id",
+        cache_dir=os.path.join("./cache", "test_id"),
+        train_dataset=dataset,
+        device="cpu",
+        **method_kwargs,
+    )
+    # TODO: activations folder clean-up
+
+    explanations = explainer.explain(test_tensor)
+    assert torch.allclose(explanations, explanations_exp), "Training data attributions are not as expected"
+
+
+@pytest.mark.explainers
+@pytest.mark.parametrize(
+    "test_id, model, dataset, test_tensor, test_labels, method_kwargs, explanations",
+    [
+        (
+            "mnist",
+            "load_mnist_model",
+            "load_mnist_dataset",
+            "load_mnist_test_samples_1",
+            "load_mnist_test_labels_1",
+            {"layers": "relu_4", "similarity_metric": cosine_similarity},
+            "load_mnist_explanations_1",
+        ),
+    ],
+)
+def test_explain_functional(test_id, model, dataset, test_tensor, test_labels, method_kwargs, explanations, request):
+    model = request.getfixturevalue(model)
+    dataset = request.getfixturevalue(dataset)
+    test_tensor = request.getfixturevalue(test_tensor)
+    test_labels = request.getfixturevalue(test_labels)
+    explanations_exp = request.getfixturevalue(explanations)
+    explanations = captum_similarity_explain(
+        model,
+        "test_id",
+        os.path.join("./cache", "test_id"),
+        test_tensor,
+        test_labels,
+        dataset,
+        device="cpu",
+        init_kwargs=method_kwargs,
+    )
+    assert torch.allclose(explanations, explanations_exp), "Training data attributions are not as expected"
diff --git a/tests/metrics/test_localization_metrics.py b/tests/metrics/test_localization_metrics.py
index 92618cca..f35c1906 100644
--- a/tests/metrics/test_localization_metrics.py
+++ b/tests/metrics/test_localization_metrics.py
@@ -5,7 +5,7 @@
 
 @pytest.mark.localization_metrics
 @pytest.mark.parametrize(
-    "test_id, model, dataset, test_labels, batch_size, explanations, expected_score",
+    "test_id,model,dataset,test_labels,batch_size,explanations,expected_score",
     [
         (
             "mnist",
diff --git a/tests/metrics/test_randomization_metrics.py b/tests/metrics/test_randomization_metrics.py
index e65429a1..7716e581 100644
--- a/tests/metrics/test_randomization_metrics.py
+++ b/tests/metrics/test_randomization_metrics.py
@@ -48,8 +48,7 @@ def test_randomization_metric_functional(
     # Can we come up with a special attributor that gets exactly 0 score?
     metric.update(test_data=test_data, explanations=tda, explanation_targets=test_labels)
     out = metric.compute()
-    assert (out.item() >= -1.0) and (out.item() <= 1.0), "Test failed."
-    assert isinstance(out, torch.Tensor), "Output is not a tensor."
+    assert (out.item() >= -1.0) & (out.item() <= 1.0), "Test failed."
 
 
 @pytest.mark.randomization_metrics
@@ -63,7 +62,7 @@ def test_randomization_metric_functional(
         ),
     ],
 )
-def test_model_randomization(test_id, model, dataset, request):
+def test_randomization_metric_model_randomization(test_id, model, dataset, request):
     model = request.getfixturevalue(model)
     dataset = request.getfixturevalue(dataset)
     metric = ModelRandomizationMetric(model=model, train_dataset=dataset, explain_fn=lambda x: x, seed=42, device="cpu")
diff --git a/tox.ini b/tox.ini
index 0ba7947f..4fdee0da 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,7 @@
 [flake8]
 max-line-length = 127
 max-complexity = 10
+pytest-parametrize-names-type = csv
 ignore = E203
 
 [testenv]