From b6bcf03f961835cc24cb6e53f034389e93b5e396 Mon Sep 17 00:00:00 2001
From: Yukio Siraichi
Date: Wed, 2 Oct 2024 16:48:05 -0300
Subject: [PATCH] [benchmarks] Default to functionalization disabled. (#8093)

---
 benchmarks/benchmark_experiment.py           | 18 ++++-
 benchmarks/experiment_runner.py              |  5 ++
 test/benchmarks/run_torchbench_tests.sh      | 26 +++----
 test/benchmarks/test_benchmark_experiment.py |  5 +-
 test/benchmarks/test_experiment_runner.py    | 72 +++++++++++++-------
 5 files changed, 86 insertions(+), 40 deletions(-)

diff --git a/benchmarks/benchmark_experiment.py b/benchmarks/benchmark_experiment.py
index d7159c0bb5b..a82490d373b 100644
--- a/benchmarks/benchmark_experiment.py
+++ b/benchmarks/benchmark_experiment.py
@@ -27,6 +27,7 @@ def list_experiment_configs(self):
         "torch_xla2": [None],  # options only apply to torch_xla2
         "test": ["eval", "train"],
         "keep_model_data_on_cuda": [False],
+        "enable_functionalization": [False],
     }
 
     # Apply command line choices.
@@ -49,6 +50,10 @@ def list_experiment_configs(self):
       config_choices["keep_model_data_on_cuda"] = [
           self._args.keep_model_data_on_cuda
       ]
+    if self._args.enable_functionalization:
+      config_choices["enable_functionalization"] = [
+          self._args.enable_functionalization
+      ]
 
     # Expand experiment configs and add env vars.
     logger.debug(f"Expand experiment configs")
@@ -136,6 +141,7 @@ def load_experiment(self,
     batch_size = experiment_config.get("batch_size", self._args.batch_size)
     torch_xla2 = experiment_config["torch_xla2"]
     keep_model_data_on_cuda = experiment_config["keep_model_data_on_cuda"]
+    enable_functionalization = experiment_config["enable_functionalization"]
     return BenchmarkExperiment(
         accelerator=accelerator,
         xla=xla,
@@ -144,14 +150,17 @@ def load_experiment(self,
         torch_xla2=torch_xla2,
         keep_model_data_on_cuda=keep_model_data_on_cuda,
         test=test,
-        batch_size=batch_size)
+        batch_size=batch_size,
+        enable_functionalization=enable_functionalization,
+    )
 
 
 class BenchmarkExperiment:
 
   def __init__(self, accelerator: str, xla: Optional[str],
                xla_flags: Optional[str], dynamo: str, torch_xla2: bool,
-               keep_model_data_on_cuda: bool, test: str, batch_size: str):
+               keep_model_data_on_cuda: bool, test: str, batch_size: str,
+               enable_functionalization: bool):
     self.accelerator = accelerator
     self.xla = xla
     self.xla_flags = xla_flags
@@ -161,6 +170,7 @@ def __init__(self, accelerator: str, xla: Optional[str],
     self.test = test
     self.batch_size = batch_size
     self.accelerator_model = get_accelerator_model(self.accelerator)
+    self.enable_functionalization = enable_functionalization
 
   def update_process_env(self, process_env: Dict[str, str]):
 
@@ -192,6 +202,9 @@ def update_process_env(self, process_env: Dict[str, str]):
     if self.xla_flags:
       process_env["XLA_FLAGS"] = self.xla_flags
 
+    if not self.enable_functionalization:
+      process_env["XLA_DISABLE_FUNCTIONALIZATION"] = "1"
+
   def get_device(self):
     if self.torch_xla2:
       # Initiate the model in CPU first for xla2. We will move the model to jax device later.
@@ -236,4 +249,5 @@ def to_dict(self):
     d["keep_model_data_on_cuda"] = self.keep_model_data_on_cuda
     d["test"] = self.test
     d["batch_size"] = self.batch_size
+    d["enable_functionalization"] = self.enable_functionalization
     return d
diff --git a/benchmarks/experiment_runner.py b/benchmarks/experiment_runner.py
index 815b00ec8bc..42f5e248293 100644
--- a/benchmarks/experiment_runner.py
+++ b/benchmarks/experiment_runner.py
@@ -832,6 +832,11 @@ def __str__(self):
       help="""ID of the benchmark suite partition to be run. 
 Used to divide CI tasks""",
   )
+  parser.add_argument(
+      "--enable-functionalization",
+      action="store_true",
+      help="Enable the functionalization layer by default",
+  )
   parser.add_argument(
       "--dry-run",
       action="store_true",
diff --git a/test/benchmarks/run_torchbench_tests.sh b/test/benchmarks/run_torchbench_tests.sh
index 17cd61b5b29..560299e8486 100755
--- a/test/benchmarks/run_torchbench_tests.sh
+++ b/test/benchmarks/run_torchbench_tests.sh
@@ -81,18 +81,20 @@ function run_tests {
   #   export GPU_NUM_DEVICES=$num_devices
   #   pjrt_device="CUDA"
   # fi
-  for model in "${TORCHBENCH_MODELS[@]}"; do
-    echo "testing model: $model"
-    PJRT_DEVICE=$pjrt_device python -u benchmarks/experiment_runner.py \
-      --suite-name=torchbench \
-      --experiment-config='{"accelerator":"'"$pjrt_device"'","xla":"PJRT","dynamo":"openxla","test":"eval","torch_xla2":null,"xla_flags":null,"keep_model_data_on_cuda":false}' \
-      --model-config='{"model_name":"'"$model"'"}'
-    if [ $? -ne 0 ]; then
-      echo "ERROR: Failed to test $model. Exiting with failure." >&2
-      overall_status=1
-    else
-      success_count=$((success_count + 1))
-    fi
+  for functionalization in "true" "false"; do
+    for model in "${TORCHBENCH_MODELS[@]}"; do
+      echo "testing model: $model"
+      PJRT_DEVICE=$pjrt_device python -u benchmarks/experiment_runner.py \
+        --suite-name=torchbench \
+        --experiment-config='{"accelerator":"'"$pjrt_device"'","xla":"PJRT","dynamo":"openxla","test":"eval","torch_xla2":null,"xla_flags":null,"keep_model_data_on_cuda":false,"enable_functionalization": '"$functionalization"'}' \
+        --model-config='{"model_name":"'"$model"'"}'
+      if [ $? -ne 0 ]; then
+        echo "ERROR: Failed to test $model. Exiting with failure." >&2
+        overall_status=1
+      else
+        success_count=$((success_count + 1))
+      fi
+    done
   done
   return $overall_status
 }
diff --git a/test/benchmarks/test_benchmark_experiment.py b/test/benchmarks/test_benchmark_experiment.py
index afc3419e8b8..2c5efcd0583 100644
--- a/test/benchmarks/test_benchmark_experiment.py
+++ b/test/benchmarks/test_benchmark_experiment.py
@@ -7,9 +7,9 @@ class BenchmarkExperimentTest(unittest.TestCase):
 
   def test_to_dict(self):
     be = BenchmarkExperiment("cpu", "PJRT", "some xla_flags", "openxla", None,
-                             False, "train", "123")
+                             False, "train", "123", False)
     actual = be.to_dict()
-    self.assertEqual(9, len(actual))
+    self.assertEqual(10, len(actual))
     self.assertEqual("cpu", actual["accelerator"])
     self.assertTrue("accelerator_model" in actual)
     self.assertEqual("PJRT", actual["xla"])
@@ -19,6 +19,7 @@ def test_to_dict(self):
     self.assertEqual(False, actual["keep_model_data_on_cuda"])
     self.assertEqual("train", actual["test"])
     self.assertEqual("123", actual["batch_size"])
+    self.assertEqual(False, actual["enable_functionalization"])
 
 
 if __name__ == '__main__':
diff --git a/test/benchmarks/test_experiment_runner.py b/test/benchmarks/test_experiment_runner.py
index 89765b045c3..4ce4167d0e4 100644
--- a/test/benchmarks/test_experiment_runner.py
+++ b/test/benchmarks/test_experiment_runner.py
@@ -29,10 +29,10 @@ def test_dummy_dry_run(self):
     expected_in_stderr = [
         "Number of selected experiment configs: 4",
         "Number of selected model configs: 1",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
     ]
     for expected in expected_in_stderr:
       self.assertIn(expected, child.stderr)
@@ -57,10 +57,10 @@ def test_dummy_dry_run_cuda(self):
     expected_in_stderr = [
         "Number of selected experiment configs: 4",
         "Number of selected model configs: 1",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
     ]
     for expected in expected_in_stderr:
       self.assertIn(expected, child.stderr)
@@ -85,8 +85,8 @@ def test_dummy_dry_run_inductor_cuda(self):
     expected_in_stderr = [
         "Number of selected experiment configs: 2",
         "Number of selected model configs: 1",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
     ]
     for expected in expected_in_stderr:
       self.assertIn(expected, child.stderr)
@@ -112,10 +112,10 @@ def test_dummy_openxla_train_cuda(self):
     expected_in_stderr = [
         "Number of selected experiment configs: 4",
         "Number of selected model configs: 1",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
     ]
     for expected in expected_in_stderr:
       self.assertIn(expected, child.stderr)
@@ -137,14 +137,14 @@ def test_dummy_dynamo_none_cuda(self):
     expected_in_stderr = [
         "Number of selected experiment configs: 8",
         "Number of selected model configs: 1",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": null, \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": null, \"xla_flags\": null, \"dynamo\": \"inductor\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": false}",
     ]
     for expected in expected_in_stderr:
       self.assertIn(expected, child.stderr)
@@ -168,8 +168,32 @@ def test_dummy_dry_run_cuda_with_keep_model_data_on_cuda(self):
     expected_in_stderr = [
         "Number of selected experiment configs: 2",
         "Number of selected model configs: 1",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": true}",
-        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": true}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": true, \"enable_functionalization\": false}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cuda\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": true, \"enable_functionalization\": false}",
     ]
     for expected in expected_in_stderr:
       self.assertIn(expected, child.stderr)
+
+  def test_dummy_dry_run_with_functionalization(self):
+    child = subprocess.run([
+        "python",
+        EXPERIMENT_RUNNER_PY,
+        "--dynamo=openxla",
+        "--xla=PJRT",
+        "--test=eval",
+        "--test=train",
+        "--suite-name=dummy",
+        "--accelerator=cpu",
+        "--enable-functionalization",
+        "--dry-run",
+    ],
+                           capture_output=True,
+                           text=True)
+    expected_in_stderr = [
+        "Number of selected experiment configs: 2",
+        "Number of selected model configs: 1",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"eval\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": true}",
+        "--model-config={\"model_name\": \"dummy\"} --experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"torch_xla2\": null, \"test\": \"train\", \"keep_model_data_on_cuda\": false, \"enable_functionalization\": true}",
+    ]
+    for expected in expected_in_stderr:
+      self.assertIn(expected, child.stderr)
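
Note (not part of the commit): with this patch applied, every generated experiment
config carries an "enable_functionalization" field that defaults to false, and the
runner exports XLA_DISABLE_FUNCTIONALIZATION=1 into the benchmark subprocess whenever
the field is false. A hypothetical dry-run invocation for checking both paths by hand,
with flags taken verbatim from the tests above:

  # Mirrors test_dummy_dry_run_with_functionalization: the printed experiment
  # configs should contain "enable_functionalization": true.
  python -u benchmarks/experiment_runner.py \
    --suite-name=dummy --accelerator=cpu --xla=PJRT --dynamo=openxla \
    --test=eval --test=train --enable-functionalization --dry-run

Dropping --enable-functionalization keeps the new default, so the same command prints
configs with "enable_functionalization": false.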