diff --git a/benchmarks/patches/mismatched_batch_size.patch b/benchmarks/patches/mismatched_batch_size.patch
index 8185150dfcd..b30c26c5fd7 100644
--- a/benchmarks/patches/mismatched_batch_size.patch
+++ b/benchmarks/patches/mismatched_batch_size.patch
@@ -2,7 +2,7 @@ diff --git a/torchbenchmark/util/model.py b/torchbenchmark/util/model.py
 index 8593ba4c..57fef507 100644
 --- a/torchbenchmark/util/model.py
 +++ b/torchbenchmark/util/model.py
-@@ -182,6 +182,7 @@ class BenchmarkModel(metaclass=PostInitProcessor):
+@@ -182,6 +182,8 @@ class BenchmarkModel(metaclass=PostInitProcessor):
          # use the device suggestion on CUDA inference tests, key should be either eval_batch_size or train_batch_size
          device_batch_size_key = f"{self.test}_batch_size"