From 2defe7921d0f7fc80636cba06239b68cb22c476e Mon Sep 17 00:00:00 2001
From: Yukio Siraichi
Date: Wed, 28 Feb 2024 11:38:12 -0300
Subject: [PATCH 1/4] Remove leftover `self` argument.

---
 benchmarks/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmarks/util.py b/benchmarks/util.py
index 21f3736d91a..56896b9c1c8 100644
--- a/benchmarks/util.py
+++ b/benchmarks/util.py
@@ -158,7 +158,7 @@ def get_torchbench_test_name(test):
   return {"train": "training", "eval": "inference"}[test]
 
 
-def find_near_file(self, names):
+def find_near_file(names):
   """Find a file near the current directory.
 
   Looks for `names` in the current directory, up to its two direct parents.

From 89981fb62c45b1cee06379688d38d9f0f8b31211 Mon Sep 17 00:00:00 2001
From: Yukio Siraichi
Date: Thu, 29 Feb 2024 10:04:13 -0300
Subject: [PATCH 2/4] More fixes.

---
 benchmarks/torchbench_model.py | 8 ++++++--
 benchmarks/util.py             | 2 +-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/benchmarks/torchbench_model.py b/benchmarks/torchbench_model.py
index c791cd8ca41..5b0cb969356 100644
--- a/benchmarks/torchbench_model.py
+++ b/benchmarks/torchbench_model.py
@@ -4,7 +4,6 @@
 import importlib
 import logging
 import os
-from os.path import abspath, exists
 import sys
 import torch
 import torch.amp
@@ -306,7 +305,7 @@ def batch_size(self):
   def load_benchmark(self):
     cant_change_batch_size = (
         not getattr(self.benchmark_cls(), "ALLOW_CUSTOMIZE_BSIZE", True) or
-        model_name in config_data()["dont_change_batch_size"])
+        self.model_name in config_data()["dont_change_batch_size"])
 
     if cant_change_batch_size:
       self.benchmark_experiment.batch_size = None
@@ -318,6 +317,11 @@ def load_benchmark(self):
     elif self.is_inference(
     ) and self.model_name in self.batch_size["inference"]:
       batch_size = self.batch_size["inference"][self.model_name]
+    else:
+      # This should work, since TorchBench relies on class variables:
+      # DEFAULT_TRAIN_BSIZE and DEFAULT_EVAL_BSIZE for setting the default
+      # batch size, instead of default arguments.
+      batch_size = None
 
     # workaround "RuntimeError: not allowed to set torch.backends.cudnn flags"
     # torch.backends.__allow_nonbracketed_mutation_flag = True
diff --git a/benchmarks/util.py b/benchmarks/util.py
index 56896b9c1c8..ce56ceb4143 100644
--- a/benchmarks/util.py
+++ b/benchmarks/util.py
@@ -3,7 +3,7 @@
 import logging
 import numpy as np
 import os
-from os.path import abspath
+from os.path import abspath, exists
 import random
 import subprocess
 import torch

From 7bfab7faae416ece06e75d62a9f3b6416345eea2 Mon Sep 17 00:00:00 2001
From: Yukio Siraichi
Date: Thu, 29 Feb 2024 10:07:24 -0300
Subject: [PATCH 3/4] Use user provided `batch_size` as default.

---
 benchmarks/torchbench_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmarks/torchbench_model.py b/benchmarks/torchbench_model.py
index 5b0cb969356..55ef5cb67d3 100644
--- a/benchmarks/torchbench_model.py
+++ b/benchmarks/torchbench_model.py
@@ -321,7 +321,7 @@ def load_benchmark(self):
       # This should work, since TorchBench relies on class variables:
       # DEFAULT_TRAIN_BSIZE and DEFAULT_EVAL_BSIZE for setting the default
       # batch size, instead of default arguments.
-      batch_size = None
+      batch_size = self.benchmark_experiment.batch_size
 
     # workaround "RuntimeError: not allowed to set torch.backends.cudnn flags"
     # torch.backends.__allow_nonbracketed_mutation_flag = True

From 15b6e79e6f979b2905d5d4fe4774149638e4e5c4 Mon Sep 17 00:00:00 2001
From: Yukio Siraichi
Date: Thu, 29 Feb 2024 10:09:43 -0300
Subject: [PATCH 4/4] Better `batch_size` selection.

---
 benchmarks/torchbench_model.py | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/benchmarks/torchbench_model.py b/benchmarks/torchbench_model.py
index 55ef5cb67d3..999a251f2c9 100644
--- a/benchmarks/torchbench_model.py
+++ b/benchmarks/torchbench_model.py
@@ -310,18 +310,14 @@ def load_benchmark(self):
     if cant_change_batch_size:
       self.benchmark_experiment.batch_size = None
 
-    if self.benchmark_experiment.batch_size is not None:
-      batch_size = self.benchmark_experiment.batch_size
-    elif self.is_training() and self.model_name in self.batch_size["training"]:
-      batch_size = self.batch_size["training"][self.model_name]
-    elif self.is_inference(
-    ) and self.model_name in self.batch_size["inference"]:
-      batch_size = self.batch_size["inference"][self.model_name]
-    else:
-      # This should work, since TorchBench relies on class variables:
-      # DEFAULT_TRAIN_BSIZE and DEFAULT_EVAL_BSIZE for setting the default
-      # batch size, instead of default arguments.
-      batch_size = self.benchmark_experiment.batch_size
+    batch_size = self.benchmark_experiment.batch_size
+
+    if batch_size is None:
+      if self.is_training() and self.model_name in self.batch_size["training"]:
+        batch_size = self.batch_size["training"][self.model_name]
+      elif self.is_inference(
+      ) and self.model_name in self.batch_size["inference"]:
+        batch_size = self.batch_size["inference"][self.model_name]
 
     # workaround "RuntimeError: not allowed to set torch.backends.cudnn flags"
     # torch.backends.__allow_nonbracketed_mutation_flag = True
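
Note (not part of the patches): a minimal standalone sketch of the batch-size precedence that PATCH 4/4 converges on, assuming the `is_training()`/`is_inference()` distinction and the per-model config dictionary seen in the hunks above; the helper name `resolve_batch_size` and its parameters are illustrative only, not code from the repository:

    def resolve_batch_size(experiment_batch_size, per_model_config, test, model_name):
      # 1. A batch size explicitly requested for the experiment always wins.
      if experiment_batch_size is not None:
        return experiment_batch_size
      # 2. Otherwise, use the per-model entry for "training" or "inference", if present.
      if model_name in per_model_config.get(test, {}):
        return per_model_config[test][model_name]
      # 3. Otherwise, return None so TorchBench falls back to its class-level defaults
      #    (DEFAULT_TRAIN_BSIZE / DEFAULT_EVAL_BSIZE), per the comment added in PATCH 2/4.
      return None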