Skip to content

Commit

Permalink
Fix nb tests (#2230)
Browse files Browse the repository at this point in the history
* Fix nb tests

* Include bnb import

* pprint

* Try this time

* greater than zero

* Fix test

* bnb

* Clean
  • Loading branch information
muellerzr authored Dec 11, 2023
1 parent 694f2e2 commit eff30e2
Show file tree
Hide file tree
Showing 5 changed files with 22 additions and 19 deletions.
2 changes: 1 addition & 1 deletion src/accelerate/launchers.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def train(*args):
)
# Check for specific libraries known to initialize CUDA that users constantly use
problematic_imports = are_libraries_initialized("bitsandbytes")
if len(problematic_imports) > 1:
if len(problematic_imports) > 0:
err = (
"Could not start distributed process. Libraries known to initialize CUDA upon import have been "
"imported already. Please keep these imports inside your training function to try and help with this:"
Expand Down
10 changes: 8 additions & 2 deletions src/accelerate/test_utils/scripts/test_notebook.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def basic_function():
print(f"PartialState:\n{PartialState()}")


NUM_PROCESSES = os.environ.get("ACCELERATE_NUM_PROCESSES", 1)
NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1))


def test_can_initialize():
Expand All @@ -22,7 +22,9 @@ def test_can_initialize():

@require_bnb
def test_problematic_imports():
with raises(AssertionError, match="Please keep these imports"):
with raises(RuntimeError, match="Please keep these imports"):
import bitsandbytes as bnb # noqa: F401

notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)


Expand All @@ -32,3 +34,7 @@ def main():
if is_bnb_available():
print("Test problematic imports (bnb)")
test_problematic_imports()


if __name__ == "__main__":
main()
2 changes: 1 addition & 1 deletion src/accelerate/utils/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def are_libraries_initialized(*library_names: str) -> Dict[str, bool]:
"""
Checks if any of `library_names` are imported in the environment. Will return results as a `key:bool` pair.
"""
return [lib_name for lib_name in library_names if lib_name in sys.modules]
return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]


def get_gpu_info():
Expand Down
12 changes: 12 additions & 0 deletions tests/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import (
require_multi_gpu,
require_timm,
require_transformers,
run_command,
Expand All @@ -40,6 +41,7 @@ class AccelerateLauncherTester(unittest.TestCase):

mod_file = inspect.getfile(accelerate.test_utils)
test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
notebook_launcher_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_notebook.py"])

base_cmd = ["accelerate", "launch"]
config_folder = Path.home() / ".cache/huggingface/accelerate"
Expand Down Expand Up @@ -87,6 +89,16 @@ def test_invalid_keys(self):
def test_accelerate_test(self):
execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())

@require_multi_gpu
def test_notebook_launcher(self):
"""
This test checks a variety of situations and scenarios
with the `notebook_launcher`
"""
cmd = ["python", self.notebook_launcher_path]
with patch_environment(omp_num_threads=1, accelerate_num_processes=2):
run_command(cmd, env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
"""
Expand Down
15 changes: 0 additions & 15 deletions tests/test_multigpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
from accelerate import Accelerator
from accelerate.big_modeling import dispatch_model
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu
from accelerate.test_utils.testing import run_command
from accelerate.utils import patch_environment


Expand All @@ -34,9 +33,6 @@ def setUp(self):
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
)
self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
self.notebook_launcher_path = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_notebook.py"]
)

@require_multi_gpu
def test_multi_gpu(self):
Expand Down Expand Up @@ -70,17 +66,6 @@ def test_distributed_data_loop(self):
with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
execute_subprocess_async(cmd, env=os.environ.copy())

@require_multi_gpu
def test_notebook_launcher(self):
"""
This test checks a variety of situations and scenarios
with the `notebook_launcher`
"""
cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.notebook_launcher_path]
print(f"Running {cmd}")
with patch_environment(omp_num_threads=1):
run_command(cmd, env=os.environ.copy())


if __name__ == "__main__":
accelerator = Accelerator()
Expand Down

0 comments on commit eff30e2

Please sign in to comment.