From 9a5977c534eda9289f12134921800cac1d786cd3 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Tue, 8 Oct 2024 23:39:09 -0400
Subject: [PATCH] [FEAT][Tests]

---
 clusterops/__init__.py |  8 +++-
 example.py             | 28 +++++++++++++
 tests/main.py          | 91 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 126 insertions(+), 1 deletion(-)
 create mode 100644 example.py
 create mode 100644 tests/main.py

diff --git a/clusterops/__init__.py b/clusterops/__init__.py
index f14e5f4..f0d10af 100644
--- a/clusterops/__init__.py
+++ b/clusterops/__init__.py
@@ -6,4 +6,10 @@
     execute_on_multiple_gpus,
 )
 
-__all__ = ["list_available_cpus", "execute_with_cpu_cores", "list_available_gpus", "execute_on_gpu", "execute_on_multiple_gpus"]
+__all__ = [
+    "list_available_cpus",
+    "execute_with_cpu_cores",
+    "list_available_gpus",
+    "execute_on_gpu",
+    "execute_on_multiple_gpus",
+]
diff --git a/example.py b/example.py
new file mode 100644
index 0000000..2d54509
--- /dev/null
+++ b/example.py
@@ -0,0 +1,28 @@
+from clusterops import (
+    list_available_cpus,
+    execute_with_cpu_cores,
+    list_available_gpus,
+    execute_on_gpu,
+    execute_on_multiple_gpus,
+    execute_on_cpu,
+)
+
+
+# Example function to run
+def sample_task(n: int) -> int:
+    return n * n
+
+
+# List CPUs and execute on CPU 0
+cpus = list_available_cpus()
+execute_on_cpu(0, sample_task, 10)
+
+# List CPUs and execute using 4 CPU cores
+execute_with_cpu_cores(4, sample_task, 10)
+
+# List GPUs and execute on GPU 0
+gpus = list_available_gpus()
+execute_on_gpu(0, sample_task, 10)
+
+# Execute across multiple GPUs
+execute_on_multiple_gpus([0, 1], sample_task, 10)
diff --git a/tests/main.py b/tests/main.py
new file mode 100644
index 0000000..ce14efc
--- /dev/null
+++ b/tests/main.py
@@ -0,0 +1,91 @@
+import pytest
+from unittest.mock import patch, MagicMock
+from clusterops import (
+    list_available_cpus,
+    execute_with_cpu_cores,
+    list_available_gpus,
+    execute_on_gpu,
+    execute_on_multiple_gpus,
+    execute_on_cpu,
+)
+
+
+# Example function to run
+def sample_task(n: int) -> int:
+    return n * n
+
+
+# Mock the environment for pytest
+@pytest.fixture
+def mock_psutil():
+    with patch("psutil.cpu_count", return_value=12):
+        with patch("psutil.Process") as mock_process:
+            mock_process.return_value.cpu_affinity = MagicMock()
+            yield
+
+
+@pytest.fixture
+def mock_gputil():
+    with patch("GPUtil.getGPUs") as mock_get_gpus:
+        mock_get_gpus.return_value = [
+            MagicMock(
+                id=0,
+                name="GPU 0",
+                memoryFree=10000,
+                memoryTotal=16000,
+            ),
+            MagicMock(
+                id=1, name="GPU 1", memoryFree=8000, memoryTotal=16000
+            ),
+        ]
+        yield
+
+
+@pytest.fixture
+def mock_ray():
+    with patch("ray.init"), patch("ray.remote") as mock_remote, patch(
+        "ray.get"
+    ):
+        mock_remote.return_value = MagicMock(return_value=sample_task)
+        yield
+
+
+# Test listing available CPUs
+def test_list_available_cpus(mock_psutil):
+    cpus = list_available_cpus()
+    assert cpus == list(range(12)), "Should list 12 CPU cores."
+
+
+# Test executing a function on a specific CPU
+def test_execute_on_cpu(mock_psutil):
+    result = execute_on_cpu(0, sample_task, 10)
+    assert result == 100, "Expected task result to be 100."
+
+
+# Test executing with multiple CPU cores
+def test_execute_with_cpu_cores(mock_psutil):
+    result = execute_with_cpu_cores(4, sample_task, 10)
+    assert result == 100, "Expected task result to be 100."
+
+
+# Test listing available GPUs
+def test_list_available_gpus(mock_gputil):
+    gpus = list_available_gpus()
+    assert len(gpus) == 2, "Should list 2 available GPUs."
+    assert gpus[0]["name"] == "GPU 0"
+    assert gpus[1]["name"] == "GPU 1"
+
+
+# Test executing on a specific GPU
+def test_execute_on_gpu(mock_gputil, mock_ray):
+    result = execute_on_gpu(0, sample_task, 10)
+    assert result == 100, "Expected task result to be 100 on GPU 0."
+
+
+# Test executing on multiple GPUs
+def test_execute_on_multiple_gpus(mock_gputil, mock_ray):
+    results = execute_on_multiple_gpus([0, 1], sample_task, 10)
+    assert len(results) == 2, "Expected results from 2 GPUs."
+    assert all(
+        result == 100 for result in results
+    ), "Expected task results to be 100 on all GPUs."