diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index 5197e41008..9c26216022 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -46,7 +46,7 @@ jobs:
       - name: Test with pytest
         shell: bash -el {0}
         run: |
-          python -m pytest ./hnn_core/tests/ --cov=hnn_core --cov-report=xml
+          python -m pytest ./hnn_core/tests/test_dipole.py --cov=hnn_core --cov-report=xml -s
       # - name: Upload coverage to Codecov
       #   shell: bash -el {0}
       #   run: |
diff --git a/hnn_core/mpi_child.py b/hnn_core/mpi_child.py
index c44322481e..00ca850d9e 100644
--- a/hnn_core/mpi_child.py
+++ b/hnn_core/mpi_child.py
@@ -8,12 +8,29 @@
 import pickle
 import base64
 import re
-
+import logging
+import os
+# import debugpy
+# debugpy.listen(("localhost", 5679))  # Use a different port for the subprocess
+# print("Waiting for debugger to attach to subprocess...")
+# debugpy.wait_for_client()
+# print("Debugger attached to subprocess")
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+logger.info("MPICHILD.PY")
+virtual_env = os.getenv('VIRTUAL_ENV')
+logger.info(f"MPICHILD.PY {virtual_env}")
+if virtual_env is not None:
+    # Extract the name of the virtual environment from the path
+    env_name = os.path.basename(virtual_env)
+    print(f"MPICHILD.PY Running in virtual environment: {env_name}")
+else:
+    print("MPICHILD.PY Not running in a virtual environment")
 from hnn_core.parallel_backends import _extract_data, _extract_data_length
-import logging
-logging.basicConfig(filename=f'k_mpi_log.txt', level=logging.DEBUG)
-logger = logging.getLogger()
+
+
 
 
 def _pickle_data(sim_data):
     # pickle the data and encode as base64 before sending to stderr
@@ -159,7 +176,9 @@ def run(self, net, tstop, dt, n_trials):
 
 if __name__ == '__main__':
     """This file is called on command-line from nrniv"""
-    logger.info("MPICHILD.PY")
+
+    # logging.basicConfig(filename=f'k_mpi_log.txt', level=logging.DEBUG)
+
     import traceback
 
     rc = 0
diff --git a/hnn_core/parallel_backends.py b/hnn_core/parallel_backends.py
index bbb941b3c6..9296a7d373 100644
--- a/hnn_core/parallel_backends.py
+++ b/hnn_core/parallel_backends.py
@@ -23,8 +23,16 @@ _BACKEND = None
 import logging
 logging.basicConfig(filename=f'k_parallel_backends_log.txt', level=logging.DEBUG)
-logger = logging.getLogger()
-import subprocess
+logger = logging.getLogger(__name__)
+logger.info("parallel_backends.py")
+virtual_env = os.getenv('VIRTUAL_ENV')
+logger.info(f"parallel_backends.py {virtual_env}")
+if virtual_env is not None:
+    # Extract the name of the virtual environment from the path
+    env_name = os.path.basename(virtual_env)
+    print(f"parallel_backends.py Running in virtual environment: {env_name}")
+else:
+    print("parallel_backends.py Not running in a virtual environment")
 
 
 def _thread_handler(event, out, queue):
     while not event.is_set():
@@ -128,10 +136,10 @@ def run_subprocess(command, obj, timeout, proc_queue=None, *args, **kwargs):
     threads_started = False
 
     try:
-        logger.info(f"Parallel backends Popen")
-        proc = Popen(command, stdin=subprocess.PIPE,
-                     stdout=subprocess.PIPE,
-                     stderr=subprocess.PIPE,
+        logger.info(f"Parallel backends Popen")
+        proc = Popen(command, stdin=PIPE,
+                     stdout=PIPE,
+                     stderr=PIPE,
                      *args, **kwargs)
 
         logger.info(f"Parallel backends after Popen")
@@ -156,16 +164,16 @@ def run_subprocess(command, obj, timeout, proc_queue=None, *args, **kwargs):
 
         # loop while the process is running the simulation
         while True:
-            logger.info(f"Parallel backends proc.poll()")
+            # logger.info(f"Parallel backends proc.poll()")
             child_terminated = proc.poll() is not None
             if not data_received:
                 if _echo_child_output(out_q):
                     count_since_last_output = 0
-                    logger.info(f" if _echo_child_output ")
+                    # logger.info(f" if _echo_child_output ")
                 else:
                     count_since_last_output += 1
-                    logger.info(f" if no _echo_child_output ")
+                    # logger.info(f" if no _echo_child_output ")
 
                 # look for data in stderr and print child stdout
                 data_len, proc_data_bytes = _get_data_from_child_err(err_q)
                 if data_len > 0:
@@ -179,21 +187,23 @@ def run_subprocess(command, obj, timeout, proc_queue=None, *args, **kwargs):
                     kill_proc_name('nrniv')
                 logger.info(f"child_terminated ")
                 break
-            else:
-                logger.info(f"Nothing happend ")
+            # else:
+            #     logger.info(f"Nothing happend ")
 
             if not sent_network:
                 # Send network object to child so it can start
                 try:
                     logger.info(f"if not sent_network try ")
                     _write_net(proc.stdin, pickled_obj)
-                except BrokenPipeError:
+                except BrokenPipeError as e:
                     # child failed during _write_net(). get the
                     # output and break out of loop on the next
                     # iteration
+                    import traceback
                     logger.info(f"if not sent_network except")
                     warn("Received BrokenPipeError exception. "
                          "Child process failed unexpectedly")
+                    traceback.print_exc()
                     continue
                 else:
                     logger.info(f"if not sent_network else ")
@@ -231,21 +241,23 @@ def run_subprocess(command, obj, timeout, proc_queue=None, *args, **kwargs):
 
     # wait for the process to terminate. we need use proc.communicate to
     # read any output at its end of life.
     try:
-        outs, errs = proc.communicate(timeout=1)
+        outs, errs = proc.communicate(timeout=2)
         if proc.returncode != 0:
             logger.info(f"Error: {errs}")
-        else:
             logger.info(f"Output:{outs}")
+        logger.info(f"proc.communicate(timeout=1)")
     except TimeoutExpired:
         proc.kill()
         # wait for output again after kill signal
-        outs, errs = proc.communicate(timeout=1)
+        outs, errs = proc.communicate(timeout=2)
         if proc.returncode != 0:
             logger.info(f"Error: {errs}")
         else:
             logger.info(f"Output:{outs}")
         logger.info(f"except TimeoutExpired")
+    except Exception as e:
+        logger.info(f"An error occurred: {e}")
 
     sys.stdout.write(outs)
     sys.stdout.write(errs)
@@ -383,8 +395,8 @@ def _has_psutil():
 
 def requires_mpi4py(function):
     """Decorator for testing functions that require MPI."""
+    print("INIT requires_mpi4py")
     import pytest
-
     try:
         import mpi4py
         assert hasattr(mpi4py, '__version__')
@@ -400,6 +412,7 @@ def requires_mpi4py(function):
 
 def requires_psutil(function):
     """Decorator for testing functions that require psutil."""
+    print("INIT requires_psutil")
     import pytest
 
     try:
@@ -680,10 +693,11 @@ def __init__(self, n_procs=None, mpi_cmd='mpiexec'):
         self.mpi_cmd += ' -np ' + str(self.n_procs)
 
         #self.mpi_cmd += ' /mnt/c/Projects/Github/hnn-core/venv/bin/nrniv -python -mpi -nobanner ' + \
-        self.mpi_cmd += ' nrniv -python -mpi -nobanner ' + \
+        self.mpi_cmd += ' /mnt/c/Projects/Github/hnn-core/venv/bin/nrniv -python -mpi -nobanner ' + \
             sys.executable + ' ' + \
             os.path.join(os.path.dirname(sys.modules[__name__].__file__),
                          'mpi_child.py')
+        logger.info(self.mpi_cmd)
 
         # Split the command into shell arguments for passing to Popen
         if 'win' in sys.platform:
diff --git a/hnn_core/tests/test_dipole.py b/hnn_core/tests/test_dipole.py
index a753608e59..63b2755ae7 100644
--- a/hnn_core/tests/test_dipole.py
+++ b/hnn_core/tests/test_dipole.py
@@ -209,7 +209,7 @@ def test_dipole_simulation():
 @requires_psutil
 def test_cell_response_backends(run_hnn_core_fixture):
     """Test cell_response outputs across backends."""
-
+    print("INIT test_cell_response_backends")
     # reduced simulation has n_trials=2
     trial_idx, n_trials, gid = 0, 2, 7
     _, joblib_net = run_hnn_core_fixture(backend='joblib', n_jobs=1,
diff --git a/pytest.ini b/pytest.ini
index c00948689c..f8b79ea5bb 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,3 +1,4 @@
 [pytest]
 markers =
     incremental: run tests with prerequisites in incremental order
+addopts = --cov=hnn_core --cov-report=xml -s