From a7797ea61b704fad536b9b819c6207b9bbf8f56a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20K=C5=82=C3=B3dka?=
Date: Sun, 22 Jul 2018 22:25:07 +0200
Subject: [PATCH 1/2] script generation for result dumping

---
 mcpartools/generatemc.py                      |  3 +
 mcpartools/generator.py                       | 26 +++++++-
 mcpartools/mcengine/common.py                 |  6 +-
 mcpartools/mcengine/data/collect.sh           | 33 +++++++++-
 .../mcengine/data/dump_function_fluka.sh      |  2 +
 .../mcengine/data/dump_function_shieldhit.sh  | 22 +++++++
 mcpartools/mcengine/data/run_fluka.sh         |  2 +-
 mcpartools/mcengine/data/run_shieldhit.sh     |  4 +-
 mcpartools/mcengine/fluka.py                  |  6 +-
 mcpartools/mcengine/shieldhit.py              |  9 ++-
 mcpartools/scheduler/base.py                  | 58 +++++++++++++++--
 mcpartools/scheduler/common.py                |  6 +-
 mcpartools/scheduler/data/dump_slurm.sh       | 63 +++++++++++++++++++
 mcpartools/scheduler/data/dump_torque.sh      |  5 ++
 mcpartools/scheduler/data/run_slurm.sh        |  6 +-
 mcpartools/scheduler/data/run_torque.sh       |  6 +-
 mcpartools/scheduler/data/submit_slurm.sh     |  6 ++
 mcpartools/scheduler/slurm.py                 |  6 +-
 mcpartools/scheduler/torque.py                |  7 ++-
 tests/test_generatemc.py                      |  7 +++
 20 files changed, 254 insertions(+), 29 deletions(-)
 create mode 100644 mcpartools/mcengine/data/dump_function_fluka.sh
 create mode 100644 mcpartools/mcengine/data/dump_function_shieldhit.sh
 create mode 100644 mcpartools/scheduler/data/dump_slurm.sh
 create mode 100644 mcpartools/scheduler/data/dump_torque.sh

diff --git a/mcpartools/generatemc.py b/mcpartools/generatemc.py
index 55ade53..cb49efb 100644
--- a/mcpartools/generatemc.py
+++ b/mcpartools/generatemc.py
@@ -65,6 +65,9 @@ def main(args=sys.argv[1:]):
                         type=int,
                         required=True,
                         help='number of parallel jobs')
+    parser.add_argument('-D', '--dump',
+                        action='store_true',
+                        help='Generate dumping script')
     parser.add_argument('input',
                         type=str,
                         help='path to input configuration')
diff --git a/mcpartools/generator.py b/mcpartools/generator.py
index 8fcfc19..be359d4 100644
--- a/mcpartools/generator.py
+++ b/mcpartools/generator.py
@@ -92,6 +92,9 @@ def __init__(self, args):
         # no checks needed - argparse does it
         self.batch = args.batch
 
+        # no checks needed - argparse does it
+        self.dump = args.dump
+
     @property
     def valid(self):
         return self._valid
@@ -103,7 +106,8 @@ def __init__(self, options):
         self.mc_engine = EngineDiscover.get_mcengine(input_path=self.options.input_path,
                                                      mc_run_script=self.options.mc_run_template,
                                                      collect_method=self.options.collect,
-                                                     mc_engine_options=self.options.mc_engine_options)
+                                                     mc_engine_options=self.options.mc_engine_options,
+                                                     dump_opt=self.options.dump)
         # assigned in methods
         self.scheduler = None
         self.input_dir = None
@@ -120,14 +124,15 @@ def run(self):
 
         # get scheduler and pass main dir for log file
         if not self.options.batch:
-            self.scheduler = SchedulerDiscover.get_scheduler(self.options.scheduler_options, self.main_dir)
+            self.scheduler = SchedulerDiscover.get_scheduler(self.options.scheduler_options, self.options.dump,
+                                                             self.main_dir)
         else:
             # get desired scheduler class and pass arguments
             scheduler_class = [class_obj for class_obj in SchedulerDiscover.supported
                                if class_obj.id == self.options.batch]
             if scheduler_class:  # if not empty
                 # list should have only 1 element - that's why we call scheduler_class[0] (list is not callable)
-                self.scheduler = scheduler_class[0](self.options.scheduler_options)
+                self.scheduler = scheduler_class[0](self.options.scheduler_options, self.options.dump)
                 logger.info("Using: " + self.scheduler.id)
             else:
                 logger.error("Given scheduler: \'%s\' is not on the list of supported batch systems: %s",
@@ -140,6 +145,10 @@ def run(self):
         # generate submit script
         self.generate_submit_script()
 
+        # generate dump script
+        if self.options.dump:
+            self.generate_dump_script()
+
         # copy input files
         self.copy_input()
 
@@ -201,6 +210,17 @@ def generate_submit_script(self):
             jobs_no=self.options.jobs_no,
             workspace_dir=self.workspace_dir)
 
+    def generate_dump_script(self):
+        script_path = os.path.join(self.main_dir, self.scheduler.dump_script)
+        logger.debug("Preparation to generate " + script_path)
+        self.scheduler.write_dump_script(
+            main_dir=self.main_dir,
+            script_basename=self.scheduler.dump_script,
+            jobs_no=self.options.jobs_no,
+            workspace_dir=self.workspace_dir,
+            dump_function=self.mc_engine.dump_function,
+            dump_signal=self.mc_engine.dump_signal)
+
     def copy_input(self):
         indir_name = 'input'
         indir_path = os.path.join(self.main_dir, indir_name)
diff --git a/mcpartools/mcengine/common.py b/mcpartools/mcengine/common.py
index 8364f43..921b385 100644
--- a/mcpartools/mcengine/common.py
+++ b/mcpartools/mcengine/common.py
@@ -12,13 +12,13 @@ def __init__(self):
         pass
 
     @classmethod
-    def get_mcengine(cls, input_path, mc_run_script, collect_method, mc_engine_options):
+    def get_mcengine(cls, input_path, mc_run_script, collect_method, mc_engine_options, dump_opt):
         if os.path.isfile(input_path) and input_path.endswith('.inp'):
             logger.debug("Discovered MC engine FLUKA")
-            return Fluka(input_path, mc_run_script, collect_method, mc_engine_options)
+            return Fluka(input_path, mc_run_script, collect_method, mc_engine_options, dump_opt)
         elif os.path.isdir(input_path):
             logger.debug("Discovered MC engine SHIELDHIT")
-            return ShieldHit(input_path, mc_run_script, collect_method, mc_engine_options)
+            return ShieldHit(input_path, mc_run_script, collect_method, mc_engine_options, dump_opt)
         else:
             logger.error("Input file doesn't match available MC codes")
             return None
diff --git a/mcpartools/mcengine/data/collect.sh b/mcpartools/mcengine/data/collect.sh
index 83198ab..49c131c 100755
--- a/mcpartools/mcengine/data/collect.sh
+++ b/mcpartools/mcengine/data/collect.sh
@@ -3,11 +3,40 @@
 # Exit immediately if a simple command exits with a non-zero status.
 set -e
 
+function usage () {{
+    cat << EOF
+usage: $0 [-d <dump_dir>]
+where:
+    -d  collect results from the given directory
+EOF
+    exit 0
+}}
+
 INPUT_WILDCARD={output_dir:s}/workspace/job_*/{wildcard:s}
 OUTPUT_DIRECTORY={output_dir:s}/output
 
-# change working directory
-cd {output_dir:s}
+while getopts "d:" opt; do
+  case $opt in
+    d)
+      INPUT_WILDCARD="$OPTARG/job_*/*.bdo"
+      OUTPUT_DIRECTORY="$OPTARG/output"
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      usage
+      exit 1
+      ;;
+    :)
+      echo "Option -$OPTARG requires an argument." >&2
+      usage
+      exit 1
+      ;;
+    *)
+      usage
+      exit 1
+      ;;
+  esac
+done
 
 # make output folder
 mkdir -p $OUTPUT_DIRECTORY
diff --git a/mcpartools/mcengine/data/dump_function_fluka.sh b/mcpartools/mcengine/data/dump_function_fluka.sh
new file mode 100644
index 0000000..0c062f6
--- /dev/null
+++ b/mcpartools/mcengine/data/dump_function_fluka.sh
@@ -0,0 +1,2 @@
+echo "Not supported feature for fluka"
+exit 1
diff --git a/mcpartools/mcengine/data/dump_function_shieldhit.sh b/mcpartools/mcengine/data/dump_function_shieldhit.sh
new file mode 100644
index 0000000..a55834c
--- /dev/null
+++ b/mcpartools/mcengine/data/dump_function_shieldhit.sh
@@ -0,0 +1,22 @@
+function dump_function(){
+    echo -e "\n\t##############################\n"
+    echo "THIS FEATURE WILL ONLY WORK CORRECTLY WHEN SECOND ARGUMENT OF NSTAT"
+    echo "(Step of saving) IN FILE input/beam.dat IS SET TO -1"
+    echo -e "\n\t##############################\n"
+    echo "Waiting for results..."
+    sleep 10  # wait 10 sec for the results to dump; in the meantime the user can read the information above
+
+    for i in $WORKSPACE_DIR/* ; do  # Goes through all directories job_* in workspace
+        if [ -d "$i" ]; then
+            DUMP_SUBDIR=$DUMP_DIR`basename $i`
+            mkdir -p $DUMP_SUBDIR
+            find "$i" -name "*bdo" -exec cp -- "{}" $DUMP_SUBDIR \;  # copy output files to dump directory
+            BDO_NUM=$(ls -l "$DUMP_SUBDIR" | grep ".*.bdo" | wc -l 2>/dev/null)  # check number of bdo files copied
+            if [[ $BDO_NUM -eq 0 ]]; then
+                echo "Did not copy any files from `basename $i`. Most probably the job has not started yet"
+            else
+                echo "Copied $BDO_NUM .bdo files from `basename $i` to dump dir..."
+            fi
+        fi
+    done
+}
\ No newline at end of file
diff --git a/mcpartools/mcengine/data/run_fluka.sh b/mcpartools/mcengine/data/run_fluka.sh
index 3595927..1994a78 100755
--- a/mcpartools/mcengine/data/run_fluka.sh
+++ b/mcpartools/mcengine/data/run_fluka.sh
@@ -10,7 +10,7 @@ FLUKA_BIN={fluka_bin:s}
 cd {working_directory:s}
 
 # run rfluka
-$FLUKA_BIN -N0 -M1 {engine_options:s} {input_basename:s}
+$FLUKA_BIN -N0 -M1 {engine_options:s} {input_basename:s} 
 
 # each fluka run will save files with same name, in order to distinguish output from multiple runs
 # we rename output files, appending suffix with jobid to each of them
diff --git a/mcpartools/mcengine/data/run_shieldhit.sh b/mcpartools/mcengine/data/run_shieldhit.sh
index a8f395c..8c49c52 100755
--- a/mcpartools/mcengine/data/run_shieldhit.sh
+++ b/mcpartools/mcengine/data/run_shieldhit.sh
@@ -25,5 +25,7 @@ DETECT_FILE={detect_file:s}
 
 cd {working_directory:s}
 
 # execute simulation
-$SHIELDHIT_BIN --beamfile=$BEAM_FILE --geofile=$GEO_FILE --matfile=$MAT_FILE --detectfile=$DETECT_FILE -n $PARTICLE_NO -N $RNG_SEED {engine_options:s} $WORK_DIR
+$SHIELDHIT_BIN --beamfile=$BEAM_FILE --geofile=$GEO_FILE --matfile=$MAT_FILE --detectfile=$DETECT_FILE -n $PARTICLE_NO -N $RNG_SEED {engine_options:s} $WORK_DIR {dumping:s}
+# save PID of SHIELDHIT process so it can be read by main_run script
+PID=$!
diff --git a/mcpartools/mcengine/fluka.py b/mcpartools/mcengine/fluka.py
index d9f31d6..04299f9 100644
--- a/mcpartools/mcengine/fluka.py
+++ b/mcpartools/mcengine/fluka.py
@@ -10,9 +10,10 @@
 class Fluka(Engine):
 
     default_run_script_path = os.path.join('data', 'run_fluka.sh')
+    default_dump_function_path = os.path.join('data', 'dump_function_fluka.sh')
     output_wildcard = "*_fort*"
 
-    def __init__(self, input_path, mc_run_script, collect_method, mc_engine_options):
+    def __init__(self, input_path, mc_run_script, collect_method, mc_engine_options, dump_opt):
         Engine.__init__(self, input_path, mc_run_script, collect_method, mc_engine_options)
 
         # user didn't provided path to input scripts, use default
@@ -31,6 +32,9 @@ def __init__(self, input_path, mc_run_script, collect_method, mc_engine_options)
             in_fd.close()
 
         self.collect_script_content = resource_string(__name__, self.collect_script).decode('ascii')
+        self.dump_function = resource_string(__name__, self.default_dump_function_path).decode('ascii')
+        self.dump_signal = 'None'
+        self.dump_available = dump_opt
 
     @property
     def input_files(self):
diff --git a/mcpartools/mcengine/shieldhit.py b/mcpartools/mcengine/shieldhit.py
index 4323c33..0216ed2 100644
--- a/mcpartools/mcengine/shieldhit.py
+++ b/mcpartools/mcengine/shieldhit.py
@@ -10,9 +10,10 @@
 class ShieldHit(Engine):
 
     default_run_script_path = os.path.join('data', 'run_shieldhit.sh')
+    default_dump_function_path = os.path.join('data', 'dump_function_shieldhit.sh')
     output_wildcard = "*.bdo"
 
-    def __init__(self, input_path, mc_run_script, collect_method, mc_engine_options):
+    def __init__(self, input_path, mc_run_script, collect_method, mc_engine_options, dump_opt):
         Engine.__init__(self, input_path, mc_run_script, collect_method, mc_engine_options)
 
         # user didn't provided path to input scripts, use default
@@ -28,8 +29,11 @@ def __init__(self, input_path, mc_run_script, collect_method, mc_engine_options)
 
         self.collect_script_content = resource_string(__name__, self.collect_script).decode('ascii')
 
+        self.dump_function = resource_string(__name__, self.default_dump_function_path).decode('ascii')
         self.particle_no = 1
         self.rng_seed = 1
+        self.dump_signal = 'USR1'
+        self.dump_available = dump_opt
 
     @property
     def input_files(self):
@@ -63,7 +67,8 @@ def save_run_script(self, output_dir, job_id):
             beam_file=os.path.join(input_dir, os.path.basename(beam_file)),
             geo_file=os.path.join(input_dir, os.path.basename(geo_file)),
             mat_file=os.path.join(input_dir, os.path.basename(mat_file)),
-            detect_file=os.path.join(input_dir, os.path.basename(detect_file))
+            detect_file=os.path.join(input_dir, os.path.basename(detect_file)),
+            dumping="&" if self.dump_available else ""
         )
         out_file_name = "run.sh"
         out_file_path = os.path.join(output_dir, out_file_name)
diff --git a/mcpartools/scheduler/base.py b/mcpartools/scheduler/base.py
index 3f58efa..13ef5d8 100644
--- a/mcpartools/scheduler/base.py
+++ b/mcpartools/scheduler/base.py
@@ -5,7 +5,7 @@
 
 class JobScheduler:
 
-    def __init__(self, scheduler_options):
+    def __init__(self, scheduler_options, dump_opt):
         # check if user provided path to options file
         if scheduler_options is None:
             self.options_header = "# no user options provided"
@@ -22,9 +22,27 @@ def __init__(self, scheduler_options):
             self.options_header = "# no user options provided"
             self.options_args = scheduler_options[1:-1]
             logger.debug("Scheduler options argument:" + self.options_args)
+        self.dump_available = dump_opt
 
     submit_script = 'submit.sh'
     main_run_script = 'main_run.sh'
+    dump_script = 'dump.sh'
+    _dump_functions = {
+        'check_running': """# Check if executable is still running
+if [[ ! -z $PID ]]; then
+    IS_RUNNING=`eval ps -p $PID | wc -l`
+    while [[ $IS_RUNNING -eq 2 ]]; do
+        IS_RUNNING=`eval ps -p $PID | wc -l`
+        sleep 0.5
+    done
+fi""",
+        'trap_sig': """_term() {
+    echo Caught SIGUSR1 signal in main run script, resending!
+    kill -SIGUSR1 $PID 2>/dev/null
+}
+
+trap _term SIGUSR1"""
+    }
 
     def submit_script_body(self, jobs_no, main_dir, workspace_dir):
         from pkg_resources import resource_string
@@ -43,12 +61,34 @@ def submit_script_body(self, jobs_no, main_dir, workspace_dir):
                                              main_dir=main_dir,
                                              collect_script_name='collect.sh')
 
+    def dump_script_body(self, jobs_no, main_dir, workspace_dir, dump_function, dump_signal):
+        from pkg_resources import resource_string
+        tpl = resource_string(__name__, self.dump_script_template)
+        self.dump_script = tpl.decode('ascii')
+
+        return self.dump_script.format(options_args=self.options_args,
+                                       jobs_no=jobs_no,
+                                       workspace_dir=workspace_dir,
+                                       calculate_script_name='main_run.sh',
+                                       main_dir=main_dir,
+                                       collect_script_name='collect.sh',
+                                       dump_function=dump_function,
+                                       dump_signal=dump_signal)
+
     def main_run_script_body(self, jobs_no, workspace_dir):
         from pkg_resources import resource_string
         tpl = resource_string(__name__, self.main_run_script_template)
-        self.main_run_script = tpl.decode('ascii').format(options_header=self.options_header,
-                                                          workspace_dir=workspace_dir,
-                                                          jobs_no=jobs_no)
+        if self.dump_available:
+            self.main_run_script = tpl.decode('ascii').format(options_header=self.options_header,
+                                                              workspace_dir=workspace_dir,
+                                                              jobs_no=jobs_no,
+                                                              check_running=self._dump_functions["check_running"],
+                                                              trap_sig=self._dump_functions["trap_sig"])
+        else:
+            self.main_run_script = tpl.decode('ascii').format(options_header=self.options_header,
+                                                              workspace_dir=workspace_dir,
+                                                              check_running="",
+                                                              trap_sig="")
         return self.main_run_script
 
     def write_submit_script(self, main_dir, script_basename, jobs_no, workspace_dir):
@@ -63,6 +103,16 @@ def write_submit_script(self, main_dir, script_basename, jobs_no, workspace_dir)
         logger.debug("Jobs no " + str(jobs_no))
         logger.debug("Workspace " + abs_path_workspace)
 
+    def write_dump_script(self, main_dir, script_basename, jobs_no, workspace_dir, dump_function, dump_signal):
+        script_path = os.path.join(main_dir, script_basename)
+        fd = open(script_path, 'w')
+        abs_path_workspace = os.path.abspath(workspace_dir)
+        abs_path_main_dir = os.path.abspath(main_dir)
+        fd.write(self.dump_script_body(jobs_no, abs_path_main_dir, abs_path_workspace, dump_function, dump_signal))
+        fd.close()
+        os.chmod(script_path, 0o750)
+        logger.debug("Saved dump script: " + script_path)
+
     def write_main_run_script(self, jobs_no, output_dir):
         output_dir_abspath = os.path.abspath(output_dir)
         out_file_path = os.path.join(output_dir_abspath, self.main_run_script)
diff --git a/mcpartools/scheduler/common.py b/mcpartools/scheduler/common.py
index a7152b3..0e15d98 100644
--- a/mcpartools/scheduler/common.py
+++ b/mcpartools/scheduler/common.py
@@ -14,20 +14,20 @@ def __init__(self):
         pass
 
     @classmethod
-    def get_scheduler(cls, scheduler_options, log_location):
+    def get_scheduler(cls, scheduler_options, dump_opt, log_location):
         file_logger = logging.getLogger('file_logger')
         try:
             srun_output = check_output(['srun --version'], shell=True)
             file_logger.info("srun version: {}".format(srun_output[:-1]))
             logger.debug("Discovered job scheduler SLURM")
-            return Slurm(scheduler_options)
+            return Slurm(scheduler_options, dump_opt)
         except CalledProcessError as e:
logger.debug("Slurm not found: %s", e) try: qsub_output = check_output(['qsub --version'], shell=True) file_logger.info("qsub version: {}".format(qsub_output[:-1])) logger.debug("Discovered job scheduler Torque") - return Torque(scheduler_options) + return Torque(scheduler_options, dump_opt) except CalledProcessError as e: logger.debug("Torque not found: %s", e) raise SystemError("No known batch system found!") diff --git a/mcpartools/scheduler/data/dump_slurm.sh b/mcpartools/scheduler/data/dump_slurm.sh new file mode 100644 index 0000000..b7d8942 --- /dev/null +++ b/mcpartools/scheduler/data/dump_slurm.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +JOB_NUM={jobs_no:d} +RUN_DIR={main_dir:s} +DUMP_DIR="{main_dir:s}/dumped/`date '+%Y%m%d_%H%M%S'`/" +COLLECT=0 +COLLECT_BIN="{main_dir:s}/{collect_script_name:s} -d $DUMP_DIR" +WORKSPACE_DIR={workspace_dir:s} + +function usage () {{ + cat <] +where: + -c collect after dump +EOF + exit 0 +}} + +while getopts ":c" opt; do + case $opt in + c) + COLLECT=1 + ;; + \?) + echo "Invalid option: -$OPTARG" >&2 + usage + exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument." >&2 + usage + exit 1 + ;; + *) + usage + exit 1 + ;; + esac +done + +{dump_function:s} + +SIG_SENDER='scancel -b -s' +SIGNAL='{dump_signal:s}' +PID='' + +if [[ ! -z "$PID" ]] ; then + eval "$SIG_SENDER $SIGNAL $PID" 2>/dev/null + if [[ $? -ne 0 ]] ; then + echo "Nothing to dump from. All jobs finished" + exit 1 + fi + mkdir -p $DUMP_DIR + dump_function + if [[ $COLLECT -eq 1 ]]; then + echo "Collecting..." + $COLLECT_BIN + fi + echo "Results dumped to $DUMP_DIR" +else + echo "First run submit.sh and then try to dump results" +fi + diff --git a/mcpartools/scheduler/data/dump_torque.sh b/mcpartools/scheduler/data/dump_torque.sh new file mode 100644 index 0000000..b9b8415 --- /dev/null +++ b/mcpartools/scheduler/data/dump_torque.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# Exit immediately if a simple command exits with a non-zero status. +echo "Not supported future for torque" +exit 1 diff --git a/mcpartools/scheduler/data/run_slurm.sh b/mcpartools/scheduler/data/run_slurm.sh index 141dda2..dcf8b89 100755 --- a/mcpartools/scheduler/data/run_slurm.sh +++ b/mcpartools/scheduler/data/run_slurm.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash # Exit immediately if a simple command exits with a non-zero status. set -e - +{trap_sig:s} {options_header:s} # Run individual jobs -{workspace_dir:s}/job_`printf %04d $SLURM_ARRAY_TASK_ID`/run.sh +source {workspace_dir:s}/job_`printf %04d $SLURM_ARRAY_TASK_ID`/run.sh + +{check_running:s} \ No newline at end of file diff --git a/mcpartools/scheduler/data/run_torque.sh b/mcpartools/scheduler/data/run_torque.sh index a8734d5..b503166 100644 --- a/mcpartools/scheduler/data/run_torque.sh +++ b/mcpartools/scheduler/data/run_torque.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash # Exit immediately if a simple command exits with a non-zero status. 
 set -e
-
+{trap_sig:s}
 {options_header:s}
 
 # Run individual jobs
-{workspace_dir:s}/job_`printf %04d $PBS_ARRAYID`/run.sh
+source {workspace_dir:s}/job_`printf %04d $PBS_ARRAYID`/run.sh
+
+{check_running:s}
\ No newline at end of file
diff --git a/mcpartools/scheduler/data/submit_slurm.sh b/mcpartools/scheduler/data/submit_slurm.sh
index b060d3d..a674d6a 100755
--- a/mcpartools/scheduler/data/submit_slurm.sh
+++ b/mcpartools/scheduler/data/submit_slurm.sh
@@ -5,6 +5,9 @@
 LOGFILE="$(cd $(dirname $0) && pwd)/submit.log"
 echo -n "" > "$LOGFILE"
 
+# Submit script will change the PID variable in the dump script
+DUMPSCRIPT="$(cd $(dirname $0) && pwd)/dump.sh"
+
 # Create temporary files for parsing stdout and stderr output from sbatch command before storing them in submit.log
 OUT=`mktemp`
 ERR=`mktemp`
@@ -25,6 +28,9 @@ if [ $? -eq 0 ] ; then
     CALC_JOBID=`cat $OUT | cut -d ";" -f 1`
     echo "Job ID: $CALC_JOBID" >> "$LOGFILE"
     echo "Submission time: `date +"%Y-%m-%d %H:%M:%S"`" >> "$LOGFILE"
+    if [ -f $DUMPSCRIPT ]; then
+        sed -i "s/PID='[0-9]*'/PID='$CALC_JOBID'/g" $DUMPSCRIPT  # Find the PID variable in DUMPSCRIPT and fill it with CALC_JOBID
+    fi
 fi
 
 # If output from stderr isn't an empty string then log it as well to submit.log
diff --git a/mcpartools/scheduler/slurm.py b/mcpartools/scheduler/slurm.py
index b809ac2..ee236c8 100644
--- a/mcpartools/scheduler/slurm.py
+++ b/mcpartools/scheduler/slurm.py
@@ -7,9 +7,11 @@ class Slurm(JobScheduler):
 
     id = "slurm"
 
-    def __init__(self, options_content):
-        JobScheduler.__init__(self, options_content)
+    def __init__(self, options_content, dump_opt):
+        JobScheduler.__init__(self, scheduler_options=options_content, dump_opt=dump_opt)
 
     submit_script_template = os.path.join('data', 'submit_slurm.sh')
 
     main_run_script_template = os.path.join('data', 'run_slurm.sh')
+
+    dump_script_template = os.path.join('data', 'dump_slurm.sh')
diff --git a/mcpartools/scheduler/torque.py b/mcpartools/scheduler/torque.py
index 7fcf067..8d2fb51 100644
--- a/mcpartools/scheduler/torque.py
+++ b/mcpartools/scheduler/torque.py
@@ -4,12 +4,13 @@
 
 class Torque(JobScheduler):
-
     id = "torque"
 
-    def __init__(self, options_content):
-        JobScheduler.__init__(self, scheduler_options=options_content)
+    def __init__(self, options_content, dump_opt):
+        JobScheduler.__init__(self, scheduler_options=options_content, dump_opt=dump_opt)
 
     submit_script_template = os.path.join('data', 'submit_torque.sh')
 
     main_run_script_template = os.path.join('data', 'run_torque.sh')
+
+    dump_script_template = os.path.join('data', 'dump_torque.sh')
diff --git a/tests/test_generatemc.py b/tests/test_generatemc.py
index e686a5c..2def7ed 100644
--- a/tests/test_generatemc.py
+++ b/tests/test_generatemc.py
@@ -38,6 +38,13 @@ def test_slurm_shieldhit_input_ok(self):
         self.assertEqual(ret_code, 0)
         shutil.rmtree(working_dir)
 
+    def test_slurm_shieldhit_dump_input_ok(self):
+        working_dir = tempfile.mkdtemp()  # make temp working dir
+        shieldhit_input = os.path.join(self.main_dir, "shieldhit")
+        ret_code = generatemc.main(["-D", "-j", "2", "-p", "100", "-w", working_dir, "-b", "slurm", shieldhit_input])
+        self.assertEqual(ret_code, 0)
+        shutil.rmtree(working_dir)
+
     def test_slurm_shieldhit_scheduler_options(self):
         working_dir = tempfile.mkdtemp()  # make temp working dir
         shieldhit_input = os.path.join(self.main_dir, "shieldhit")

From d814456df29e59cc3d8feeab5a7c1aa6ce3e6639 Mon Sep 17 00:00:00 2001
From: Kudyyy
Date: Fri, 23 Nov 2018 19:42:49 +0100
Subject: [PATCH 2/2] Fix typo

---
 mcpartools/scheduler/data/dump_torque.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mcpartools/scheduler/data/dump_torque.sh b/mcpartools/scheduler/data/dump_torque.sh
index b9b8415..3c6a7e6 100644
--- a/mcpartools/scheduler/data/dump_torque.sh
+++ b/mcpartools/scheduler/data/dump_torque.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
 
 # Exit immediately if a simple command exits with a non-zero status.
-echo "Not supported future for torque"
+echo "Not supported feature for torque"
 exit 1