#!/bin/sh
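# Resource request below: 4 nodes x 3 MPI tasks per node = 12 MPI ranks,
# 8 CPUs per task for OpenMP threads, and 60000 MB of memory per node.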
#SBATCH --partition=valhalla --qos=valhalla
#SBATCH --clusters=faculty
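# Alternative partition/QOS/cluster choices (uncomment to use instead of the valhalla/faculty lines above):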
##SBATCH --partition=general-compute --qos=general-compute
##SBATCH --clusters=ub-hpc
##SBATCH --partition=scavenger --qos=scavenger
#SBATCH --time=00:30:00
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=3
#SBATCH --ntasks=12
#SBATCH --cpus-per-task=8
#SBATCH --mem=60000
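# Record the Slurm job environment in the job output log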
echo "SLURM_JOBID="$SLURM_JOBID
echo "SLURM_JOB_NODELIST="$SLURM_JOB_NODELIST
echo "SLURM_NNODES="$SLURM_NNODES
echo "SLURMTMPDIR="$SLURMTMPDIR
echo "working directory="$SLURM_SUBMIT_DIR
# Load the modules needed to run dynemol (Intel oneAPI compilers, Intel MPI, MKL)
module load intel-oneapi
module load intel-mpi/2020.2
module load mkl/2020.2
# (If dynemol was built with GNU compilers, load the matching GCC/MPI modules above instead.)
# In Slurm environments, point Intel MPI at the system PMI library so srun can launch the ranks
export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi.so
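# Use one OpenMP thread per CPU allocated to each MPI task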
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
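# Dynemol installation directory and the working directory for this run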
export DYNEMOLDIR="/projects/academic/cyberwksp21/Software/dynemol/Dynemol-MPI"
export DYNEMOLWORKDIR=$SLURM_SUBMIT_DIR
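# Launch one dynemol process per MPI task via srun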
srun $DYNEMOLDIR/dynemol
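# Submit this script with: sbatch submit-MPI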