forked from colbrydi/getexample
cp abaqus explicit to zeus examples directory
Chris Bording committed on Aug 16, 2017
1 parent aedefe8 · commit 0447b26
Showing 2 changed files with 170 additions and 0 deletions.
README
@@ -0,0 +1,68 @@
#!/bin/bash --login
# This README file is an executable script.
# To run it, type: ./README
#
# Example OBJECTIVE: to demonstrate how to run ABAQUS with a simple example
# on Zeus with the explicit solvers.
#
# E3.inp: This benchmark consists of forming a sheet metal part by the deep drawing process.
# The deformable sheet metal blank is meshed with shell elements of type S4R and uses an
# isotropic hardening Mises plasticity material model. The tools are meshed using surface
# elements of type SFM3D4R, which are declared rigid. General contact is defined between the
# blank and tools. The analysis sequence consists of two steps. During the first step the
# blank is clamped between the binder and die, and then during the second step the punch is
# displaced to form the part. Since the process is essentially quasi-static, the computations
# are performed over a sufficiently long time period to render inertial effects negligible.
# The performance of this analysis is a direct measure of the performance of the
# three-dimensional general contact algorithm.

cp $GE_DIR/src/abaqus/e3.inp .

# To run this code, load the necessary modules
# and specify the total number of MPI processes.

# SLURM directives
#
# Here we specify to SLURM that we want 2 nodes:
# "#SBATCH --nodes=2"
# Then, to specify the queue or "partition", use:
# "#SBATCH --partition=workq"
# To ensure a correctly defined environment, use:
# "#SBATCH --export=NONE"

# We need to load the abaqus module as shown below:
module load abaqus

# To submit the job to Zeus
echo "command to submit the slurm script is "
echo "sbatch abaqus_explicit.slurm"
jobid=$(sbatch abaqus_explicit.slurm | cut -d " " -f 4)
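# Alternatively, if your SLURM version supports it, sbatch's --parsable
# flag prints only the job id, which avoids the cut above:
#   jobid=$(sbatch --parsable abaqus_explicit.slurm)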

echo "The sbatch command returns what the jobid is for this job."
echo "To check the status of your job, use the slurm command:"
echo "squeue -u $USER"
echo " "
echo "Your job will be run in $MYSCRATCH/run_abaqus-mpi/${jobid}."
echo " "
echo "Your results will be saved in ${MYGROUP}/abaqus-mpi-results_zeus/${jobid}"
echo "and the scratch directory will then be deleted."
echo " "
echo "To check the results, change to your jobid directory; type:"
echo "cd ${MYGROUP}/abaqus-mpi-results_zeus/${jobid}"
echo " "
echo "To view the results, use the cat command on an output file, e.g.:"
echo "cat abaqus_e3_mpi.sta"
echo " "
echo " PAWSEY user shortcuts! "
echo '$MYSCRATCH' "is an environment variable; it is set to $MYSCRATCH"
echo '$MYGROUP' "is an environment variable; it is set to $MYGROUP"
echo " example: cd \$MYGROUP "
echo " "
echo "more information about Zeus/Zython can be found at:"
echo " https://support.pawsey.org.au/documentation/pages/viewpage.action?pageId=2162999"
echo " "
echo "more information about SLURM and aprun can be found at:"
echo " https://support.pawsey.org.au/documentation/display/US/Scheduling+and+Running+Jobs"
echo " "
echo " "
abaqus_explicit.slurm
@@ -0,0 +1,102 @@
#!/bin/bash --login
#SBATCH --job-name=e3_GE-abaqus-mpi
#SBATCH --partition=workq
#SBATCH --account=pawsey0001
#SBATCH --cpus-per-task=16
#SBATCH --nodes=2
#SBATCH --time=01:00:00
##SBATCH --output e3_abaqus-mpi.%j.out
##SBATCH --error e3_abaqus-mpi.%j.err
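# Note: SLURM only processes directives that start exactly with "#SBATCH";
# the "##SBATCH" output/error lines above are therefore ignored, and output
# goes to the default slurm-<jobid>.out file in the submission directory.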

# The default loaded compiler module is gcc;
# load the module needed to run ABAQUS.
module load abaqus

# Leave this in; when uncommented it lists the environment loaded by the modules.
#module list

# Note: SLURM_JOBID is a unique number for every job.
# These are generic variables
INPUT=e3
SRC_DIR=${SLURM_SUBMIT_DIR}
EXECUTABLE=abaqus
SCRATCH=$MYSCRATCH/run_abaqus-mpi/$SLURM_JOBID
RESULTS=$MYGROUP/abaqus-mpi-results_zeus/$SLURM_JOBID
unset SLURM_GTIDS
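# SLURM_GTIDS is presumably unset here because Abaqus' bundled MPI startup
# can misbehave when it detects SLURM's task-geometry variables (an
# assumption; the script itself does not document the reason).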
###############################################
# Create a unique directory in the SCRATCH directory for this job to run in.
if [ ! -d $SCRATCH ]; then
    mkdir -p $SCRATCH
fi
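# Note: the -d test above is belt-and-braces; "mkdir -p" alone already
# succeeds silently when the directory exists.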
echo SCRATCH is $SCRATCH

###############################################
# Create a unique directory in your GROUP directory for the results of this job.
if [ ! -d $RESULTS ]; then
    mkdir -p $RESULTS
fi
echo the results directory is $RESULTS

################################################
# declare the name of the output file or log file
OUTPUT=abaqus_${INPUT}_mpi

#############################################
# Copy input files to $SCRATCH
# then change directory to $SCRATCH
cp ${INPUT}.inp $SCRATCH

cd $SCRATCH

######################################
# create the abaqus_v6.env file
envFile=abaqus_v6.env

# The number of processors/cores per node
ncpus=${SLURM_CPUS_PER_TASK}
echo "cpus per task: $SLURM_CPUS_PER_TASK"
echo "number of cores per node: ${ncpus}"
# total number of cores
echo "total number of nodes: $SLURM_NNODES"
ncores=`expr $SLURM_NNODES \* $ncpus`
echo "total number of cores is $ncores"
# file holding the list of nodes
node_list=ABAQUS_NODES

srun hostname | sort > $node_list
echo "the node list is in file $node_list"
mp_host_list="["
for i in $(cat ${node_list}) ; do
    mp_host_list="${mp_host_list}['$i', $ncpus],"
done
echo host list ${mp_host_list}
mp_host_list=`echo ${mp_host_list} | sed -e "s/,$//"`
mp_host_list="${mp_host_list}]"

export mp_host_list
# Write the environment variables to abaqus_v6.env
echo "import os" > ${envFile}
echo "os.environ['ABA_BATCH_OVERRIDE'] = '1'" >> ${envFile}
echo "verbose=1" >> $envFile
echo "mp_host_list=${mp_host_list}" >> ${envFile}
echo "mp_mpi_implementation=PMPI" >> ${envFile}
#

abaqus job=$OUTPUT input=$INPUT cpus=$ncores \
    standard_parallel=all mp_mode=mpi interactive
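# The "interactive" option keeps the abaqus driver in the foreground until
# the analysis finishes, so the batch script does not reach the cleanup
# steps below while the solver is still running.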

#############################################
# Move the Abaqus output files to the unique results directory
# (note: this can be a copy or a move).
mv *.dat *.abq *.m* *.odb *.res *.p* *.s* ${RESULTS}
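# If one of the patterns matches nothing, bash passes it to mv literally
# and mv prints an error for that argument; the other matches are still
# moved.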

cd $HOME

###########################
# Clean up $SCRATCH

rm -r $SCRATCH

echo abaqus-mpi job finished at `date`