Example job scripts
- x2.compute8.mpi8
#!/bin/tcsh
#PBS -l nodes=1:ppn=16:compute8,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output and go to working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version
#-------------------------------------------------------------------------------
# Get the hostfile; remove a stale one first
if ( -e mpd.hosts ) then
    echo 'Delete mpd.hosts!'
    rm -f mpd.hosts
endif
#-------------------------------------------------------------------------------
# Drop every second slot (hyperthreading) and save the proc. list in mpd.hosts
awk '{if (NR % 2 == 0) print $1;}' $PBS_NODEFILE > mpd.hosts
#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Execute
$MPI_RUN -genv I_MPI_DEBUG=4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
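For reference: Torque writes one line per requested slot to $PBS_NODEFILE, so with nodes=1:ppn=16 the file holds 16 identical host names. A minimal sketch of what the awk filter above produces (the host name node01 is a made-up example):

# Hypothetical $PBS_NODEFILE for nodes=1:ppn=16: 16 lines, all 'node01'.
# 'NR % 2 == 0' keeps lines 2, 4, ..., 16, so mpd.hosts gets 8 entries
# and mpirun starts 8 ranks, one per physical core.
awk '{if (NR % 2 == 0) print $1;}' $PBS_NODEFILE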
- x2.compute8.mpi2.omp4
#!/bin/tcsh
#PBS -l nodes=1:ppn=16:compute8,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output and go to working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version
#-------------------------------------------------------------------------------
# Get the hostfile; remove a stale one first
if ( -e mpd.hosts ) then
    echo 'Delete mpd.hosts!'
    rm -f mpd.hosts
endif
#-------------------------------------------------------------------------------
# OMP: There are two sockets with 4 cores each (16 hyperthreads per node).
# Each socket has its own physical memory and is one NUMA domain.
# => Start 2 processes per node (one per socket) and pin 4 (8) OMP threads
#    to each process.
# => Drop all but every 8th slot (hyperthreading) and save the remaining
#    slots in mpd.hosts
awk 'NR % 8 == 0' $PBS_NODEFILE > mpd.hosts
#-------------------------------------------------------------------------------
# Set number of threads (per process) to 4 (no hyperthreads)
setenv OMP_NUM_THREADS 4
#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo -n 'Number of OMP threads per MPI process: '
echo $OMP_NUM_THREADS
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Execute (MPI processes are scattered by default, OMP threads are pinned to physical cores)
$MPI_RUN -genv I_MPI_DEBUG=4 -genv I_MPI_PIN_DOMAIN=socket -genv KMP_AFFINITY=verbose,respect,granularity=core,compact,1,0 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
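The hard-coded stride of 8 is simply slots per node divided by MPI ranks per node. A sketch of deriving it in tcsh instead of hard-coding it, assuming the ppn and socket counts from the script above:

# 16 slots per node (ppn=16), one MPI rank per socket (2 sockets)
set SLOTS_PER_NODE = 16
set RANKS_PER_NODE = 2
@ STRIDE = $SLOTS_PER_NODE / $RANKS_PER_NODE
# Keep every STRIDE-th slot, as the script does with the literal 8
awk -v s=$STRIDE 'NR % s == 0' $PBS_NODEFILE > mpd.hosts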
- x2.compute20.mpi20
#!/bin/tcsh
#PBS -l nodes=2:ppn=40:compute20,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output and go to working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version
#-------------------------------------------------------------------------------
# Get the hostfile; remove a stale one first
if ( -e mpd.hosts ) then
    echo 'Delete mpd.hosts!'
    rm -f mpd.hosts
endif
#-------------------------------------------------------------------------------
# Drop every second slot (hyperthreading) and save the proc. list in mpd.hosts
awk '{if (NR % 2 == 0) print $1;}' $PBS_NODEFILE > mpd.hosts
#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Execute
$MPI_RUN -genv I_MPI_DEBUG=4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
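Before the execute step it can be worth verifying that the filtered hostfile has the expected shape; for nodes=2:ppn=40 with every second slot dropped, 40 entries across 2 distinct hosts are expected. A quick check:

wc -l mpd.hosts            # expect 40 (2 nodes x 20 physical cores)
sort -u mpd.hosts | wc -l  # expect 2 (one entry per distinct node)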
- x2.compute20.mpi2.omp10
#!/bin/tcsh
#PBS -l nodes=1:ppn=40:compute20,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output and go to working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version
#-------------------------------------------------------------------------------
# Get the hostfile; remove a stale one first
if ( -e mpd.hosts ) then
    echo 'Delete mpd.hosts!'
    rm -f mpd.hosts
endif
#-------------------------------------------------------------------------------
# OMP: There are two sockets with 10 cores each (40 hyperthreads per node).
# Each socket has its own physical memory and is one NUMA domain.
# => Start 2 processes per node (one per socket) and pin 10 (20) OMP threads
#    to each process.
# => Drop all but every 20th slot (hyperthreading) and save the remaining
#    slots in mpd.hosts
awk 'NR % 20 == 0' $PBS_NODEFILE > mpd.hosts
#-------------------------------------------------------------------------------
# Set number of threads (per process) to 10 (no hyperthreads)
setenv OMP_NUM_THREADS 10
#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo -n 'Number of OMP threads per MPI process: '
echo $OMP_NUM_THREADS
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Execute (MPI processes are scattered by default, OMP threads are pinned to physical cores)
$MPI_RUN -genv I_MPI_DEBUG=4 -genv I_MPI_PIN_DOMAIN=socket -genv KMP_AFFINITY=verbose,respect,granularity=core,compact,1,0 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
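The pinning setup relies on the socket and NUMA layout stated in the comments. If in doubt, the layout can be inspected interactively on a compute node with standard Linux tools (assuming they are installed there):

lscpu | grep -E 'Socket|Core|Thread|NUMA'  # sockets, cores, hyperthreads
numactl --hardware                         # NUMA domains and their memory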
- x3.p2.mpi12
#!/bin/tcsh
#PBS -l nodes=1:ppn=24:p2,walltime=200:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
module load intel-cluster-studio-2016
module load mpi/intel/5.1.3.210
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output and go to working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version
#-------------------------------------------------------------------------------
# Get the hostfile; remove a stale one first
if ( -e mpd.hosts ) then
    echo 'Delete mpd.hosts!'
    rm -f mpd.hosts
endif
#-------------------------------------------------------------------------------
# Drop every second slot and save the proc. list in mpd.hosts
awk '{if (NR % 2 == 0) print $1;}' $PBS_NODEFILE > mpd.hosts
#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Clear memory (drop file system buffers)
$DROP_BUFFERS mpd.hosts
#-------------------------------------------------------------------------------
# Execute
$MPI_RUN -genv I_MPI_DEBUG 4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
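All of these scripts assume that $MPI_RUN is set by one of the loaded modules. If the launch fails, one way to see what a module actually defines (assuming the environment-modules `show` subcommand is available; the module name is the one loaded in the script above):

module show mpi/intel/5.1.3.210  # list the variables the module sets
echo $MPI_RUN                    # should print the mpirun path
which mpirun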
- x5.qdr
#!/bin/tcsh
#PBS -l nodes=1:ppn=8:qdr,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Load environment modules
module load compiler/intel-cluster-studio-2015
module load mpi/intel/5.0.2.044
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output and go to working directory
echo
date
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo 'mpirun version:'
$MPI_RUN -version
echo
echo $PBS_JOBID > JobID.out
#-------------------------------------------------------------------------------
# Get the hostfile, which contains the names of the assigned nodes.
# The variable $PBS_NODEFILE contains the path to this file.
if ( -e mpd.hosts ) then
    echo 'Delete mpd.hosts!'
    rm -f mpd.hosts
endif
cp $PBS_NODEFILE mpd.hosts
#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Set variables for BTDFT binaries
set BTDFT = /home/36/bt302536/opt/BTDFT/v1.6.9_x5/btdft_td
echo
echo -n 'BTDFT: '
echo $BTDFT
echo
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of nodes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Execute
$MPI_RUN -genv I_MPI_DEBUG 4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
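The PBS scripts above are all submitted and monitored the same way; a typical round trip, assuming the script was saved as job.sh:

qsub job.sh      # submit; prints the job id
qstat -u $USER   # show your queued and running jobs
qdel [JOB_ID]    # cancel the job if necessary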
- x1.N2.ppn32.MPI4.OMP16
#!/bin/tcsh
# Reserve 2 nodes with 32 cores each for 24 hours and run 4 MPI processes
# (2 per node) with 16 OpenMP threads each
#SBATCH -J Testjob #Jobname
#SBATCH -o job.%j.out #Job output file
#SBATCH -N 2 #Number of nodes
#SBATCH --ntasks-per-node 32 #Cores per node
#SBATCH -t 24:00:00 #Requested Walltime
#-------------------------------------------------------------------------------
#Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
#Print environment variables
echo
date
printenv >> job.env
echo 'mpirun version:'
mpirun -version
#-------------------------------------------------------------------------------
#Set number of threads (per process)
setenv OMP_NUM_THREADS 16
#-------------------------------------------------------------------------------
#Execute
mpirun -genv I_MPI_DEBUG=4 -prepend-rank -perhost 2 -np 4 [Your Program] >& output.out
set ERR = $status
#-------------------------------------------------------------------------------
#Output and clean up
echo 'Job ended!'
exit $ERR
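The Slurm counterpart of the qsub/qstat/qdel round trip, assuming the script was saved as job.sh:

sbatch job.sh     # submit; prints the job id
squeue -u $USER   # show your queued and running jobs
scancel [JOB_ID]  # cancel the job if necessary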
- x1.N2.ppn32.MPI64only
#!/bin/tcsh
# Reserve 2 nodes with 32 cores each for 24 hours and run 64 MPI processes
#SBATCH -J Testjob #Jobname
#SBATCH -o job.%j.out #Job output file
#SBATCH -N 2 #Number of nodes
#SBATCH --ntasks-per-node 32 #Cores per node
#SBATCH -t 24:00:00 #Requested Walltime
#-------------------------------------------------------------------------------
#Unlimit
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
#Print environment variables
echo
date
printenv >> job.env
#-------------------------------------------------------------------------------
#Execute
mpirun -genv I_MPI_DEBUG=4 -prepend-rank -np ${SLURM_NTASKS} [Your Program] >& output.out
set ERR = $status
#-------------------------------------------------------------------------------
#Output and clean up
echo 'Job ended!'
exit $ERR
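Depending on how Intel MPI is integrated with Slurm on the cluster, Slurm's own launcher can replace mpirun. A sketch of an alternative execute line, assuming the site's Slurm is built with PMI support (task count and placement then come from the #SBATCH lines):

# Slurm-native launch; starts SLURM_NTASKS = 2 x 32 = 64 ranks
srun -n ${SLURM_NTASKS} [Your Program] >& output.out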