Druckansicht der Internetadresse:

Forschungszentrum für wissenschaftliches Rechnen an der Universität Bayreuth

Seite drucken

Example job scripts

x2.compute8.mpi8Einklappen
#!/bin/tcsh
#PBS -l nodes=1:ppn=16:compute8,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]

#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
# NOTE(review): $MPI_RUN is presumably exported by this module — verify.
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]

#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]

#-------------------------------------------------------------------------------
# Remove all resource limits for this shell, but keep core dumps disabled
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Output job information and go to the working directory the job was
# submitted from ($PBS_O_WORKDIR is set by PBS/Torque)
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version

#-------------------------------------------------------------------------------
# Remove a stale hostfile left over from a previous run, if present
if ( -e mpd.hosts ) then
echo 'Delete mpd.hosts!'
rm -f mpd.hosts
endif

#-------------------------------------------------------------------------------
# Drop every second proc. slot (Hyperthreading) and save proc. list in mpd.hosts
# (ppn=16 hyperthread slots -> 8 MPI slots, one per physical core)
awk '{if(NR %2 ==0) print $1 ;}' $PBS_NODEFILE > mpd.hosts

#-------------------------------------------------------------------------------
# Extract the number of MPI slots from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist

#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts

#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'

#-------------------------------------------------------------------------------
# Execute (stdout and stderr both go to output.out via tcsh '>&')
$MPI_RUN -genv I_MPI_DEBUG=4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out

#-------------------------------------------------------------------------------
# Output and clean up
# BUGFIX: tcsh 'unset' takes variable NAMES. The original 'unset $hostlist'
# expanded the variable first and tried to unset variables named after the
# hosts, which fails. Same for 'unset $nodes'.
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
x2.compute8.mpi2.omp4Einklappen
#!/bin/tcsh
#PBS -l nodes=1:ppn=16:compute8,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]
#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
# NOTE(review): $MPI_RUN is presumably exported by this module — verify.
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]
#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]
#-------------------------------------------------------------------------------
# Remove all resource limits for this shell, but keep core dumps disabled
unlimit
limit coredumpsize 0
#-------------------------------------------------------------------------------
# Output job information and go to the working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version
#-------------------------------------------------------------------------------
# Remove a stale hostfile left over from a previous run, if present
if ( -e mpd.hosts ) then
echo 'Delete mpd.hosts!'
rm -f mpd.hosts
endif
#-------------------------------------------------------------------------------
# OMP: There are two sockets with 4 cores (16 hyperthreads) per node.
# Each socket has one physical memory and is one NUMA domain
# => Start 2 processes per node (one per socket) and pin 4 (8) OMP threads
# to each process.  (BUGFIX: comment previously said "10 (20)", copied from
# the 20-core variant of this script.)
# => Drop all but every 8th slot in mpd hosts (Hyperthreading) and save
# slots in mpd.hosts
awk 'NR%8==0' $PBS_NODEFILE > mpd.hosts
#-------------------------------------------------------------------------------
# Set number of threads (per process) to 4 (no hyperthreads)
setenv OMP_NUM_THREADS 4
#-------------------------------------------------------------------------------
# Extract the number of MPI slots from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist
#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts
#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo -n 'Number of OMP threads per MPI process:'
echo $OMP_NUM_THREADS
echo
echo 'Start the job!'
#-------------------------------------------------------------------------------
# Execute (MPI processes are scatterd by default, OMP threads shall be pinned to physical cores)
$MPI_RUN -genv I_MPI_DEBUG=4 -genv I_MPI_PIN_DOMAIN=socket -genv KMP_AFFINITY=verbose,respect,granularity=core,compact,1,0 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out
#-------------------------------------------------------------------------------
# Output and clean up
# BUGFIX: 'unset' takes variable NAMES, not their expanded values.
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
x2.compute20.mpi20Einklappen
#!/bin/tcsh
#PBS -l nodes=2:ppn=40:compute20,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]

#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
# NOTE(review): $MPI_RUN is presumably exported by this module — verify.
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]

#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]

#-------------------------------------------------------------------------------
# Remove all resource limits for this shell, but keep core dumps disabled
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Output job information and go to the working directory the job was
# submitted from ($PBS_O_WORKDIR is set by PBS/Torque)
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version

#-------------------------------------------------------------------------------
# Remove a stale hostfile left over from a previous run, if present
if ( -e mpd.hosts ) then
echo 'Delete mpd.hosts!'
rm -f mpd.hosts
endif

#-------------------------------------------------------------------------------
# Drop every second proc. slot (Hyperthreading) and save proc. list in mpd.hosts
# (2 nodes x ppn=40 hyperthread slots -> 40 MPI slots, one per physical core)
awk '{if(NR %2 ==0) print $1 ;}' $PBS_NODEFILE > mpd.hosts

#-------------------------------------------------------------------------------
# Extract the number of MPI slots from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist

#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts

#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'

#-------------------------------------------------------------------------------
# Execute (stdout and stderr both go to output.out via tcsh '>&')
$MPI_RUN -genv I_MPI_DEBUG=4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out

#-------------------------------------------------------------------------------
# Output and clean up
# BUGFIX: 'unset' takes variable NAMES, not their expanded values.
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
x2.compute20.mpi2.omp10Einklappen
#!/bin/tcsh
#PBS -l nodes=1:ppn=40:compute20,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]

#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
# NOTE(review): $MPI_RUN is presumably exported by this module — verify.
module load intel_parallel_studio_xe_2016_update4
#[MORE_MODULES]

#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]

#-------------------------------------------------------------------------------
# Remove all resource limits for this shell, but keep core dumps disabled
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Output job information and go to the working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version

#-------------------------------------------------------------------------------
# Remove a stale hostfile left over from a previous run, if present
if ( -e mpd.hosts ) then
echo 'Delete mpd.hosts!'
rm -f mpd.hosts
endif

#-------------------------------------------------------------------------------
# OMP: There are two sockets with 10 cores (20 hyperthreads) per node.
# Each socket has one physical memory and is one NUMA domain
# => Start 2 processes per node (one per socket) and pin 10 (20) OMP threads
# to each process.
# => Drop all but every 20th slot in mpd hosts (Hyperthreading) and save
# slots in mpd.hosts
awk 'NR%20==0' $PBS_NODEFILE > mpd.hosts

#-------------------------------------------------------------------------------
# Set number of threads (per process) to 10 (no hyperthreads)
setenv OMP_NUM_THREADS 10

#-------------------------------------------------------------------------------
# Extract the number of MPI slots from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist

#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts

#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo -n 'Number of OMP threads per MPI process:'
echo $OMP_NUM_THREADS
echo
echo 'Start the job!'

#-------------------------------------------------------------------------------
# Execute (MPI processes are scatterd by default, OMP threads shall be pinned to physical cores)
$MPI_RUN -genv I_MPI_DEBUG=4 -genv I_MPI_PIN_DOMAIN=socket -genv KMP_AFFINITY=verbose,respect,granularity=core,compact,1,0 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out

#-------------------------------------------------------------------------------
# Output and clean up
# BUGFIX: 'unset' takes variable NAMES, not their expanded values.
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
x3.p2.mpi12Einklappen
#!/bin/tcsh
#PBS -l nodes=1:ppn=24:p2,walltime=200:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]

#-------------------------------------------------------------------------------
# Set environment if necessary (mpirun, MPI and MKL libraries)
# NOTE(review): $MPI_RUN and $DROP_BUFFERS are presumably exported by these
# modules — verify.
module load intel-cluster-studio-2016
module load mpi/intel/5.1.3.210
#[MORE_MODULES]

#-------------------------------------------------------------------------------
# Set your program
set PROG = [YOUR_PROGRAM]

#-------------------------------------------------------------------------------
# Remove all resource limits for this shell, but keep core dumps disabled
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Output job information and go to the working directory
echo
date
echo
echo -n 'Job id: '
echo $PBS_JOBID
echo
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo -n 'mpirun version: '
$MPI_RUN -version

#-------------------------------------------------------------------------------
# Remove a stale hostfile left over from a previous run, if present
if ( -e mpd.hosts ) then
echo 'Delete mpd.hosts!'
rm -f mpd.hosts
endif

#-------------------------------------------------------------------------------
# Drop every second slot (Hyperthreading) and save proc. list in mpd.hosts
# (ppn=24 hyperthread slots -> 12 MPI slots, one per physical core)
awk '{if(NR %2 ==0) print $1 ;}' $PBS_NODEFILE > mpd.hosts

#-------------------------------------------------------------------------------
# Extract the number of MPI slots from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist

#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts

#-------------------------------------------------------------------------------
# Output
echo -n 'Number of MPI processes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'

#-------------------------------------------------------------------------------
# Clear file-system buffers on the nodes
$DROP_BUFFERS mpd.hosts

#-------------------------------------------------------------------------------
# Execute
# CONSISTENCY: use '-genv NAME=value' as in the other example scripts
# (Intel MPI hydra accepts both '-genv NAME value' and '-genv NAME=value').
$MPI_RUN -genv I_MPI_DEBUG=4 -prepend-rank -machinefile mpd.hosts -n $nodes $PROG >& output.out

#-------------------------------------------------------------------------------
# Output and clean up
# BUGFIX: 'unset' takes variable NAMES, not their expanded values.
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
x5.qdrEinklappen
#!/bin/tcsh
#PBS -l nodes=1:ppn=8:qdr,walltime=100:00:00
#PBS -j oe
#PBS -m abe
#PBS -M [MAIL_ADDRESS]

#-------------------------------------------------------------------------------
# Load environmental modules
# NOTE(review): $MPI_RUN is presumably exported by these modules — verify.
module load compiler/intel-cluster-studio-2015
module load mpi/intel/5.0.2.044
#[MORE_MODULES]

#-------------------------------------------------------------------------------
# Remove all resource limits for this shell, but keep core dumps disabled
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Output job information and go to the working directory
echo
date
echo -n 'Directory: '
echo $PBS_O_WORKDIR
cd $PBS_O_WORKDIR
echo
echo 'mpirun version:'
$MPI_RUN -version
echo
echo $PBS_JOBID > JobID.out

#-------------------------------------------------------------------------------
# Get the hostfile which contains the name of the assigned nodes
# The variable $PBS_NODEFILE contains the path to this file
if ( -e mpd.hosts ) then
echo 'Delete mpd.hosts!'
rm -f mpd.hosts
endif
cp $PBS_NODEFILE mpd.hosts

#-------------------------------------------------------------------------------
# Extract the number of hosts from the hostfile
set hostlist = (`cat mpd.hosts`)
set nodes = $#hostlist

#-------------------------------------------------------------------------------
# Set variables for BTDFT binaries
set BTDFT = /home/36/bt302536/opt/BTDFT/v1.6.9_x5/btdft_td
echo
echo -n 'BTDFT: '
echo $BTDFT
echo

#-------------------------------------------------------------------------------
# Clear memory on the compute nodes
/cluster/bayreuth/iws/bin/memsweeper mpd.hosts
/cluster/bayreuth/iws/bin/drop_buffers mpd.hosts

#-------------------------------------------------------------------------------
# Output
echo -n 'Number of nodes: '
echo $nodes
echo -n 'Hostlist: '
echo $hostlist
echo
echo 'Start the job!'

#-------------------------------------------------------------------------------
# Execute
# BUGFIX: the original ran $PROG, which is never set in this script, while
# $BTDFT (set above) was never used.  Run the BTDFT binary instead.
# CONSISTENCY: use '-genv NAME=value' as in the other example scripts.
$MPI_RUN -genv I_MPI_DEBUG=4 -prepend-rank -machinefile mpd.hosts -n $nodes $BTDFT >& output.out

#-------------------------------------------------------------------------------
# Output and clean up
# BUGFIX: 'unset' takes variable NAMES, not their expanded values.
echo 'Job ended!'
unset hostlist
unset nodes
echo
date
exit
x1.N2.ppn32.MPI4.OMP16Einklappen
#!/bin/tcsh
# Reserve 2 nodes with 32 cores each for 24 hours and run 4 MPI processes (2 per node) with 16 OpenMP threads each
#SBATCH -J Testjob #Jobname
#SBATCH -o job.%j.out #Job output file
#SBATCH -N 2 #Number of nodes
#SBATCH --ntasks-per-node 32 #Cores per node
#SBATCH -t 24:00:00 #Requested Walltime

#-------------------------------------------------------------------------------
# Lift the shell's resource limits; keep core dumps switched off
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Record start time, the full environment, and the mpirun version
echo
date
printenv >> job.env
echo 'mpirun version:'
mpirun -version

#-------------------------------------------------------------------------------
# 16 OpenMP threads per MPI process
setenv OMP_NUM_THREADS 16

#-------------------------------------------------------------------------------
# Launch: 4 ranks total, 2 per host; stdout+stderr collected in output.out
mpirun -genv I_MPI_DEBUG=4 -prepend-rank -perhost 2 -np 4 [Your Program] >& output.out
set ERR = $status

#-------------------------------------------------------------------------------
# Report completion and propagate mpirun's exit status
echo 'Job ended!'
exit $ERR
x1.N2.ppn32.MPI64onlyEinklappen

#!/bin/tcsh
# Reserve 2 nodes with 32 cores each for 24 hours and run 64 MPI processes

#SBATCH -J Testjob #Jobname
#SBATCH -o job.%j.out #Job output file
#SBATCH -N 2 #Number of nodes
#SBATCH --ntasks-per-node 32 #Cores per node
#SBATCH -t 24:00:00 #Requested Walltime

#-------------------------------------------------------------------------------
# Lift the shell's resource limits; keep core dumps switched off
unlimit
limit coredumpsize 0

#-------------------------------------------------------------------------------
# Record start time and the full environment
echo
date
printenv >> job.env

#-------------------------------------------------------------------------------
# Launch one rank per allocated task (SLURM_NTASKS = 2 nodes x 32 tasks = 64);
# stdout+stderr collected in output.out
mpirun -genv I_MPI_DEBUG=4 -prepend-rank -np ${SLURM_NTASKS} [Your Program] >& output.out
set ERR = $status

#-------------------------------------------------------------------------------
# Report completion and propagate mpirun's exit status
echo 'Job ended!'
exit $ERR


Verantwortlich für die Redaktion: Dr.rer.nat. Ingo Schelter

Facebook Twitter Youtube-Kanal Instagram UBT-A Kontakt