#-------------------------------------------------------------------------------
#
# Batch options for SLURM (Simple Linux Utility for Resource Management)
# =======================
#
# NOTE: each directive below must begin the line with "#SBATCH" exactly,
# or sbatch will silently ignore it.
#
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=12
#SBATCH --cpus-per-task=1
#SBATCH --time=0:30:00
#SBATCH --partition=par
#SBATCH --output=job_%j.out.log
#SBATCH --error=job_%j.err.log
#SBATCH --job-name=nameandcase
#
# Reference of commonly used options:
#
# -t<time>, --time=<time>            : walltime in minutes, minutes:seconds,
#                                      hours:minutes:seconds, days-hours,
#                                      days-hours:minutes, or
#                                      days-hours:minutes:seconds
# -N, --nodes=<minnodes[-maxnodes]>  : number of allocated nodes
# --ntasks=24, -n24                  : number of total tasks
# --ntasks-per-node=<ntasks>         : number of tasks per node
# --ntasks-per-socket=<ntasks>       : number of tasks per socket
# --ntasks-per-core=<ntasks>         : number of tasks per core
# --cpus-per-task=<nthreads>         : number of threads per task
# --cpu-bind=cores, sockets          : bind CPUs to cores or sockets
# --mem-bind=local
# --qos=<qos>                        : request given quality of service
#                                      (for example "debug")
# --contiguous                       : use contiguous nodes for minimal latency
# --exclusive                        : do not share nodes with other jobs
# --switches=<count>[@max-delay]     : try to run on no more than count
#                                      switches (for better performance)
# --partition=<name>, -p<name>       : partition (queue) (run "sinfo -s"
#                                      for list of partitions)
# --reservation=<name>               : allocate resources from reservation
# -o<pattern>, --output=<pattern>    : output file name pattern
# -e<pattern>, --error=<pattern>     : error file name pattern
# -J<jobname>, --job-name=<jobname>  : job name
#
#-------------------------------------------------------------------------------