#!/bin/bash
#SBATCH --ntasks=[NTASKS]
#SBATCH --partition=[PARTITION/QUEUE]
#SBATCH --time=[DAYS]-[HOURS]:[MINUTES]:[SECONDS]
#SBATCH --job-name=[JOBNAME]
#SBATCH --mail-user=[USERNAME]@memphis.edu
#SBATCH --output=[STDOUT].out
#SBATCH --error=[STDERR].err
#SBATCH --mem-per-cpu=[MEMORY_NEEDED_PER_CPU]

#################################################
# [SOMETHING]                                   #
#-----------------------------------------------#
# Please replace anything in [] brackets with   #
# what you want.                                #
# Example:                                      #
#   #SBATCH --partition=[PARTITION/QUEUE]       #
# Becomes:                                      #
#   #SBATCH --partition=computeq                #
#################################################

#################################################
# --ntasks=[NTASKS]                             #
#-----------------------------------------------#
# Number of MPI processes (CPU cores) your job  #
# will need. You can use this option alone,     #
# without specifying nodes or ntasks-per-node.  #
#################################################
# --ntasks-per-node=[CPUs-Per-Node]             #
#-----------------------------------------------#
# If you need to balance your job across nodes, #
# for better performance or to satisfy memory   #
# constraints, specify the number of tasks per  #
# node with this option.                        #
#################################################
# --nodes=[NNODES]                              #
#-----------------------------------------------#
# Number of nodes your MPI job will use. You    #
# can use up to 16, but the job will wait in    #
# the queue longer when many other jobs are     #
# running.                                      #
#################################################
# --partition=[PARTITION/QUEUE]                 #
#-----------------------------------------------#
# computeq: 40 cores, 192 GB mem                #
# bigmemq:  40 cores, 768-1536 GB mem           #
# gpuq:     40 cores, 192 GB mem, 2 V100 GPUs   #
#################################################

# Go to the submission directory
cd "$SLURM_SUBMIT_DIR"

#################################################
# modules                                       #
#-----------------------------------------------#
# Any modules you need can be found with        #
# 'module avail'. If you compiled your program  #
# with a particular compiler loaded from a      #
# module, you probably want to load that same   #
# module here. You probably also want to load   #
# an openmpi module.                            #
#################################################
module load openmpi/3.1.1/gcc.8.2.0/noncuda
#module load openmpi/4.0.5/gcc.8.2.0/noncuda
#module load openmpi/2.0.2/gcc.8.2.0/noncuda
#module load mpich/ge/gcc/64/3.2.1
#module load intel/2019.5

# What is my job ID?
echo "$SLURM_JOB_ID"

#################################################
# Run your executable here                      #
#-----------------------------------------------#
# Some mpirun documentation tells you to use    #
# '-n' to set the number of processes. By       #
# default, mpirun starts one process per task   #
# in the Slurm allocation (i.e. [NTASKS]).      #
# Some documentation also uses mpiexec. In most #
# cases this isn't needed, but if you have to   #
# set up the MPI launcher manually, you might   #
# try mpiexec instead.                          #
# You might be interested in these environment  #
# variables:                                    #
# SLURM_JOB_NODELIST                            #
#   The list of nodes allocated to the job, in  #
#   compressed hostlist form.                   #
# SLURM_NNODES                                  #
#   Number of nodes used in the job.            #
# SLURM_NTASKS_PER_NODE                         #
#   Number of tasks per node (set only if you   #
#   used --ntasks-per-node).                    #
# SLURM_NTASKS                                  #
#   Total number of tasks across all nodes.     #
#################################################
mpirun [EXECUTABLE] [OPTIONS]
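
#################################################
# Example (hypothetical)                        #
#-----------------------------------------------#
# A minimal sketch of the final step, assuming  #
# an executable named my_mpi_app that takes an  #
# input file data.in; both names are made-up    #
# placeholders, not programs provided on the    #
# cluster. It prints the allocation, then lets  #
# mpirun start one process per Slurm task.      #
# Uncomment and adapt only if it matches your   #
# own case.                                     #
#################################################
#echo "Running on $SLURM_NNODES node(s): $SLURM_JOB_NODELIST ($SLURM_NTASKS tasks)"
#mpirun ./my_mpi_app data.in > my_mpi_app.log 2>&1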