Example batch script for a serial (single-core) job:

#!/bin/bash -l
#SBATCH --job-name=serial # Job name
#SBATCH --partition=cpu # Partition
#SBATCH --nodes=1 # Number of nodes
#SBATCH --ntasks-per-node=1 # Tasks per node
#SBATCH --output=job.%j.out # Stdout (%j=jobId)
#SBATCH --error=job.%j.err # Stderr (%j=jobId)
#SBATCH -A ops # Accounting project
# Load any necessary modules
module load GCC/11.3.0
# Launch the executable exe.out
./exe.out
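
To run it, submit the script with sbatch and monitor it with the standard Slurm commands. The file name serial_job.sh is only an illustrative placeholder, and <jobid> stands for the ID that sbatch prints:

# Submit the script; Slurm prints the assigned job ID
sbatch serial_job.sh
# List your queued and running jobs
squeue -u $USER
# Cancel a job by ID if needed
scancel <jobid>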
Example batch script for a parallel MPI job (here 6 nodes with 40 tasks each, i.e. 240 MPI ranks):

#!/bin/bash -l
#SBATCH --job-name=parallel # Job name
#SBATCH --partition=cpu # Partition
#SBATCH --nodes=6 # Number of nodes
#SBATCH --ntasks-per-node=40 # Tasks per node
#SBATCH --output=job.%j.out # Stdout (%j=jobId)
#SBATCH --error=job.%j.err # Stderr (%j=jobId)
#SBATCH --time=24:00:00 # Walltime
#SBATCH -A ops # Accounting project
# Load any necessary modules
module load OpenMPI/4.1.4-GCC-11.3.0
# Launch the executable exe.out
srun ./exe.out
Note: Users can use mpirun instead of srun.
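
As a minimal sketch of that variant (assuming the OpenMPI module loaded above): when OpenMPI is built with Slurm support, mpirun reads the allocation from Slurm, so the task count normally does not need to be repeated with -np:

# Launch all 240 ranks via mpirun; the allocation is taken from Slurm
mpirun ./exe.out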
Example batch script for a job on GPU nodes (here 1 node with 4 GPUs and one task per GPU):

#!/bin/bash -l
#SBATCH --job-name=parallel_gpu # Job name
#SBATCH --partition=gpu # Partition
#SBATCH --nodes=1 # Number of nodes
#SBATCH --gres=gpu:4 # GPUs per node
#SBATCH --ntasks-per-node=4 # Tasks per node (one per GPU)
#SBATCH --output=job.%j.out # Stdout (%j=jobId)
#SBATCH --error=job.%j.err # Stderr (%j=jobId)
#SBATCH --time=12:00:00 # Walltime
#SBATCH -A ops # Accounting project
# Load any necessary modules
module load OpenMPI/4.1.4-GCC-11.3.0
# Launch the executable exe.out
srun ./exe.out
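
If each task should see only its own GPU, one common pattern (a sketch, not part of the script above; wrapper.sh is a hypothetical helper) is to bind tasks to devices through the node-local task ID that srun exports as SLURM_LOCALID:

#!/bin/bash
# wrapper.sh: SLURM_LOCALID runs 0..3 for the 4 tasks on this node,
# so each task is restricted to a different GPU
export CUDA_VISIBLE_DEVICES=$SLURM_LOCALID
exec ./exe.out

It is launched with srun ./wrapper.sh in place of srun ./exe.out.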