#!/bin/bash

# This script provides site-specific functions
# called by the site-independent scripts
# create_runs.sh and submit.sh

#----------------------------- split_XXX ----------------------------

# Domain-splitting parameters for the "mpi_32" configuration.
# Sets nbp=32 with a 1x2 (nsplit_i x nsplit_j) subdivision via the
# externally defined `setvar` helper (presumably key/value pairs consumed
# by create_runs.sh — confirm against that script).
function split_mpi_32() {
    setvar nbp 32 nsplit_i 1 nsplit_j 2
}
# Domain-splitting parameters for the "serial_40" configuration.
# Same values as split_mpi_40 (nbp=40, 1x2 split); only the run mode differs.
function split_serial_40() {
    setvar nbp 40 nsplit_i 1 nsplit_j 2
}
# Domain-splitting parameters for the "mpi_40" configuration:
# nbp=40 with a 1x2 (nsplit_i x nsplit_j) subdivision.
function split_mpi_40() {
    setvar nbp 40 nsplit_i 1 nsplit_j 2
}
# Domain-splitting parameters for the "mpi_80" configuration:
# nbp=80 with a 1x2 (nsplit_i x nsplit_j) subdivision.
function split_mpi_80() {
    setvar nbp 80 nsplit_i 1 nsplit_j 2
}
# Domain-splitting parameters for the hybrid MPI+OpenMP "mpi_omp_40"
# configuration: same split as mpi_40 plus omp_level_size=1
# (one OpenMP level — semantics defined by the consumer of setvar).
function split_mpi_omp_40() {
    setvar nbp 40 nsplit_i 1 nsplit_j 2 omp_level_size 1
}

#----------------- job submission ------------------

# Submit a job script on Irene (TGCC) with ccc_msub.
#   $1   = job script to submit
#   $2.. = extra submitter options (documented as SLURM options)
# Echoes the submitter's full message on stderr (for the log) and the job
# id — the last field of the ccc_msub output — on stdout, so callers can
# capture it with $(...).
function submit_job_X64_IRENE() # $1=script $* = SLURM OPTIONS
{
    local script=$1
    shift
    local tmp
    tmp=$(mktemp) || return 1
    # Forward the extra options: the previous version advertised them in
    # its header comment but silently dropped them.
    ccc_msub "$@" "$script" > "$tmp"
    cat "$tmp" 1>&2               # full submitter output, for diagnostics
    awk '{ print $NF }' "$tmp"    # job id = last field of the message
    rm -f -- "$tmp"
}

#------------------------------ job_XXX -----------------------------

# Serial jobs
# Generate a serial job script: 1 node, 1 task, direct ./gcm.exe launch.
#   $1 = experiment name (becomes the SLURM job name)
function job_serial() { # EXP_NAME
    # Quote $1 (SC2086): an experiment name must reach job_generic intact.
    job_generic "$1" 1 1 "./gcm.exe"
}
# Serial job for the nbp=40 configuration — plain alias for job_serial.
#   $1 = experiment name
function job_serial_40() { # EXP_NAME
    job_serial "$1"   # quoted (SC2086)
}

# MPI jobs
# Generate an MPI job script: forwards its arguments to job_generic with
# an srun launch command.
#   $1 = experiment name, $2 = nodes, $3 = tasks, $4 = tasks per node
# NOTE(review): the original header said "EXP_NAME NB_MPI", but every
# caller passes the four job_generic positionals — comment fixed to match.
function job_mpi() { # EXP_NAME NODES NTASKS NTASKS_PER_NODE
    # "$@" (not bare $*) keeps each argument a single word (SC2048).
    job_generic "$@" "srun ./gcm.exe"
}
# MPI job for the nbp=32 configuration.
#   $1 = experiment name
# NOTE(review): requests 1 node / 4 tasks / 4 per node, identical to the
# 40 and 80 variants — confirm the "32" suffix only refers to the mesh size.
function job_mpi_32() { # EXP_NAME
    job_mpi "$1" 1 4 4   # quoted (SC2086)
}
# MPI job for the nbp=40 configuration: 1 node, 4 tasks, 4 per node.
#   $1 = experiment name
function job_mpi_40() { # EXP_NAME
    job_mpi "$1" 1 4 4   # quoted (SC2086)
}
# MPI job for the nbp=80 configuration: 1 node, 4 tasks, 4 per node.
#   $1 = experiment name
function job_mpi_80() { # EXP_NAME
    job_mpi "$1" 1 4 4   # quoted (SC2086)
}

# MPI-OMP jobs
# Generate a hybrid MPI+OpenMP job script via job_generic + srun.
#   $1 = experiment name, $2 = nodes, $3 = GPUs, $4 = GPUs per node
function job_mpi_omp() { # EXP_NAME NODES GPUS GPU_PER_NODE
    # "$@" (not bare $*) keeps each argument a single word (SC2048).
    job_generic "$@" "srun ./gcm.exe"
}
# Hybrid MPI+OpenMP job for the nbp=40 configuration: 1 node, 4 GPUs.
#   $1 = experiment name
function job_mpi_omp_40() { # EXP_NAME
    job_mpi_omp "$1" 1 4 4   # quoted (SC2086)
}

# Generic
# Emit (on stdout) a complete SLURM batch script for one run.
#   $1 = job name (experiment name)       -> --job-name
#   $2 = number of nodes                  -> --nodes
#   $3 = number of MPI tasks              -> --ntasks (= GPU count here)
#   $4 = tasks per node                   -> --ntasks-per-node and --gres=gpu:N
#   $5 = command line to run (e.g. "srun ./gcm.exe"), output appended to gcm.log
# The heredoc delimiter is unquoted, so $1..$5 are substituted NOW, at
# generation time; \${SLURM_SUBMIT_DIR} is escaped and therefore expanded
# later, when SLURM runs the generated script.
# NOTE(review): "ulimit -s unlimited" appears twice in the generated
# script — harmless duplication. The heredoc body is emitted text and is
# deliberately left byte-identical (French comments included).
function job_generic() { # EXP_NAME NODES GPUS GPU_PER_NODE CMD
    cat <<EOF
#!/bin/bash
## Request name
#SBATCH --job-name=$1

#SBATCH --partition=gpu_p1 # partition GPU choisie
#SBATCH --nodes=$2 # nombre de noeud
#SBATCH --ntasks=$3 # nombre de tache MPI (= nombre de GPU ici)
#SBATCH --ntasks-per-node=$4 # nombre de tache MPI par noeud (= nombre de GPU ici)
#SBATCH --gres=gpu:$4 # nombre de GPU par noeud
#SBATCH --cpus-per-task=1 # nombre de coeurs CPU par tache

## computing project
#SBATCH -A wuu@gpu
## Elapsed time limit HH:MM:SS
#SBATCH --time=00:10:00

# do not use hyperthreading
#SBATCH --hint=nomultithread
# standard outputs
#SBATCH --output=DYNAMICO%j.out
#SBATCH --error=DYNAMICO%j.out

export OMP_NUM_THREADS=1
# OpenMP binding
export OMP_PLACES=cores

# stack
export OMP_STACKSIZE=128M
ulimit -s unlimited

# move to submission directory
cd \${SLURM_SUBMIT_DIR}

# load the same modules as during compilation
source build/arch.env
module list

# cleanup execution directory and run
rm -rf gcm.log logs *.nc netcdf
date > gcm.log
ulimit -s unlimited
$5 >> gcm.log
date >> gcm.log

# move output files to netcdf subdir
mkdir -p netcdf
cp gcm.log *.def netcdf
mv *.nc netcdf

# keep log files, source code and executable for debug
mkdir -p logs
cp *.xml logs
cp -pr gcm.exe logs
cp -pr build/src logs
mv xios_client_*.err xios_client_*.out gcm.log logs

EOF
}

#------------------------------ post-processing job -----------------------------

# Emit (on stdout) a ccc_msub post-processing job that waits on every job
# id passed as an argument (an "afterany:ID:ID:..." dependency list), then
# runs bash/post.sh from the run root.
#   $@ = list of job ids to depend on
# Reads the globals $project and $ROOT at generation time; the heredoc is
# unquoted, so $DEPLIST, $project and $(cat ...) are substituted now while
# \${BRIDGE_MSUB_PWD} is left for the batch system to expand at run time.
function job_post_X64_IRENE() # LIST
{
    DEPLIST="afterany"
    for job_id in "$@" ; do
        DEPLIST="${DEPLIST}:${job_id}"
    done
    cat <<EOF
#!/bin/bash
## Request name
#MSUB -r testing
#MSUB -n 1
## Elapsed time limit in seconds
#MSUB -T 600
#MSUB -q standard
#MSUB -A $project
#MSUB -q skylake
#MSUB -m work
#MSUB -E "--dependency=$DEPLIST"
## Number of tasks (=MPI processes) to use
## Quality of Service required (long [3 days], normal [1 day], test [30 min])
#MSUB -Q normal

export OMP_NUM_THREADS=1
# this script is submitted from $ROOT/logs
cd \${BRIDGE_MSUB_PWD}/..

$(cat $ROOT/DYNAMICO/arch/arch-X64_IRENE.env)
module load python

bash/post.sh
EOF
}