#!/bin/ksh
#set -x

host=`uname -n`
user=`whoami`

## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis
casename=`basename $srcdir`

## - Define case
if [ $# -eq 0 ] ; then
    echo "By default, i.e. without arguments, the source grid is bggd,"
    echo "the target grid is nogt and the remapping is 1st order conservative;"
    echo "2 nodes, 1 MPI task per node and 1 OpenMP thread per MPI task are used for the run."
    SRC_GRID=bggd
    TGT_GRID=nogt
    remap=conserv1st
    n_p_t=2_1_1
    nnode=2
    mpiprocs=1
    threads=1
elif [ $# -ne 4 ] ; then
    echo "To run a case other than the default, you must call the script with 4 arguments, i.e."
    echo "'./run_testinterp.sh src tgt remap nnodes_nprocs_nthreads'"
    echo "where 'src' is the source grid, 'tgt' the target grid, 'remap' the remapping,"
    echo "'nnodes' the total number of nodes for the run, 'nprocs' the number of MPI tasks per node"
    echo "and 'nthreads' the number of OpenMP threads per MPI task."
    exit 1
else
    SRC_GRID=$1
    TGT_GRID=$2
    remap=$3
    n_p_t=$4
    nnode=`echo $n_p_t | awk -F _ '{print $1}'`
    mpiprocs=`echo $n_p_t | awk -F _ '{print $2}'`
    threads=`echo $n_p_t | awk -F _ '{print $3}'`
fi
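## Example (illustrative values only): './run_testinterp.sh ssea nogt distwgt 2_2_2' would
## remap from the ssea grid to the nogt grid with nearest-neighbour interpolation, on
## 2 nodes with 2 MPI tasks per node and 2 OpenMP threads per task. Any combination of
## grids and remappings allowed by the checks below can be used.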
##
## User's choice of computing architecture
arch=pgi20.4_openmpi_openmp_linux  # nemo_lenovo_intel_impi_openmp, kraken_intel_impi_openmp,
                                   # training_computer, gfortran_openmpi_openmp_linux, belenos, mac
                                   # pgi_openmpi_openmp_linux,
                                   # pgi20.4_openmpi_openmp_linux (does not work with 4.0)
                                   # gnu1020_openmpi_openmp_linux (does not work with 4.0)
##
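## The value of 'arch' must match one of the cases handled at the end of this script:
## the Slurm-based architectures (nemo_lenovo_intel_impi_openmp, kraken_intel_impi_openmp,
## belenos) generate and submit a batch job, while the other architectures launch mpirun
## directly from this script.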
######################################################################
## - Verification of source grid type and remapping
#
## - Source grid : bggd, ssea or icos
##   bggd is an atmosphere structured (LR) grid
##   ssea is an atmosphere Gaussian reduced grid (D) : no 2nd order conservative remapping
##   icos is an atmosphere unstructured grid (U) : no bilinear, no bicubic nor 2nd order conservative remapping
##
## - Target grid : nogt
##   nogt is an ocean structured (LR) grid
##
## - Remapping : distwgt (nearest-neighbour), bili (bilinear), bicu (bicubic), conserv1st or conserv2nd (1st or 2nd order conservative remapping)
##
## Configuration files 'namcouple' are given in $datadir
## Warning: if you add any extra lines to one of the namcouple files provided as examples, you will have to
## change the definition of SRC_GRID_TYPE, SRC_GRID_PERIOD and SRC_GRID_OVERLAP in this script
## (see the three 'sed' commands under 'Grid source characteristics' below)
##
## - Verification of source grid type and remapping
if [ ${SRC_GRID} == "ssea" ]; then
    if [ ${remap} == "conserv2nd" ]; then
        echo "Impossible to perform conserv2nd remapping from the Gaussian reduced grid ssea"
        exit 1
    fi
fi
if [ ${SRC_GRID} == "icos" ]; then
    if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
        echo "Impossible to perform ${remap} remapping from the unstructured grid icos"
        exit 1
    fi
fi
##
rundir=$srcdir/${casename}_${SRC_GRID}_${TGT_GRID}_${remap}_${nnode}_${mpiprocs}_${threads}
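## For example, with the default arguments and a source directory named 'test_interpolation',
## rundir would be $srcdir/test_interpolation_bggd_nogt_conserv1st_2_1_1 (the actual name
## depends on the directory this script is run from).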
##
######################################################################
##
## - Name of the executables
exe1=model1
exe2=model2
##
## - Define number of processes to run each executable
(( nproc = $nnode * $mpiprocs ))
(( nproc_exe2 = $nproc / 2 ))
(( nproc_exe1 = $nproc - $nproc_exe2 ))
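# The total number of MPI processes is split between the two executables: exe2 gets
# nproc/2 (integer division) and exe1 gets the remainder. For example, with the default
# 2 nodes x 1 task per node, nproc=2 and each executable runs on 1 process; with nproc=5,
# exe2 would run on 2 processes and exe1 on 3.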

echo ''
echo '**************************************************************************************************************'
echo '*** '$casename' : '$rundir
echo ''
echo "Running test_interpolation on $nnode nodes with $mpiprocs MPI tasks per node and $threads threads per MPI task"
echo '**************************************************************************************************************'
echo 'Source grid :' $SRC_GRID
echo 'Target grid :' $TGT_GRID
echo 'Rundir :' $rundir
echo 'Architecture :' $arch
echo 'Host : '$host
echo 'User : '$user
echo 'Grids : '$SRC_GRID'-->'$TGT_GRID
echo 'Remap : '$remap
echo ''
echo $exe1' runs on '$nproc_exe1 'processes'
echo $exe2' runs on '$nproc_exe2 'processes'
echo ''
echo ''

## - Copy everything needed into rundir
\rm -fr $rundir/*
mkdir -p $rundir

ln -sf $datadir/grids.nc $rundir/grids.nc
ln -sf $datadir/masks.nc $rundir/masks.nc
ln -sf $datadir/areas.nc $rundir/areas.nc
ln -sf $srcdir/$exe1 $rundir/.
ln -sf $srcdir/$exe2 $rundir/.
cp -f $datadir/namcouple_${SRC_GRID}_${TGT_GRID}_${remap} $rundir/namcouple
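# grids.nc, masks.nc and areas.nc are the standard OASIS3-MCT auxiliary files describing
# the grids, land-sea masks and cell areas; the namcouple copied above is the OASIS
# configuration file that selects the remapping actually performed.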

## - Grid source characteristics
# These are read from the namcouple file. If you decide to use a namcouple other than the ones
# provided in $datadir, you may have to change the 3 lines below.
SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2`    # source grid type
SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1`  # "P" for periodic, "R" for non-periodic
SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # number of overlapping grid points for periodic grids
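# Illustration only (the exact line numbers and values depend on the namcouple used):
# if line 17 of the namcouple were 'P  2  P  0', SRC_GRID_PERIOD would be set to 'P'
# and SRC_GRID_OVERLAP to 2; the second field of line 20 gives SRC_GRID_TYPE (e.g. 'LR').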

echo "SRC_GRID_TYPE : $SRC_GRID_TYPE"
echo "SRC_GRID_PERIOD : $SRC_GRID_PERIOD"
echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"

## - Create name_grids.dat, which will be read by the models, from the namcouple information
cat <<EOF >> $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='$SRC_GRID'
cl_remap='$remap'
cl_type_src='$SRC_GRID_TYPE'
cl_period_src='$SRC_GRID_PERIOD'
il_overlap_src=$SRC_GRID_OVERLAP
\$end
\$grid_target_characteristics
cl_grd_tgt='$TGT_GRID'
\$end
EOF
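# For the default bggd->nogt conserv1st case, name_grids.dat would contain something like
# the following (actual values are taken from the namcouple at run time):
#   $grid_source_characteristics
#   cl_grd_src='bggd'
#   cl_remap='conserv1st'
#   cl_type_src='LR'
#   cl_period_src='P'
#   il_overlap_src=2
#   $end
#   $grid_target_characteristics
#   cl_grd_tgt='nogt'
#   $end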
#
cd $rundir

######################################################################
## - Creation of configuration scripts

###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
if [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
#SBATCH --job-name ${n_p_t}
#SBATCH --time=00:02:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task (directive kept disabled here)
##SBATCH --cpus-per-task=24
cd $rundir

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

timreq=00:30:00

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=36

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
export I_MPI_WAIT_MODE=enable
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
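# Note: with N threads per task the loop above builds an explicit proclist of cores 0..N-1,
# e.g. for 4 threads: KMP_AFFINITY=verbose,granularity=fine,proclist=[0,1,2,3],explicit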
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

# Binding for IntelMPI (these variables are built here but not used by the mpirun line below)
MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35"
INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
I_IMPI_BINDING="-env I_MPI_PERHOST ${mpiprocs} \${INTELMPI_BINDING}"

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

###---------------------------------------------------------------------
### BELENOS
###---------------------------------------------------------------------
elif [ $arch == belenos ] ; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal256
#SBATCH --job-name ${remap}_${threads}
#SBATCH --time=02:00:00
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
#SBATCH -N $nnode
#SBATCH --ntasks-per-node=$mpiprocs
#
ulimit -s unlimited
cd $rundir
#
module load intelmpi/2018.5.274
module load intel/2018.5.274
module load netcdf-fortran/4.5.2_V2
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2
#
EOF

fi

######################################################################
### - Execute the model
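# For the interactive architectures below, the coupled models are launched directly with
# mpirun and their standard output is redirected to runjob.err in the rundir; for the
# Slurm-based architectures, the batch script generated above is submitted with sbatch.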

if [ ${arch} == training_computer ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == gfortran_openmpi_openmp_linux ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == pgi_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == gnu1020_openmpi_openmp_linux ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/local/openmpi/4.1.0_gcc1020/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == pgi20.4_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == nemo_lenovo_intel_impi_openmp ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
elif [ $arch == kraken_intel_impi_openmp ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
elif [ $arch == belenos ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ ${arch} == mac ]; then
    echo 'Executing the model using mpirun'
    mpirun --oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
fi

echo $casename 'has been executed or submitted to the queue.'
echo 'Results can be found in the rundir: '$rundir

######################################################################