#####################################################
# Author : Simona Flavoni for NEMO
# Contact : sflod@locean-ipsl.upmc.fr
#
# ----------------------------------------------------------------------
# NEMO/SETTE , NEMO Consortium (2010)
# Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
# ----------------------------------------------------------------------
#
# Some scripts called by sette.sh
# prepare_job.sh : creates the job script used to run the tests
######################################################
#set -vx
set -o posix
#set -u
#set -e
#+
#
# ================
# prepare_job.sh
# ================
#
# -------------------------------------------------
# script that creates the job script for NEMO tests
# -------------------------------------------------
#
# SYNOPSIS
# ========
#
# ::
#
#  $ ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG JOB_FILE NUM_XIO_SERVERS
#
#
# DESCRIPTION
# ===========
#
# Part of the SETTE package to run tests for NEMO
#
# prepares the script $JOB_FILE used to run the tests
#
# EXAMPLES
# ========
#
# ::
#
#  $ ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG $JOB_FILE NUM_XIO_SERVERS
#
#  prepares the $JOB_FILE for execution
#
#
# TODO
# ====
#
# option debug
#
#
# EVOLUTIONS
# ==========
#
# $Id: prepare_job.sh 3050 2011-11-07 14:11:34Z acc $
#
#
#
# * creation
#
#-
#

usage=" Usage : ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG JOB_FILE NUM_XIO_SERVERS"
usage="${usage}
 example : ./prepare_job.sh input_ORCA2_LIM_PISCES.cfg 8 SHORT no/yes \$JOB_FILE 0"

minargcount=6
if [ ${#} -lt ${minargcount} ]
then
   echo "not enough arguments for the prepare_job.sh script"
   echo "check the number of arguments passed to prepare_job.sh in sette.sh"
   echo "${usage}"
   exit 1
fi
unset minargcount
if [ ! -f ${SETTE_DIR}/output.sette ] ; then
   touch ${SETTE_DIR}/output.sette
fi

#
# set and export TEST_NAME. It will be used within the post_test_tidyup function
#
INPUTARFILE=$1
NB_PROC=$2
TEST_NAME=$3
MPI_FLAG=$4
JOB_FILE=$5
NXIO_PROC=$6

# export EXE_DIR. This directory is used to execute the model
#
#
#
echo "date: `date`" >> ${SETTE_DIR}/output.sette
echo "" >> ${SETTE_DIR}/output.sette
echo "running config: ${NEW_CONF}" >> ${SETTE_DIR}/output.sette
echo "" >> ${SETTE_DIR}/output.sette
echo "list of cpp_keys: " >> ${SETTE_DIR}/output.sette
echo "`cat ${SETTE_DIR}/../CONFIG/${NEW_CONF}/cpp_${NEW_CONF}.fcm`" >> ${SETTE_DIR}/output.sette
echo "" >> ${SETTE_DIR}/output.sette
echo "compiling with: ${CMP_NAM}" >> ${SETTE_DIR}/output.sette
echo "" >> ${SETTE_DIR}/output.sette
echo "executing script : \"fcm_job $@\" " >> ${SETTE_DIR}/output.sette
echo " " >> ${SETTE_DIR}/output.sette

################################################################
# SET INPUT
# get the input tarfile if needed
if [ "$(cat ${SETTE_DIR}/$INPUTARFILE | wc -w)" -ne 0 ] ; then
   echo "looking for input files in ${SETTE_DIR}/$INPUTARFILE " >> ${SETTE_DIR}/output.sette
   # number of tarfiles: NBTAR
   NBTAR=`cat ${SETTE_DIR}/$INPUTARFILE | wc -l`
   echo "NB of tarfiles ${NBTAR} " >> ${SETTE_DIR}/output.sette
   # loop on tarfiles
   # read file name and directory
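   # Each non-empty line of $INPUTARFILE is expected to hold two fields: the tarfile name
   # under ${FORCING_DIR} and the directory it unpacks into, e.g. (hypothetical names)
   #   ORCA2_LIM_nemo_v3.4.tar  ORCA2_LIM_nemo_v3.4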
   while read tar_file dir_conf_forc
   do
      echo "looking for tarfile ${tar_file} and directory ${FORCING_DIR}/${dir_conf_forc}"
      echo "looking for tarfile ${tar_file} and directory ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
      if [ -f ${FORCING_DIR}/${tar_file} ] && [ -d ${FORCING_DIR}/${dir_conf_forc} ] ; then
         # tarfile and input dir are there, only check the links
         echo "Tarfile and input dir are there, only check the links" >> ${SETTE_DIR}/output.sette
      else
         # otherwise extract the tarfile
         if [ ! -f ${FORCING_DIR}/${tar_file} ] ; then
            echo "tarfile ${FORCING_DIR}/${tar_file} cannot be found, we stop " ; exit 2
         fi
         echo "mkdir ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
         mkdir ${FORCING_DIR}/${dir_conf_forc}
         cd ${FORCING_DIR}/${dir_conf_forc}
         echo " extract from tarfile ${FORCING_DIR}/${tar_file} in ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
         tar xvof ${FORCING_DIR}/${tar_file} ; gunzip -f `find . -name "*.gz"`
      fi
      # tarfile and input dir are there, now (re)create the missing links in ${EXE_DIR}
      cd ${FORCING_DIR}/${dir_conf_forc}
      for fida in *
      do
         [ -f ${EXE_DIR}/${fida} ] || ln -s ${FORCING_DIR}/${dir_conf_forc}/${fida} ${EXE_DIR}/${fida}
      done
   done < ${SETTE_DIR}/$INPUTARFILE

else
   echo "no input files to be searched for"
fi
################################################################

##########################################################################
# COPY RESTART FILES (if needed; i.e. only for SHORT job in RESTART TESTS)
# get the restart files from the LONG run if needed
# SF : not ok for the moment because the cn_exp variable is needed at this point,
# SF : and here it is not known.
#\cd ${EXE_DIR}
#if [ "$(echo ${TEST_NAME} | grep -c "SHORT" )" -ne 0 ] ; then
#   for (( i=0; i<${NB_PROC}; i++ )) ; do
#      L_NPROC=`printf "%04d\n" $i`
#      ln -sf ../LONG/${NEW_CONF}_00000060_restart_${L_NPROC}.nc .
#   done
#fi
## SF : other way
## for file in ../LONG/${NEW_CONF}_*restart_0*.nc ; do
##    ncpu=`echo $file | awk -F '_' '{print $NF}' | cut -f 1 -d '.'`
##    ln -sf ${file} .
## done
##fi
##########################################################################

################################################################
# RUN OPA
cd ${EXE_DIR}
if [ ! -r ${EXE_DIR}/opa ]
then
   echo "executable opa does not exist"
   echo "executable opa does not exist, exit" >> ${SETTE_DIR}/output.sette
   exit 1
fi

# machine-dependent batch set-up (e.g. NOCS ClusterVision system using SLURM batch submission);
# requires the ${SETTE_DIR}/job_batch_template file
#
# if [ ${MPI_FLAG} == "no" ] ; then
case ${COMPILER} in
   X64_MOBILIS*)
      NB_REM=$( echo $NB_PROC $NXIO_PROC | awk '{print ( $1 + $2 ) % 16}')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 16
         #
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1 + $2 ) / 16}')
      else
         #
         # number of processes required is not an integer multiple of 16
         # round up the number of nodes required.
         #
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{printf("%d",($1 + $2 ) / 16 + 1 )}')
      fi
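      # Worked example (hypothetical values): NB_PROC=20 and NXIO_PROC=4 give 24 processes;
      # 24 % 16 = 8, so the else branch applies and NB_NODES = int(24/16 + 1) = 2 nodes
      # on this 16-core-per-node system.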
      ;;
   XC_ARCHER_INTEL)
      # ocean cores are packed 24 to a node
      NB_REM=$( echo $NB_PROC | awk '{print ( $1 % 24 ) }')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 24
         #
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1) / 24}')
      else
         #
         # number of processes required is not an integer multiple of 24
         # round up the number of nodes required.
         #
         NB_NODES=$( echo $NB_PROC | awk '{printf("%d",($1) / 24 + 1 )}')
      fi
      # xios cores are sparsely packed at 4 to a node
      # but can not share nodes with the ocean cores
      NB_REM=$( echo $NXIO_PROC | awk '{print ( $1 % 4 ) }')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 4
         #
         NB_NODES=$( echo $NB_NODES $NXIO_PROC | awk '{print ($1 + ( $2 / 4 ))}')
      else
         #
         # number of processes required is not an integer multiple of 4
         # round up the number of nodes required.
         #
         NB_NODES=$( echo $NB_NODES $NXIO_PROC | awk '{printf("%d",($1 + ( $2 / 4 ) + 1 ))}')
      fi
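      # Worked example (hypothetical values): NB_PROC=48 fills exactly 2 ocean nodes (48 % 24 = 0);
      # NXIO_PROC=6 does not divide evenly by 4 (6 % 4 = 2), so the xios servers get their own
      # nodes and NB_NODES = int(2 + 6/4 + 1) = 4 nodes in total.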
      ;;
   XC40_METO*) # Setup for Met Office XC40 with any compiler
      # ocean cores are packed 32 to a node
      # If we need more than one node then we have to use the parallel queue and XIOS must have a node to itself
      NB_REM=$( echo $NB_PROC | awk '{print ( $1 % 32 ) }')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 32
         #
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1) / 32}')
      else
         #
         # number of processes required is not an integer multiple of 32
         # round up the number of nodes required.
         #
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{printf("%d",($1) / 32 + 1 )}')
      fi
      # xios cores are sparsely packed at 4 to a node
      if [ $NXIO_PROC == 0 ] ; then
         NB_XNODES=0
      else
         NB_REM=$( echo $NXIO_PROC | awk '{print ( $1 % 4 ) }')
         if [ ${NB_REM} == 0 ] ; then
            # number of processes required is an integer multiple of 4
            #
            NB_XNODES=$( echo $NXIO_PROC | awk '{print (( $1 / 4 ) + 1)}')
         else
            #
            # number of processes required is not an integer multiple of 4
            # round up the number of nodes required.
            #
            NB_XNODES=$( echo $NXIO_PROC | awk '{printf("%d",($1) / 4 + 1) }')
         fi
      fi
      if [ ${NB_XNODES} -ge 1 ] ; then
         NB_NODES=$((NB_NODES+NB_XNODES))
      fi
      echo NB_XNODES=${NB_XNODES}
      echo Total NB_NODES=${NB_NODES}
      if [ ${NB_NODES} -eq 1 ] ; then
         QUEUE=shared
         # Not using XIOS in detached mode and using less than one node, so should be OK on a shared node
         # Load the snplauncher module to allow use of mpiexec
         SELECT="select=1:ncpus=$((NXIO_PROC + NB_PROC)):mem=15GB"
         module load cray-snplauncher
         echo 'Shared Queue'
      else
         QUEUE=normal
         SELECT="select=$NB_NODES"
         module unload cray-snplauncher # Make sure the snplauncher module is not loaded
         echo 'Normal Queue'
      fi
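      # For example (hypothetical values): NB_PROC=8, NXIO_PROC=0 fits on one node, giving
      # QUEUE=shared and SELECT="select=1:ncpus=8:mem=15GB"; NB_PROC=64, NXIO_PROC=4 needs
      # 2 ocean nodes + 2 XIOS nodes, giving QUEUE=normal and SELECT="select=4".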
      ;;
   openmpi_NAVITI_MERCATOR)
      echo NB_PROCS ${NB_PROC}
      if [ ${NB_PROC} -eq 1 ] ; then
         NB_NODES=1
         QUEUE=monoproc
         NB_PROC_NODE=${NB_PROC}
      else
         if [ ${NB_PROC} -le 16 ] ; then
            NB_NODES=1
            QUEUE=mono
            NB_PROC_NODE=${NB_PROC}
         else
            NB_NODES=$( echo $NB_PROC | awk '{print $1 - $1 % 16}' | awk '{print $1 / 16 }')
            QUEUE=multi
            NB_PROC_NODE=16
         fi
      fi
      echo NB_PROCS ${NB_PROC}
      echo NB_NODES ${NB_NODES}
      echo NB_PROC_NODE ${NB_PROC_NODE}
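      # For example (hypothetical value): NB_PROC=32 selects QUEUE=multi with
      # NB_PROC_NODE=16 and NB_NODES=2 (32 processes packed 16 to a node).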
      ;;
   *)
      NB_NODES=${NB_PROC}
      ;;

esac
#
# Pass settings into job file by using sed to edit predefined strings
#
TOTAL_NPROCS=$(( $NB_PROC + $NXIO_PROC ))
cat ${SETTE_DIR}/job_batch_template | sed -e"s/NODES/${NB_NODES}/" \
    -e"s/TOTAL_NPROCS/${TOTAL_NPROCS}/" \
    -e"s/NPROCS/${NB_PROC}/" \
    -e"s/NXIOPROCS/${NXIO_PROC}/" \
    -e"s:DEF_SETTE_DIR:${SETTE_DIR}:" -e"s:DEF_INPUT_DIR:${INPUT_DIR}:" \
    -e"s:DEF_EXE_DIR:${EXE_DIR}:" \
    -e"s:DEF_CONFIG_DIR:${CONFIG_DIR}:" \
    -e"s:MPI_FLAG:${MPI_FLAG}:" \
    -e"s:DEF_NEMO_VALIDATION:${NEMO_VALIDATION_DIR}:" -e"s:DEF_NEW_CONF:${NEW_CONF}:" \
    -e"s:DEF_CMP_NAM:${CMP_NAM}:" -e"s:DEF_TEST_NAME:${TEST_NAME}:" > run_sette_test.job
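# The job_batch_template file is expected to contain the literal placeholder strings
# substituted above (NODES, TOTAL_NPROCS, NPROCS, NXIOPROCS, DEF_SETTE_DIR, DEF_INPUT_DIR,
# DEF_EXE_DIR, DEF_CONFIG_DIR, MPI_FLAG, DEF_NEMO_VALIDATION, DEF_NEW_CONF, DEF_CMP_NAM,
# DEF_TEST_NAME). For instance a (hypothetical) PBS header line such as
#   #PBS -l select=NODES
# would become "#PBS -l select=2" after the edit above.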

case ${COMPILER} in
   openmpi_NAVITI_MERCATOR)
      cat run_sette_test.job | sed -e"s/NPROC_NODE/${NB_PROC_NODE}/" \
          -e"s:QUEUE:${QUEUE}:" > run_sette_test1.job
      mv run_sette_test1.job run_sette_test.job
      ;;
   XC40_METO*)
      cat run_sette_test.job | sed -e"s/QUEUE/${QUEUE}/" \
          -e"s/SELECT/${SELECT}/" > run_sette_test1.job
      mv run_sette_test1.job run_sette_test.job
      ;;
esac
#
# create the unique submission job script
#
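# How the merge below works: the batch template is assumed to mark its per-test section
# with "# BODY" ... "# END_BODY" lines. If ${JOB_FILE} does not exist yet, the freshly
# generated run_sette_test.job simply becomes ${JOB_FILE}; otherwise everything up to
# (but excluding) the existing "# END_BODY" line is kept and the part of run_sette_test.job
# after its "# BODY" line is appended, so successive tests accumulate their commands in a
# single submission script.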
if [ ! -f $JOB_FILE ] ; then
   mv run_sette_test.job $JOB_FILE
else
   e=`grep -n "# END_BODY" ${JOB_FILE} | cut -d : -f 1`
   e=$(($e - 1))
   head -$e $JOB_FILE > ${JOB_FILE}_new
   mv ${JOB_FILE}_new ${JOB_FILE}
   l=`wc -l run_sette_test.job | sed -e "s:run_sette_test.job::"`
   b=`grep -n "# BODY" run_sette_test.job | cut -d : -f 1`
   t=$(($l - $b))
   tail -$t run_sette_test.job >> $JOB_FILE
fi

chmod a+x $JOB_FILE ; echo "$JOB_FILE is ready"

#fi