#!/bin/bash
#SBATCH -A GROUP_IDRIS@cpu
#SBATCH --job-name=SETTE_JOB         # job name
#SBATCH --partition=cpu_p1           # name of the execution partition
#SBATCH --ntasks=NPROCS              # total number of MPI processes
#SBATCH --ntasks-per-node=40         # number of MPI processes per node
# NB: the next line is misleading, but in Slurm vocabulary
# "multithread" really does refer to hyperthreading.
#SBATCH --hint=nomultithread         # 1 MPI process per physical core (no hyperthreading)
#SBATCH --time=00:29:00              # maximum requested wall time (HH:MM:SS)
#SBATCH --output=sette.jobid_%j.out  # stdout file name
#SBATCH --error=sette.jobid_%j.out   # stderr file name (here shared with stdout)
##########################################################################
#
# Test specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file).
#
OCEANCORES=NPROCS
export SETTE_DIR=DEF_SETTE_DIR
#
# set up mpp computing environment
#
# Local settings for machine BULL (TITANE at CCRT France)
#
export MPIRUN="srun --mpi=pmi2 --cpu-bind=cores -K1"

#
# load sette functions (only post_test_tidyup needed)
#
. "${SETTE_DIR}/all_functions.sh"
#

# modules to load

# Don't remove nor change the following line
# BODY

#
# These variables are needed by post_test_tidyup function in all_functions.sh
#
export EXE_DIR=DEF_EXE_DIR
export INPUT_DIR=DEF_INPUT_DIR
export CONFIG_DIR=DEF_CONFIG_DIR
export TOOLS_DIR=DEF_TOOLS_DIR
export NEMO_VALIDATION_DIR=DEF_NEMO_VALIDATION
export NEW_CONF=DEF_NEW_CONF
export CMP_NAM=DEF_CMP_NAM
export TEST_NAME=DEF_TEST_NAME
#
# end of set up
###############################################################
#
# change to the working directory (abort rather than run nemo from the wrong place)
#
cd "${EXE_DIR}" || exit 1

echo "Running on host $(hostname)"
echo "Time is $(date)"
echo "Directory is $(pwd)"
#
# Run the parallel MPI executable
#
echo "Running time ${MPIRUN} ./nemo"
#
# MPI_FLAG is substituted by fcm_job.sh (yes/no). The placeholder is quoted
# so the test stays syntactically valid even if substitution leaves it empty.
if [ "MPI_FLAG" == "yes" ]; then
    # ${MPIRUN} is intentionally unquoted: it holds a command plus its options
    # and must word-split into separate arguments.
    time ${MPIRUN} ./nemo
else
    time ./nemo
fi

#
post_test_tidyup

# END_BODY
# Don't remove nor change the previous line


exit