source: configs/testing/arch/arch-JEANZAY_PGI_ACC.sh

Last change on this file was 1042, checked in by dubos, 4 years ago

testing : setup for PGI_ACC (TBC)

  • Property svn:executable set to *
File size: 3.6 KB
Line 
1#!/bin/bash
2
3# This script provides site-specific functions
4# called by the site-independent scripts
5# create_runs.sh and submit.sh
6
7#----------------------------- split_XXX ----------------------------
8
# Domain split for the 32-point mesh: 1 subdomain in i, 2 in j.
split_mpi_32() {
    setvar nbp 32 nsplit_i 1 nsplit_j 2
}
# Domain split for the serial 40-point run: 1 subdomain in i, 2 in j.
split_serial_40() {
    setvar nbp 40 nsplit_i 1 nsplit_j 2
}
# Domain split for the 40-point MPI run: 1 subdomain in i, 2 in j.
split_mpi_40() {
    setvar nbp 40 nsplit_i 1 nsplit_j 2
}
# Domain split for the 80-point MPI run: 1 subdomain in i, 2 in j.
split_mpi_80() {
    setvar nbp 80 nsplit_i 1 nsplit_j 2
}
# Domain split for the 40-point hybrid MPI-OMP run: 1 subdomain in i,
# 2 in j, with a single OpenMP level per task.
split_mpi_omp_40() {
    setvar nbp 40 nsplit_i 1 nsplit_j 2 omp_level_size 1
}
24
25#----------------- job submission ------------------
26
# Submit a batch script via ccc_msub and echo the job ID on stdout.
# $1 = script to submit; remaining args (SLURM options) are currently unused.
# The full submitter output is mirrored to stderr for logging; the last
# whitespace-separated token of that output (the job ID) goes to stdout.
function submit_job_X64_IRENE() # $1=script $* = SLURM OPTIONS
{
    local tmp
    tmp=$(mktemp) || return 1          # fail early if no temp file can be made
    ccc_msub "$1" > "$tmp"
    cat "$tmp" 1>&2                    # keep the raw submitter message in the log
    awk '{ print $NF }' "$tmp"         # extract the trailing job ID for the caller
    rm -f -- "$tmp"
}
35
36#------------------------------ job_XXX -----------------------------
37
38# Serial jobs
# Emit a serial job script: 1 node, 1 task, direct executable launch.
# $1 = experiment name (quoted: unquoted $1 would word-split/glob).
function job_serial() { # EXP_NAME
    job_generic "$1" 1 1 "./gcm.exe"
}
# Serial job for the 40-point case — delegates to job_serial.
# $1 = experiment name (quoted: unquoted $1 would word-split/glob).
function job_serial_40() { # EXP_NAME
    job_serial "$1"
}
45
46# MPI jobs
# Emit an MPI job script, launching the model through srun.
# Args are forwarded verbatim to job_generic; callers in this file pass
# EXP_NAME NODES GPUS GPU_PER_NODE (the old "NB_MPI" comment was stale).
# "$@" preserves each argument as one word (unquoted $* would word-split).
function job_mpi() { # EXP_NAME NODES GPUS GPU_PER_NODE
    job_generic "$@" "srun ./gcm.exe"
}
# MPI job for the 32-point case: 1 node, 4 tasks, 4 tasks per node.
# $1 = experiment name (quoted: unquoted $1 would word-split/glob).
function job_mpi_32() { # EXP_NAME
    job_mpi "$1" 1 4 4
}
# MPI job for the 40-point case: 1 node, 4 tasks, 4 tasks per node.
# $1 = experiment name (quoted: unquoted $1 would word-split/glob).
function job_mpi_40() { # EXP_NAME
    job_mpi "$1" 1 4 4
}
# MPI job for the 80-point case: 1 node, 4 tasks, 4 tasks per node.
# $1 = experiment name (quoted: unquoted $1 would word-split/glob).
function job_mpi_80() { # EXP_NAME
    job_mpi "$1" 1 4 4
}
59
60# MPI-OMP jobs
# Emit a hybrid MPI-OMP job script, launching the model through srun.
# "$@" preserves each argument as one word (unquoted $* would word-split).
function job_mpi_omp() { # EXP_NAME NODES GPUS GPU_PER_NODE
    job_generic "$@" "srun ./gcm.exe"
}
64
# Hybrid MPI-OMP job for the 40-point case: 1 node, 4 tasks, 4 per node.
# $1 = experiment name (quoted: unquoted $1 would word-split/glob).
function job_mpi_omp_40() { # EXP_NAME
    job_mpi_omp "$1" 1 4 4
}
68
69# Generic
# Print a complete SLURM batch script on stdout (to be captured and submitted
# by the caller).  The heredoc is the job script itself, so its content is
# runtime output and must not be edited casually.
#   $1 = job / experiment name
#   $2 = number of nodes        (--nodes)
#   $3 = number of MPI tasks    (--ntasks; one per GPU here)
#   $4 = tasks per node         (--ntasks-per-node and --gres=gpu:N)
#   $5 = launch command, e.g. "srun ./gcm.exe"
# NOTE(review): the project account (wuu@gpu), partition (gpu_p1) and the
# 10-minute time limit are hard-coded below — confirm they fit the target
# allocation before reuse.
# Escaped \${SLURM_SUBMIT_DIR} expands at job run time, not at generation time.
function job_generic() { # EXP_NAME NODES GPUS GPU_PER_NODE CMD
    cat <<EOF
#!/bin/bash
## Request name
#SBATCH --job-name=$1

#SBATCH --partition=gpu_p1          # partition GPU choisie
#SBATCH --nodes=$2                  # nombre de noeud
#SBATCH --ntasks=$3                 # nombre de tache MPI (= nombre de GPU ici)
#SBATCH --ntasks-per-node=$4        # nombre de tache MPI par noeud (= nombre de GPU ici)
#SBATCH --gres=gpu:$4               # nombre de GPU par noeud
#SBATCH --cpus-per-task=1           # nombre de coeurs CPU par tache

## computing project
#SBATCH -A wuu@gpu
## Elapsed time limit HH:MM:SS
#SBATCH --time=00:10:00

# do not use hyperthreading
#SBATCH --hint=nomultithread
# standard outputs
#SBATCH --output=DYNAMICO%j.out
#SBATCH --error=DYNAMICO%j.out

export OMP_NUM_THREADS=1
# OpenMP binding
export OMP_PLACES=cores

# stack
export OMP_STACKSIZE=128M
ulimit -s unlimited

# move to submission directory
cd \${SLURM_SUBMIT_DIR}

# load the same modules as during compilation
source build/arch.env
module list

# cleanup execution directory and run
rm -rf gcm.log logs *.nc netcdf
date > gcm.log
ulimit -s unlimited
$5 >> gcm.log
date >> gcm.log

# move output files to netcdf subdir
mkdir -p netcdf
cp gcm.log *.def netcdf
mv *.nc netcdf

# keep log files, source code and executable for debug
mkdir -p logs
cp *.xml logs
cp -pr gcm.exe logs
cp -pr build/src logs
mv xios_client_*.err xios_client_*.out gcm.log logs

EOF
}
130
131#------------------------------ post-processing job -----------------------------
132
# Print a post-processing batch script (ccc_msub/MSUB format) on stdout.
# Arguments: a list of job IDs; the generated job waits for all of them via
# an "afterany" dependency, then runs bash/post.sh from $ROOT.
# NOTE(review): this emits Irene-style #MSUB directives although the file is
# the Jean-Zay arch setup (commit says "TBC") — presumably copied from the
# X64_IRENE config and not yet adapted; verify before relying on it.
# $project and $ROOT must be set in the generating shell; the heredoc content
# is runtime output and must not be edited casually.  Escaped
# \${BRIDGE_MSUB_PWD} expands at job run time, not at generation time, while
# the unescaped $(cat ...) inlines the arch environment at generation time.
function job_post_X64_IRENE() # LIST
{
    DEPLIST=afterany
    # Build "afterany:ID1:ID2:..." from the argument list.
    for ID in $* ; do
        DEPLIST="$DEPLIST:$ID"
    done
    cat <<EOF
#!/bin/bash
## Request name
#MSUB -r testing
#MSUB -n 1
## Elapsed time limit in seconds
#MSUB -T 600
#MSUB -q standard
#MSUB -A $project
#MSUB -q skylake
#MSUB -m work
#MSUB -E "--dependency=$DEPLIST"
## Number of tasks (=MPI processes) to use
## Quality of Service required (long [3 days], normal [1 day], test [30 min])
#MSUB -Q normal

export OMP_NUM_THREADS=1
# this script is submitted from $ROOT/logs
cd \${BRIDGE_MSUB_PWD}/..

$(cat $ROOT/DYNAMICO/arch/arch-X64_IRENE.env)
module load python

bash/post.sh
EOF
}
Note: See TracBrowser for help on using the repository browser.