source: CPL/oasis3-mct/branches/OASIS3-MCT_5.0_branch/examples/tutorial_communication/run_tutorial @ 6331

Last change on this file since 6331 was 6331, checked in by aclsce, 17 months ago

Moved oasis-mct_5.0 in oasis3-mct/branches directory.

  • Property svn:executable set to *
File size: 5.3 KB
#!/bin/ksh
#set -xv
######################################################################
#
host=`uname -n`
user=`whoami`
#
## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_tutorial
casename=`basename $srcdir`
#
## - Name of the executables
exe1=ocean
exe2=atmos
#
############### User's section #######################################
#
## - Define architecture and coupler
arch=mac  # training, belenos, nemo_lenovo, mac,
          # kraken, gfortran_openmpi_openmp_linux,
          # pgi_openmpi_openmp_linux,
          # pgi20.4_openmpi_openmp_linux (does not work with 4.0),
          # gnu1020_openmpi_openmp_linux (does not work with 4.0)
#
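## A minimal sketch (not part of the original script): to support a new
## machine, e.g. a hypothetical "myhost", add its name to the list above
## and extend the if/elif chains in sections 2 and 3 below, for instance:
##   elif [ $arch == myhost ]; then
##       MPIRUN=/path/to/mpirun    # hypothetical path to your MPI launcher
#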
# - Define number of processes to run each executable
nproc_exe1=4
nproc_exe2=4
#
############### End of user's section ################################
#
# - Define rundir
rundir=${srcdir}/work_${casename}_${nproc_exe1}_${nproc_exe2}
#
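## Example (added comment): with the default settings above and the script
## run from the tutorial_communication directory, rundir expands to
##   $srcdir/work_tutorial_communication_4_4
#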
echo '*****************************************************************'
echo '*** '$casename
echo ''
echo 'Rundir       : '$rundir
echo 'Architecture : '$arch
echo 'Host         : '$host
echo 'User         : '$user
echo ''
echo $exe1' runs on '$nproc_exe1' processes'
echo $exe2' runs on '$nproc_exe2' processes'
echo ''
######################################################################
### 1. Create rundir and copy everything needed
#
\rm -fr $rundir
mkdir -p $rundir
cp -f $datadir/*nc  $rundir/.
cp -f $srcdir/$exe1 $rundir/.
cp -f $srcdir/$exe2 $rundir/.
cd $rundir
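## (Added comment) The *.nc files copied from data_tutorial are presumably
## the grid and field NetCDF inputs read by the two tutorial executables.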
######################################################################
### 2. Definition of mpirun command and batch script
#
if [ $arch == training ]; then
    MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
elif [ $arch == gfortran_openmpi_openmp_linux ]; then
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
elif [ $arch == pgi_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
elif [ $arch == gnu1020_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/openmpi/4.1.0_gcc1020/bin/mpirun
elif [ $arch == pgi20.4_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
elif [ $arch == belenos ] ; then
    (( nproc = $nproc_exe1 + $nproc_exe2 ))
    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal256
#SBATCH --time=00:10:00
#SBATCH --job-name=spoc     # job name
#SBATCH -N 1                # number of nodes
#SBATCH -n $nproc           # number of procs
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
ulimit -s unlimited
cd $rundir
module load intelmpi/2018.5.274
module load intel/2018.5.274
module load netcdf-fortran/4.5.2_V2
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
#
# Run each executable in standalone mode
time mpirun -np $nproc_exe1 ./$exe1
time mpirun -np $nproc_exe2 ./$exe2
#
EOF
#
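## Aside (a sketch, not taken from the original script): to run the two
## executables as a single coupled MPMD job rather than two standalone runs,
## the standard mpirun colon syntax with Intel MPI or Open MPI would be:
##   time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#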
elif [ ${arch} == nemo_lenovo ] ; then
    MPIRUN=mpirun
    (( nproc = $nproc_exe1 + $nproc_exe2 ))
    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Job name
#SBATCH --job-name spoc
# Job time limit
#SBATCH --time=00:10:00
#SBATCH --partition debug
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and processes
#SBATCH --nodes=1 --ntasks-per-node=$nproc
#SBATCH --distribution cyclic
cd $rundir
ulimit -s unlimited
#SPOC module purge
#SPOC module -s load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
time $MPIRUN -np $nproc_exe1 ./$exe1
time $MPIRUN -np $nproc_exe2 ./$exe2
#
EOF

elif [ ${arch} == kraken ] ; then
    (( nproc = $nproc_exe1 + $nproc_exe2 ))
    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
# Job name
#SBATCH --job-name spoc
# Job time limit
#SBATCH --time=00:10:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and processes
#SBATCH --nodes=1 --ntasks-per-node=$nproc
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/phdf5/1.8.20_impi
time mpirun -np $nproc_exe1 ./$exe1
time mpirun -np $nproc_exe2 ./$exe2
EOF
fi

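## Note (added comment): the here-documents above use an unquoted EOF
## delimiter, so $rundir, $nproc, $exe1, etc. are expanded when this script
## writes the batch file; the generated file contains their literal values.
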
######################################################################
### 3. Model execution or batch submission
#
if [ $arch == training ] || [ $arch == gfortran_openmpi_openmp_linux ] || \
   [ $arch == gnu1020_openmpi_openmp_linux ] || [ $arch == pgi_openmpi_openmp_linux ] || \
   [ $arch == pgi20.4_openmpi_openmp_linux ]; then
    export OMP_NUM_THREADS=1
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1
    $MPIRUN -oversubscribe -np $nproc_exe2 ./$exe2
elif [ $arch == belenos ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ ${arch} == nemo_lenovo ] || [ ${arch} == kraken ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ ${arch} == mac ]; then
    echo 'Executing the model using mpirun'
    mpirun --oversubscribe -np $nproc_exe1 ./$exe1
    mpirun --oversubscribe -np $nproc_exe2 ./$exe2
fi
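## Note (added comment): -oversubscribe/--oversubscribe is an Open MPI
## option letting mpirun start more ranks than available slots; Intel MPI's
## mpirun (used by the "training" arch above) is not expected to accept it,
## so that branch would likely need the flag removed.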
echo $casename' has been executed or submitted to the queue.'
echo 'Results are found in rundir: '$rundir
#
######################################################################