#!/bin/ksh
#set -x
host=`uname -n`
user=`whoami`
## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis
casename=`basename $srcdir`
## - Define case
if [ $# -eq 0 ] ; then
  echo "By default, i.e. without arguments, the source grid is bggd,"
  echo "the target grid is nogt and the remapping is 1st order conservative;"
  echo "2 nodes, 1 MPI task per node and 1 OpenMP thread per MPI task are used for the run."
  SRC_GRID=bggd
  TGT_GRID=nogt
  remap=conserv1st
  n_p_t=2_1_1
  nnode=2
  mpiprocs=1
  threads=1
elif [ $# -ne 4 ] ; then
  echo "If you do not want to run the default case, you must run the script with 4 arguments,"
  echo "i.e. './run_testinterp.sh src tgt remap nnodes_nprocs_nthreads',"
  echo "where 'src' is the source grid, 'tgt' the target grid and 'remap' the remapping,"
  echo "'nnodes' the total number of nodes for the run, 'nprocs' the number of MPI tasks per node"
  echo "and 'nthreads' the number of OpenMP threads per MPI task."
  exit
else
  SRC_GRID=$1
  TGT_GRID=$2
  remap=$3
  n_p_t=$4
  nnode=`echo $n_p_t | awk -F _ '{print $1}'`
  mpiprocs=`echo $n_p_t | awk -F _ '{print $2}'`
  threads=`echo $n_p_t | awk -F _ '{print $3}'`
fi
##
## - User's choice of computing architecture
arch=pgi20.4_openmpi_openmp_linux
        # nemo_lenovo_intel_impi_openmp, kraken_intel_impi_openmp,
        # training_computer, gfortran_openmpi_openmp_linux, belenos, mac,
        # pgi_openmpi_openmp_linux,
        # pgi20.4_openmpi_openmp_linux (does not work with 4.0),
        # gnu1020_openmpi_openmp_linux (does not work with 4.0)
##
######################################################################
## - Verification of source grid type and remapping
##
## - Source grid : bggd, ssea or icos
##   bggd is a structured (LR) atmosphere grid
##   ssea is a gaussian reduced (D) atmosphere grid : no 2nd order conservative remapping
##   icos is an unstructured (U) atmosphere grid : no bilinear, bicubic nor 2nd order conservative remapping
##
## - Target grid : nogt
##   nogt is a structured (LR) ocean grid
##
## - Remapping : distwgt (nearest-neighbour), bili (bilinear), bicu (bicubic),
##   conserv1st or conserv2nd (1st or 2nd order conservative remapping)
##
## Configuration files 'namcouple' are given in /data_oasis3
## Warning: if you add any extra lines in one of the namcouple files given as examples, you will have to
## change the definition of SRC_GRID_TYPE, SRC_GRID_PERIOD and SRC_GRID_OVERLAP in this script (see below)
##
## - Verification of source grid type and remapping
if [ ${SRC_GRID} == "ssea" ]; then
  if [ ${remap} == "conserv2nd" ]; then
    echo "Impossible to perform conserv2nd remapping from gaussian reduced grid ssea"
    exit
  fi
fi
if [ ${SRC_GRID} == "icos" ]; then
  if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
    echo "Impossible to perform ${remap} remapping from unstructured grid icos"
    exit
  fi
fi
##
rundir=$srcdir/${casename}_${SRC_GRID}_${TGT_GRID}_${remap}_${nnode}_${mpiprocs}_${threads}
##
######################################################################
##
## - Names of the executables
exe1=model1
exe2=model2
##
## - Define the number of processes used to run each executable
(( nproc = $nnode * $mpiprocs ))
(( nproc_exe2 = $nproc / 2 ))
(( nproc_exe1 = $nproc - $nproc_exe2 ))
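## - Example (for illustration only): './run_testinterp.sh bggd nogt distwgt 1_2_1' would set
##   nnode=1, mpiprocs=2 and threads=1, so nproc = 1*2 = 2, with $exe2 running on
##   nproc/2 = 1 process and $exe1 on the remaining 1 process.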
echo ''
echo '**************************************************************************************************************'
echo '*** '$casename' : '$run
echo ''
echo "Running test_interpolation on $nnode nodes with $mpiprocs MPI tasks per node and $threads threads per MPI task"
echo '**************************************************************************************************************'
echo 'Source grid :' $SRC_GRID
echo 'Target grid :' $TGT_GRID
echo 'Rundir :' $rundir
echo 'Architecture :' $arch
echo 'Host : '$host
echo 'User : '$user
echo 'Grids : '$SRC_GRID'-->'$TGT_GRID
echo 'Remap : '$remap
echo ''
echo $exe1' runs on '$nproc_exe1 'processes'
echo $exe2' runs on '$nproc_exe2 'processes'
echo ''
echo ''
## - Copy everything needed into rundir
\rm -fr $rundir/*
mkdir -p $rundir
ln -sf $datadir/grids.nc $rundir/grids.nc
ln -sf $datadir/masks.nc $rundir/masks.nc
ln -sf $datadir/areas.nc $rundir/areas.nc
ln -sf $srcdir/$exe1 $rundir/.
ln -sf $srcdir/$exe2 $rundir/.
cp -f $datadir/namcouple_${SRC_GRID}_${TGT_GRID}_${remap} $rundir/namcouple
## - Source grid characteristics
# These are read from the namcouple file. If you use a namcouple other than the ones
# provided in /data_oasis3, you may have to change the 3 lines below.
SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2`    # source grid type
SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1`  # "P" for periodic, "R" for non-periodic
SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # number of overlapping grid points for periodic grids
echo "SRC_GRID_TYPE : $SRC_GRID_TYPE"
echo "SRC_GRID_PERIOD : $SRC_GRID_PERIOD"
echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"
## - Create name_grids.dat, read by the models, from the namcouple information
cat <<EOF >> $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='$SRC_GRID'
cl_remap='$remap'
cl_type_src='$SRC_GRID_TYPE'
cl_period_src='$SRC_GRID_PERIOD'
il_overlap_src=$SRC_GRID_OVERLAP
\$end
\$grid_target_characteristics
cl_grd_tgt='$TGT_GRID'
\$end
EOF
#
cd $rundir
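## - At this point $rundir should contain everything the models need (set up above):
##   links to grids.nc, masks.nc and areas.nc, links to the $exe1 and $exe2 executables,
##   the selected namcouple (copied as 'namcouple') and the generated name_grids.dat.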
######################################################################
## - Creation of configuration scripts
###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
if [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
#SBATCH --job-name ${n_p_t}
#SBATCH --time=00:02:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
##SBATCH --cpus-per-task=24
cd $rundir
export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

timreq=00:30:00

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=36
cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
export I_MPI_WAIT_MODE=enable
# Build an explicit KMP_AFFINITY proclist, one entry per OpenMP thread
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads
# Binding IntelMPI
MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35"
INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
I_IMPI_BINDING="-env I_MPI_PERHOST \${mpiprocs} \${INTELMPI_BINDING}"
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

###---------------------------------------------------------------------
### BELENOS
###---------------------------------------------------------------------
elif [ $arch == belenos ] ; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal256
#SBATCH --job-name ${remap}_${threads}
#SBATCH --time=02:00:00
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
#SBATCH -N $nnode
#SBATCH --ntasks-per-node=$mpiprocs
#
ulimit -s unlimited
cd $rundir
#
module load intelmpi/2018.5.274
module load intel/2018.5.274
module load netcdf-fortran/4.5.2_V2
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2
#
EOF

fi

######################################################################
### - Execute the model
if [ ${arch} == training_computer ]; then
  export OASIS_OMP_NUM_THREADS=$threads
  MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
  echo 'Executing the model using '$MPIRUN
  $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == gfortran_openmpi_openmp_linux ]; then
  export OASIS_OMP_NUM_THREADS=$threads
  MPIRUN=/usr/lib64/openmpi/bin/mpirun
  echo 'Executing the model using '$MPIRUN
  $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == pgi_openmpi_openmp_linux ]; then
  MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
  echo 'Executing the model using '$MPIRUN
  $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == gnu1020_openmpi_openmp_linux ]; then
  export OASIS_OMP_NUM_THREADS=$threads
  MPIRUN=/usr/local/openmpi/4.1.0_gcc1020/bin/mpirun
  echo 'Executing the model using '$MPIRUN
  $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == pgi20.4_openmpi_openmp_linux ]; then
  MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
  echo 'Executing the model using '$MPIRUN
  $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == nemo_lenovo_intel_impi_openmp ]; then
  echo 'Submitting the job to the queue using sbatch'
  sbatch $rundir/run_$casename.$arch
  squeue -u $USER
elif [ $arch == kraken_intel_impi_openmp ]; then
  echo 'Submitting the job to the queue using sbatch'
  sbatch $rundir/run_$casename.$arch
  squeue -u $USER
elif [ $arch == belenos ]; then
  echo 'Submitting the job to the queue using sbatch'
  sbatch $rundir/run_$casename.$arch
  squeue -u $user
elif [ ${arch} == mac ]; then
  echo 'Executing the model using mpirun'
  mpirun --oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
fi

echo $casename 'was executed or submitted to the queue.'
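## - Where to look for the output (as set up above): interactive runs redirect standard
##   output to runjob.err in $rundir, while batch runs (nemo_lenovo, kraken, belenos)
##   write $casename.o and $casename.e as requested in the generated job script.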
echo 'Results are found in rundir : '$rundir
######################################################################