! ==============================================================================================================================
! MODULE       : mod_orchidee_mpi_data
!
! CONTACT      : orchidee-help _at_ listes.ipsl.fr
!
! LICENCE      : IPSL (2006)
! This software is governed by the CeCILL licence see ORCHIDEE/ORCHIDEE_CeCILL.LIC
!
!>\BRIEF      All routines needed for the initialization of the MPI parallelization of Orchidee (coupled and offline)
!!
!!\n DESCRIPTION  : Definition and allocation of parallel data for MPI.
!!                  Initialization of parallel or sequential IOs.
!!                  Definition of Load Balancing functions.
!!
!! RECENT CHANGE(S): None
!!
!! REFERENCE(S)    : None
!!
!! SVN              :
!! $HeadURL: svn://forge.ipsl.jussieu.fr/orchidee/branches/ORCHIDEE_2_2/ORCHIDEE/src_parallel/mod_orchidee_mpi_data.F90 $
!! $Date: 2018-08-02 09:06:40 +0200 (Thu, 02 Aug 2018) $
!! $Revision: 5364 $
!! \n
!_ ================================================================================================================================


MODULE mod_orchidee_mpi_data

!-
  USE defprec
  USE ioipsl_para
  USE mod_orchidee_para_var

  IMPLICIT NONE

!-
#include "src_parallel.h"
!-
!-

CONTAINS

!! =============================================================================================================================
!! SUBROUTINE:   Init_orchidee_mpi
!!
!>\BRIEF        Initialization of the MPI parallelization in ORCHIDEE offline mode.
!!
!! DESCRIPTION: Initialization of the MPI parallelization in ORCHIDEE offline mode.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Init_orchidee_mpi(communicator)


    IMPLICIT NONE
    INTEGER, OPTIONAL, INTENT(in) :: communicator
    INTEGER :: COMM
    INTEGER :: ierr
    LOGICAL :: xios_orchidee_ok     !! Local variable read from run.def

#ifdef CPP_PARA
    INCLUDE 'mpif.h'
#endif

    ! Read XIOS_ORCHIDEE_OK from run.def. In this module, due to cyclic dependencies,
    ! the variable cannot be used from module xios_orchidee.
    xios_orchidee_ok=.TRUE.
    CALL getin('XIOS_ORCHIDEE_OK',xios_orchidee_ok)

#ifdef CPP_PARA
    IF ( xios_orchidee_ok ) THEN
       CALL MPI_INIT(ierr)
       CALL xios_orchidee_comm_init(COMM)
    ELSE IF ( PRESENT(communicator) ) THEN
       COMM=communicator
    ELSE
       CALL MPI_INIT(ierr)
       COMM=MPI_COMM_WORLD
    ENDIF
    CALL MPI_COMM_SIZE(COMM,mpi_size,ierr)
    CALL MPI_COMM_RANK(COMM,mpi_rank,ierr)
    is_ok_mpi=.TRUE.
#else
    mpi_rank=0
    mpi_size=1
    is_ok_mpi=.FALSE.
    ! It is not possible to use XIOS without MPI
    WRITE(numout,*) 'XIOS cannot be run without MPI. xios_orchidee_ok is set to false.'
    xios_orchidee_ok=.FALSE.
#endif

    mpi_rank_root=0

    IF (mpi_rank==mpi_rank_root) THEN
       is_mpi_root=.TRUE.
    ELSE
       is_mpi_root=.FALSE.
    ENDIF

    CALL Init_const_mpi(COMM)

  END SUBROUTINE Init_orchidee_mpi
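
  ! Usage sketch (illustrative only; comm_driver is a hypothetical variable of the
  ! calling program, not part of this module):
  !   CALL Init_orchidee_mpi()             ! MPI is initialized and XIOS returns the communicator
  ! or, with XIOS_ORCHIDEE_OK=n in run.def and an already existing communicator:
  !   CALL Init_orchidee_mpi(comm_driver)  ! reuse the communicator of the calling program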

!! ==============================================================================================================================
!! SUBROUTINE   : xios_orchidee_comm_init
!!
!>\BRIEF         Get the MPI communicator.
!!
!! DESCRIPTION  :\n First call to XIOS to get the MPI communicator.
!!               Note that it is XIOS that initializes the MPI communicator.
!!               This subroutine is only called in ORCHIDEE offline mode. When running in coupled mode, the
!!               atmospheric model must initialize XIOS at the same time as initializing MPI.
!! \n
!_ ================================================================================================================================

  SUBROUTINE xios_orchidee_comm_init(comm_local)
#ifdef XIOS
    USE xios
#endif
    !
    !! 0. Variable and parameter declaration
    !
    !! Output variables
    INTEGER, INTENT(OUT) :: comm_local
    CHARACTER(len=*),PARAMETER :: id="client"           !! Id for initialization of ORCHIDEE in XIOS
#ifdef CPP_PARA
    INCLUDE 'mpif.h'
    INTEGER :: ierr
#endif

    !_ ================================================================================================================================

    IF (is_omp_root) THEN
#ifdef XIOS
       CALL xios_initialize(id,return_comm=comm_local)
#else
       ! Write error messages and stop the model
       WRITE(numout,*) 'Preprocessing key XIOS is missing to run ORCHIDEE with XIOS'
       WRITE(numout,*) 'Recompile with preprocessing flag XIOS or set XIOS_ORCHIDEE_OK=n in run.def'
       WRITE(numout,*) 'Fatal error from ORCHIDEE. STOP in xios_orchidee_comm_init'
#ifdef CPP_PARA
       ! MPI_ABORT requires the communicator, the error code and the error status arguments
       CALL MPI_ABORT(MPI_COMM_WORLD, 3, ierr)
#endif
       STOP 1
#endif

    END IF
  END SUBROUTINE xios_orchidee_comm_init


!! ==============================================================================================================================
!! SUBROUTINE   : xios_orchidee_finalize
!!
!>\BRIEF         Last call to XIOS for finalization.
!!
!! DESCRIPTION  :\n Last call to XIOS for finalization of the orchidee context and XIOS.
!!               This subroutine is called only when ORCHIDEE is run in offline mode. In coupled mode it is the
!!               atmospheric model that finalizes XIOS. In that case, the orchidee context must be finalized using
!!               the subroutine xios_orchidee_context_finalize.
!!
!! \n
!_ ================================================================================================================================

  SUBROUTINE xios_orchidee_finalize
#ifdef XIOS
    USE xios, ONLY : xios_context_finalize, xios_finalize
#endif
    LOGICAL :: xios_orchidee_ok    ! Local variable read from run.def

    ! Read XIOS_ORCHIDEE_OK from run.def. In this module, due to cyclic dependencies,
    ! the variable cannot be used from module xios_orchidee.
    xios_orchidee_ok=.TRUE.
    IF (is_omp_root) CALL getin('XIOS_ORCHIDEE_OK',xios_orchidee_ok)
    !_ ================================================================================================================================

    IF (xios_orchidee_ok .AND. is_omp_root) THEN
#ifdef XIOS
       CALL xios_context_finalize()
       CALL xios_finalize()
#endif
    END IF
  END SUBROUTINE xios_orchidee_finalize


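!! =============================================================================================================================
!! SUBROUTINE: Init_orchidee_mpi_data
!!
!>\BRIEF        Initialization of the MPI domain decomposition data in ORCHIDEE.
!!
!! DESCRIPTION: Allocation and definition of the index arrays that describe how the land
!!              grid cells and the global 2D grid are distributed over the MPI processes.
!!
!! \n
!_ ==============================================================================================================================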
  SUBROUTINE Init_orchidee_mpi_data(arg_nbp_mpi,arg_kindex_mpi,arg_offset_mpi,COMM)

    IMPLICIT NONE
#ifdef CPP_PARA
    INCLUDE 'mpif.h'
#endif
    INTEGER, INTENT(IN) :: arg_nbp_mpi
    INTEGER, INTENT(IN) :: arg_kindex_mpi(arg_nbp_mpi)
    INTEGER, INTENT(IN) :: arg_offset_mpi
    INTEGER, INTENT(IN) :: COMM

    INTEGER :: i

#ifdef CPP_PARA
    INTEGER :: ierr
    is_ok_mpi=.TRUE.
#else
    is_ok_mpi=.FALSE.
#endif

    ! Initialization of MPI_COMM_ORCH
    CALL init_const_mpi(COMM)
    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_COMM_SIZE(MPI_COMM_ORCH,mpi_size,ierr)
       CALL MPI_COMM_RANK(MPI_COMM_ORCH,mpi_rank,ierr)
#endif
    ELSE
       mpi_size=1
       mpi_rank=0
    ENDIF

    IF (mpi_rank == 0) THEN
       mpi_rank_root = 0
       is_mpi_root = .true.
    ENDIF

    ! Test if there are enough land grid cells on each MPI process.
    ! Each process needs at least 1 grid cell: stop if a process received none.
    ! A single grid cell per process can also cause problems when running in parallel.
    IF ( arg_nbp_mpi < 1 ) THEN
       PRINT*,'Init_orchidee_mpi_data: nbp_mpi (number of grid cells for the current MPI process)=',arg_nbp_mpi
       PRINT*,'nbp_mpi=0 is not possible. It must be 1 or bigger for each MPI process. Stop now.'
       CALL ipslerr_p(3, "Init_orchidee_mpi_data","Not all MPI processes have enough land grid cells",&
            "Make the region bigger or use a lower number of MPI processes.","")
    ELSE IF (arg_nbp_mpi == 1 .AND. mpi_size > 1) THEN
       CALL ipslerr_p(1, "Init_orchidee_mpi_data","nbp=1 for current MPI process",&
            "This can be a problem in some cases.", &
            "If the model crashes, then make the region bigger or use a lower number of MPI processes.")
    END IF

    ALLOCATE(nbp_mpi_para(0:mpi_size-1))
    ALLOCATE(nbp_mpi_para_begin(0:mpi_size-1))
    ALLOCATE(nbp_mpi_para_end(0:mpi_size-1))
    ALLOCATE(jj_para_nb(0:mpi_size-1))
    ALLOCATE(jj_para_begin(0:mpi_size-1))
    ALLOCATE(jj_para_end(0:mpi_size-1))
    ALLOCATE(ii_para_begin(0:mpi_size-1))
    ALLOCATE(ii_para_end(0:mpi_size-1))
    ALLOCATE(ij_para_nb(0:mpi_size-1))
    ALLOCATE(ij_para_begin(0:mpi_size-1))
    ALLOCATE(ij_para_end(0:mpi_size-1))


    nbp_mpi=arg_nbp_mpi
    ALLOCATE(kindex_mpi(nbp_mpi))
    kindex_mpi(:)=arg_kindex_mpi(:)

    offset_mpi=arg_offset_mpi

    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_AllGather(nbp_mpi,1,MPI_INT_ORCH,nbp_mpi_para,1,MPI_INT_ORCH,MPI_COMM_ORCH,ierr)
#endif
    ELSE
       nbp_mpi_para(0)=nbp_mpi
    ENDIF

    nbp_mpi_para_begin(0)=1
    nbp_mpi_para_end(0)=nbp_mpi_para(0)
    DO i=1,mpi_size-1
       nbp_mpi_para_begin(i)=nbp_mpi_para_end(i-1)+1
       nbp_mpi_para_end(i)=nbp_mpi_para_begin(i)+nbp_mpi_para(i)-1
    ENDDO
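    ! Worked example (hypothetical values): with 2 processes and nbp_mpi_para=(/3,5/),
    ! the loop above gives nbp_mpi_para_begin=(/1,4/) and nbp_mpi_para_end=(/3,8/),
    ! i.e. contiguous 1-based segments covering all land points.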
    nbp_mpi_begin=nbp_mpi_para_begin(mpi_rank)
    nbp_mpi_end=nbp_mpi_para_end(mpi_rank)

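    ! Determine the end of the 2D-grid segment owned by this process: the last process
    ! extends its segment to the end of the global grid so that the segments of all
    ! processes together cover the iim_g*jjm_g points.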
    IF (mpi_rank==mpi_size-1) THEN
       ij_end=iim_g*jjm_g
    ELSE
       ij_end=kindex_mpi(nbp_mpi)+offset_mpi
    ENDIF

    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_Allgather(ij_end,1,MPI_INT_ORCH,ij_para_end,1,MPI_INT_ORCH,MPI_COMM_ORCH,ierr)
#endif
    ELSE
       ij_para_end(0)=ij_end
    ENDIF

    ij_para_begin(0)=1
    ij_para_nb(0)=ij_para_end(0)-ij_para_begin(0)+1

    DO i=1,mpi_size-1
       ij_para_begin(i)=ij_para_end(i-1)+1
       ij_para_nb(i)=ij_para_end(i)-ij_para_begin(i)+1
    ENDDO

    DO i=0,mpi_size-1
       jj_para_begin(i)=(ij_para_begin(i)-1)/iim_g + 1
       jj_para_end(i)=(ij_para_end(i)-1)/iim_g + 1
       jj_para_nb(i)=jj_para_end(i)-jj_para_begin(i)+1

       ii_para_begin(i)=MOD(ij_para_begin(i)-1,iim_g)+1
       ii_para_end(i)=MOD(ij_para_end(i)-1,iim_g)+1
    ENDDO
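    ! Worked example (hypothetical values): with iim_g=360 points per longitude row,
    ! a flat index ij_para_begin(i)=361 gives jj_para_begin(i)=(361-1)/360+1=2 and
    ! ii_para_begin(i)=MOD(361-1,360)+1=1, i.e. the first point of the second row.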


    ij_nb=ij_para_nb(mpi_rank)
    ij_begin=ij_para_begin(mpi_rank)
    ij_end=ij_para_end(mpi_rank)

    jj_nb=jj_para_nb(mpi_rank)
    jj_begin=jj_para_begin(mpi_rank)
    jj_end=jj_para_end(mpi_rank)

    ii_begin=ii_para_begin(mpi_rank)
    ii_end=ii_para_end(mpi_rank)


    CALL print_mpi_data


  END SUBROUTINE Init_orchidee_mpi_data

!! =============================================================================================================================
!! SUBROUTINE:   init_const_mpi
!!
!>\BRIEF        Initialization of some constants related to the MPI parallelization
!!
!! DESCRIPTION: Initialization of some constants related to the MPI parallelization
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE init_const_mpi(COMM)

    IMPLICIT NONE
    INTEGER :: COMM

#ifdef CPP_PARA
    INCLUDE 'mpif.h'

    MPI_COMM_ORCH=COMM

    IF (i_std==i_4) THEN
       MPI_INT_ORCH=MPI_INTEGER4
    ELSEIF (i_std==i_8) THEN
       MPI_INT_ORCH=MPI_INTEGER8
    ELSE
       MPI_INT_ORCH=MPI_INTEGER
    ENDIF

    IF (r_std==r_4) THEN
       MPI_REAL_ORCH=MPI_REAL4
    ELSEIF (r_std==r_8) THEN
       MPI_REAL_ORCH=MPI_REAL8
    ELSE
       MPI_REAL_ORCH=MPI_REAL
    ENDIF
#endif

  END SUBROUTINE init_const_mpi

!! =============================================================================================================================
!! SUBROUTINE: Finalize_mpi
!!
!>\BRIEF        Close the MPI parallelization
!!
!! DESCRIPTION: Close the MPI parallelization. The XIOS context will be closed before the call to the MPI_FINALIZE routine.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Finalize_mpi

    IMPLICIT NONE
#ifdef CPP_PARA
    INCLUDE 'mpif.h'
    INTEGER :: ierr

    CALL xios_orchidee_finalize

    CALL MPI_FINALIZE(ierr)
#endif

  END SUBROUTINE Finalize_mpi

!! =============================================================================================================================
!! SUBROUTINE: print_mpi_data
!!
!>\BRIEF        Print all data specific to the MPI parallelization of ORCHIDEE
!!
!! DESCRIPTION: Print all data specific to the MPI parallelization of ORCHIDEE
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE print_mpi_data

    IMPLICIT NONE

    WRITE(numout,*) '==== MPI DOMAIN ===='
    WRITE(numout,*) '     ----------     '
    WRITE(numout,*) 'mpi_size',mpi_size
    WRITE(numout,*) 'mpi_rank',mpi_rank
    WRITE(numout,*) 'is_mpi_root',is_mpi_root
    WRITE(numout,*) 'mpi_rank_root',mpi_rank_root

    WRITE(numout,*) 'nbp_mpi_begin=',nbp_mpi_begin
    WRITE(numout,*) 'nbp_mpi_end  =',nbp_mpi_end
    WRITE(numout,*) 'nbp_mpi=',nbp_mpi

    WRITE(numout,*) 'ij_begin=',ij_begin
    WRITE(numout,*) 'ij_end=',ij_end
    WRITE(numout,*) 'ij_nb=',ij_nb
    WRITE(numout,*) 'jj_begin=',jj_begin
    WRITE(numout,*) 'jj_end=',jj_end
    WRITE(numout,*) 'jj_nb=',jj_nb
    WRITE(numout,*) 'ii_begin=',ii_begin
    WRITE(numout,*) 'ii_end=',ii_end

    WRITE(numout,*) 'offset_mpi',offset_mpi
    WRITE(numout,*) 'nbp_mpi_para_begin=',nbp_mpi_para_begin
    WRITE(numout,*) 'nbp_mpi_para_end  =',nbp_mpi_para_end
    WRITE(numout,*) 'nbp_mpi_para=',nbp_mpi_para

    WRITE(numout,*) 'ij_para_begin=',ij_para_begin
    WRITE(numout,*) 'ij_para_end=',ij_para_end
    WRITE(numout,*) 'ij_para_nb=',ij_para_nb
    WRITE(numout,*) 'jj_para_begin=',jj_para_begin
    WRITE(numout,*) 'jj_para_end=',jj_para_end
    WRITE(numout,*) 'jj_para_nb=',jj_para_nb
    WRITE(numout,*) 'ii_para_begin=',ii_para_begin
    WRITE(numout,*) 'ii_para_end=',ii_para_end

  END SUBROUTINE print_mpi_data

!! =============================================================================================================================
!! SUBROUTINE: Read_Load_balance
!!
!>\BRIEF        Read the load balancing file.
!!
!! DESCRIPTION: Read the load balancing file. This is only done in offline mode.
!!              The load balancing file contains information about the MPI partitioning on the different processes.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Read_Load_balance(NbPoints,Nbpoints_loc)

    IMPLICIT NONE
    INTEGER,INTENT(IN)  :: NbPoints
    INTEGER,INTENT(OUT) :: Nbpoints_loc(0:mpi_size-1)
    INTEGER :: i,s
    INTEGER :: ierr

#ifdef CPP_PARA
    CHARACTER(len=255) :: filename='Load_balance_orchidee.dat'
    INTEGER :: j
    INTEGER :: unit_number=10
#endif

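    ! Expected file format: one line per MPI process with two integers, the process
    ! rank and the number of land points attributed to it (the same format that is
    ! written by Write_Load_balance below).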
#ifdef CPP_PARA
    OPEN(UNIT=unit_number,FILE=trim(filename),STATUS='old',FORM='formatted',IOSTAT=ierr)
#else
    ierr=1
#endif
    Nbpoints_loc(:) = 0

    s=0
#ifdef CPP_PARA
    IF (ierr==0) THEN
       i=0
       !- Read the balancing file even if it has a bad structure
       DO WHILE (i < mpi_size .AND. ierr == 0)
          READ (unit_number,*,IOSTAT=ierr) j,Nbpoints_loc(i)
          s=s+Nbpoints_loc(i)
          i=i+1
       ENDDO
       CLOSE(unit_number)
    ENDIF
#endif

    !- Correction of a bad balancing file (or an empty file) => same number of points for each process
    IF (ierr/=0 .OR. s/=Nbpoints) THEN
       DO i=0,mpi_size-1
          Nbpoints_loc(i)=Nbpoints/mpi_size
          IF (MOD(Nbpoints,mpi_size) > i) Nbpoints_loc(i)=Nbpoints_loc(i)+1
       ENDDO
    ENDIF

  END SUBROUTINE Read_Load_balance

!! =============================================================================================================================
!! SUBROUTINE: Write_Load_balance
!!
!>\BRIEF        Write the load balancing file.
!!
!! DESCRIPTION: Write the load balancing file. This is only done in offline mode.
!!              The load balancing file contains information about the MPI partitioning on the different processes.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Write_Load_balance(times)
    IMPLICIT NONE
    REAL,INTENT(IN) :: times

    CHARACTER(len=255) :: filename='Load_balance_orchidee.dat'
    INTEGER :: unit_number=10
    INTEGER :: i,ierr
    REAL :: All_Times(0:mpi_size-1)
    REAL :: average
    REAL :: efficiency
    INTEGER :: dp,S
    INTEGER :: New_nbpoints(0:mpi_size-1)

    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_GATHER(times,1,MPI_REAL_ORCH,All_times,1,MPI_REAL_ORCH,mpi_rank_root,MPI_COMM_ORCH,ierr)
#endif
    ELSE
       All_times(:)=times
    ENDIF

    IF (is_mpi_root) WRITE(numout,*) 'ALL_times',All_times

    IF (is_mpi_root) THEN

       OPEN(UNIT=unit_number,FILE=trim(filename),STATUS='replace',FORM='formatted',IOSTAT=ierr)

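       ! Rebalancing heuristic: efficiency is the measured time per land point on
       ! process i, so (All_times(i)-average)/efficiency is the number of points to
       ! take from (or give to) process i to bring its time back to the mean.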
       average=sum(All_times(:))/mpi_size
       DO i=0,mpi_size-1
          efficiency=All_times(i)/nbp_mpi_para(i)
          New_nbpoints(i)=Nbp_mpi_para(i)-(All_times(i)-average)/efficiency
       ENDDO

       S=sum(new_nbpoints(:))
       dp=nbp_glo-S

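       ! Because of the integer truncation in the loop above, the sum of New_nbpoints
       ! can differ from the global number of land points nbp_glo; the difference dp
       ! is redistributed (or removed) one point at a time below.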
       IF ( dp > 0 ) THEN
          DO WHILE ( dp > 0 )
             New_nbpoints(MOD(dp,mpi_size))=New_nbpoints(MOD(dp,mpi_size))+1
             dp=dp-1
          ENDDO
       ELSE
          dp=-dp
          DO WHILE ( dp > 0 )
             New_nbpoints(MOD(dp,mpi_size))=New_nbpoints(MOD(dp,mpi_size))-1
             dp=dp-1
          ENDDO
       ENDIF


       ! If this algorithm diverges, we keep the previous distribution.
       IF ( ANY(New_nbpoints(:) .LE. 0) ) THEN
          New_nbpoints(:)=Nbp_mpi_para(:)
       ENDIF

       DO i=0,mpi_size-1
          WRITE(Unit_number,*) i,New_nbpoints(i)
       ENDDO
       CLOSE(Unit_number)
    ENDIF

  END SUBROUTINE Write_Load_balance

END MODULE mod_orchidee_mpi_data

#include "mpi_dummy.h"