! ==============================================================================================================================
! MODULE       : mod_orchidee_mpi_data
!
! CONTACT      : orchidee-help _at_ listes.ipsl.fr
!
! LICENCE      : IPSL (2006)
! This software is governed by the CeCILL licence, see ORCHIDEE/ORCHIDEE_CeCILL.LIC
!
!>\BRIEF       All routines needed for the initialization of the MPI parallelization of ORCHIDEE (coupled and offline)
!!
!!\n DESCRIPTION : Definition and allocation of parallel data for MPI.
!!                 Initialization of parallel or sequential IOs.
!!                 Definition of load balancing functions.
!!
!! RECENT CHANGE(S): None
!!
!! REFERENCE(S) : None
!!
!! SVN          :
!! $HeadURL$
!! $Date$
!! $Revision$
!! \n
!_ ================================================================================================================================


MODULE mod_orchidee_mpi_data

!-
  USE defprec
  USE ioipsl
  USE ioipsl_para
  USE xios_orchidee
  USE mod_orchidee_para_var

  IMPLICIT NONE

!-
#include "src_parallel.h"
!-
!-

CONTAINS
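
!! A typical offline call sequence, as a sketch (the argument names nbp, kindex and offset are illustrative):
!!
!!   CALL Init_orchidee_mpi                                           ! create or retrieve the MPI communicator
!!   CALL Init_orchidee_mpi_data(nbp, kindex, offset, MPI_COMM_ORCH)  ! decompose the global domain over the processes
!!   ...
!!   CALL Finalize_mpi                                                ! close the XIOS context and finalize MPI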

!!  =============================================================================================================================
!! SUBROUTINE:  Init_orchidee_mpi
!!
!>\BRIEF       Initialization of the MPI parallelization in ORCHIDEE offline mode.
!!
!! DESCRIPTION: Initialization of the MPI parallelization in ORCHIDEE offline mode.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Init_orchidee_mpi(communicator)


    IMPLICIT NONE
    INTEGER, OPTIONAL, INTENT(in) :: communicator
    INTEGER :: COMM
    INTEGER :: ierr

#ifdef CPP_PARA
    INCLUDE 'mpif.h'
#endif

    !Config Key   = XIOS_ORCHIDEE_OK
    !Config Desc  = Use XIOS for writing diagnostics file
    !Config If    =
    !Config Def   = y
    !Config Help  = If XIOS_ORCHIDEE_OK=y then no output can be done with IOIPSL
    !Config Units = [FLAG]
    CALL getin('XIOS_ORCHIDEE_OK',xios_orchidee_ok)

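    ! Choose the communicator: with XIOS, initialize MPI here and let XIOS return the communicator to be
    ! used by ORCHIDEE; in coupled mode use the communicator passed by the caller; otherwise fall back
    ! to MPI_COMM_WORLD.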
#ifdef CPP_PARA
    IF ( xios_orchidee_ok ) THEN
       CALL MPI_INIT(ierr)
       CALL xios_orchidee_comm_init(COMM)
    ELSE IF ( PRESENT(communicator) ) THEN
       COMM=communicator
    ELSE
       CALL MPI_INIT(ierr)
       COMM=MPI_COMM_WORLD
    ENDIF
    CALL MPI_COMM_SIZE(COMM,mpi_size,ierr)
    CALL MPI_COMM_RANK(COMM,mpi_rank,ierr)
    is_ok_mpi=.TRUE.
#else
    mpi_rank=0
    mpi_size=1
    is_ok_mpi=.FALSE.
    ! It is not possible to use XIOS without MPI
    WRITE(numout,*) 'XIOS cannot be run without MPI. xios_orchidee_ok is set to false.'
    xios_orchidee_ok=.FALSE.
#endif

    mpi_rank_root=0

    IF (mpi_rank==mpi_rank_root) THEN
       is_mpi_root=.TRUE.
    ELSE
       is_mpi_root=.FALSE.
    ENDIF

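    ! Store the communicator in MPI_COMM_ORCH and set the matching MPI datatype constants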
    CALL Init_const_mpi(COMM)

  END SUBROUTINE Init_orchidee_mpi



!!  =============================================================================================================================
!! SUBROUTINE:  Init_orchidee_mpi_data
!!
!>\BRIEF       Initialization of the MPI parallelization
!!
!! DESCRIPTION: Initialization of the MPI parallelization. This subroutine is called from Init_orchidee_data_para
!!              which is called both in offline and coupled mode.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Init_orchidee_mpi_data(arg_nbp_mpi,arg_kindex_mpi,arg_offset_mpi,COMM)

    IMPLICIT NONE
#ifdef CPP_PARA
    INCLUDE 'mpif.h'
#endif
    INTEGER, INTENT(IN) :: arg_nbp_mpi
    INTEGER, INTENT(IN) :: arg_kindex_mpi(arg_nbp_mpi)
    INTEGER, INTENT(IN) :: arg_offset_mpi
    INTEGER, INTENT(IN) :: COMM

    INTEGER :: i

#ifdef CPP_PARA
    INTEGER :: ierr
    is_ok_mpi=.TRUE.
#else
    is_ok_mpi=.FALSE.
#endif

    ! Initialization of MPI_COMM_ORCH
    CALL init_const_mpi(COMM)
    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_COMM_SIZE(MPI_COMM_ORCH,mpi_size,ierr)
       CALL MPI_COMM_RANK(MPI_COMM_ORCH,mpi_rank,ierr)
#endif
    ELSE
       mpi_size=1
       mpi_rank=0
    ENDIF

    mpi_rank_root = 0
    IF (mpi_rank == mpi_rank_root) THEN
       is_mpi_root = .TRUE.
    ELSE
       is_mpi_root = .FALSE.
    ENDIF

    ! Test that each MPI process has at least one land grid cell; stop if this is not the case.
    ! A single grid cell on a process can also be fragile when running in parallel, so a warning is issued.
    IF ( arg_nbp_mpi < 1 ) THEN
       PRINT*,'Init_orchidee_mpi_data: nbp_mpi (number of grid cells for current MPI process)=',arg_nbp_mpi
       PRINT*,'nbp_mpi=0 is not possible. It must be 1 or bigger for each MPI process. Stop now.'
       CALL ipslerr_p(3, "Init_orchidee_mpi_data","Not all MPI processes have enough land grid cells",&
            "Make the region bigger or use a lower number of MPI processes.","")
    ELSE IF (arg_nbp_mpi == 1 .AND. mpi_size > 1) THEN
       CALL ipslerr_p(1, "Init_orchidee_mpi_data","nbp=1 for current MPI process",&
            "This can be a problem in some cases.", &
            "If the model crashes, then make the region bigger or use a lower number of MPI processes.")
    END IF

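    ! Allocate the domain decomposition arrays, one entry per MPI process (ranks 0:mpi_size-1)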
    ALLOCATE(nbp_mpi_para(0:mpi_size-1))
    ALLOCATE(nbp_mpi_para_begin(0:mpi_size-1))
    ALLOCATE(nbp_mpi_para_end(0:mpi_size-1))
    ALLOCATE(jj_para_nb(0:mpi_size-1))
    ALLOCATE(jj_para_begin(0:mpi_size-1))
    ALLOCATE(jj_para_end(0:mpi_size-1))
    ALLOCATE(ii_para_begin(0:mpi_size-1))
    ALLOCATE(ii_para_end(0:mpi_size-1))
    ALLOCATE(ij_para_nb(0:mpi_size-1))
    ALLOCATE(ij_para_begin(0:mpi_size-1))
    ALLOCATE(ij_para_end(0:mpi_size-1))


    nbp_mpi=arg_nbp_mpi
    ALLOCATE(kindex_mpi(nbp_mpi))
    kindex_mpi(:)=arg_kindex_mpi(:)

    offset_mpi=arg_offset_mpi

    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_AllGather(nbp_mpi,1,MPI_INT_ORCH,nbp_mpi_para,1,MPI_INT_ORCH,MPI_COMM_ORCH,ierr)
#endif
    ELSE
       nbp_mpi_para(0)=nbp_mpi
    ENDIF

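    ! Cumulative bounds of each process in the global land-point vector.
    ! As an illustrative example, nbp_mpi_para=(/3,4,3/) gives
    ! nbp_mpi_para_begin=(/1,4,8/) and nbp_mpi_para_end=(/3,7,10/).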
    nbp_mpi_para_begin(0)=1
    nbp_mpi_para_end(0)=nbp_mpi_para(0)
    DO i=1,mpi_size-1
       nbp_mpi_para_begin(i)=nbp_mpi_para_end(i-1)+1
       nbp_mpi_para_end(i)=nbp_mpi_para_begin(i)+nbp_mpi_para(i)-1
    ENDDO
    nbp_mpi_begin=nbp_mpi_para_begin(mpi_rank)
    nbp_mpi_end=nbp_mpi_para_end(mpi_rank)


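    ! Last index of the global 2D grid handled by this process: the last rank extends its domain
    ! to the end of the grid (iim_g*jjm_g); the other ranks stop at their last land point.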
    IF (mpi_rank==mpi_size-1) THEN
       ij_end=iim_g*jjm_g
    ELSE
       ij_end=kindex_mpi(nbp_mpi)+offset_mpi
    ENDIF

    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_Allgather(ij_end,1,MPI_INT_ORCH,ij_para_end,1,MPI_INT_ORCH,MPI_COMM_ORCH,ierr)
#endif
    ELSE
       ij_para_end(0)=ij_end
    ENDIF

    ij_para_begin(0)=1
    ij_para_nb(0)=ij_para_end(0)-ij_para_begin(0)+1

    DO i=1,mpi_size-1
       ij_para_begin(i)=ij_para_end(i-1)+1
       ij_para_nb(i)=ij_para_end(i)-ij_para_begin(i)+1
    ENDDO

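    ! Convert the 1D bounds into (ii,jj) grid coordinates:
    !   jj = (ij-1)/iim_g + 1   and   ii = MOD(ij-1,iim_g) + 1
    ! For example, with iim_g=10, ij=25 gives jj=3 and ii=5.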
    DO i=0,mpi_size-1
       jj_para_begin(i)=(ij_para_begin(i)-1)/iim_g + 1
       jj_para_end(i)=(ij_para_end(i)-1)/iim_g + 1
       jj_para_nb(i)=jj_para_end(i)-jj_para_begin(i)+1

       ii_para_begin(i)=MOD(ij_para_begin(i)-1,iim_g)+1
       ii_para_end(i)=MOD(ij_para_end(i)-1,iim_g)+1
    ENDDO


    ij_nb=ij_para_nb(mpi_rank)
    ij_begin=ij_para_begin(mpi_rank)
    ij_end=ij_para_end(mpi_rank)

    jj_nb=jj_para_nb(mpi_rank)
    jj_begin=jj_para_begin(mpi_rank)
    jj_end=jj_para_end(mpi_rank)

    ii_begin=ii_para_begin(mpi_rank)
    ii_end=ii_para_end(mpi_rank)


    CALL print_mpi_data


  END SUBROUTINE Init_orchidee_mpi_data

!!  =============================================================================================================================
!! SUBROUTINE:  init_const_mpi
!!
!>\BRIEF       Initialization of some constants related to the MPI parallelization
!!
!! DESCRIPTION: Initialization of some constants related to the MPI parallelization
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE init_const_mpi(COMM)

    IMPLICIT NONE
    INTEGER, INTENT(IN) :: COMM

#ifdef CPP_PARA
    INCLUDE 'mpif.h'

    MPI_COMM_ORCH=COMM

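    ! Select the MPI datatypes that match the integer and real working precisions (i_std, r_std) from defprec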
    IF (i_std==i_4) THEN
       MPI_INT_ORCH=MPI_INTEGER4
    ELSEIF (i_std==i_8) THEN
       MPI_INT_ORCH=MPI_INTEGER8
    ELSE
       MPI_INT_ORCH=MPI_INTEGER
    ENDIF

    IF (r_std==r_4) THEN
       MPI_REAL_ORCH=MPI_REAL4
    ELSEIF (r_std==r_8) THEN
       MPI_REAL_ORCH=MPI_REAL8
    ELSE
       MPI_REAL_ORCH=MPI_REAL
    ENDIF
#endif

  END SUBROUTINE init_const_mpi

!!  =============================================================================================================================
!! SUBROUTINE:  Finalize_mpi
!!
!>\BRIEF       Close the MPI parallelization
!!
!! DESCRIPTION: Close the MPI parallelization. The XIOS context will be closed before the call to the MPI_FINALIZE routine.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Finalize_mpi

    IMPLICIT NONE
#ifdef CPP_PARA
    INCLUDE 'mpif.h'
    INTEGER :: ierr

    CALL xios_orchidee_finalize

    CALL MPI_FINALIZE(ierr)
#endif

  END SUBROUTINE Finalize_mpi

!!  =============================================================================================================================
!! SUBROUTINE:  print_mpi_data
!!
!>\BRIEF       Print all data specific to the MPI parallelization of ORCHIDEE
!!
!! DESCRIPTION: Print all data specific to the MPI parallelization of ORCHIDEE
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE print_mpi_data

    IMPLICIT NONE

    WRITE(numout,*) '==== MPI DOMAIN ===='
    WRITE(numout,*) ' ---------- '
    WRITE(numout,*) 'mpi_size',mpi_size
    WRITE(numout,*) 'mpi_rank',mpi_rank
    WRITE(numout,*) 'is_mpi_root',is_mpi_root
    WRITE(numout,*) 'mpi_rank_root',mpi_rank_root

    WRITE(numout,*) 'nbp_mpi_begin=',nbp_mpi_begin
    WRITE(numout,*) 'nbp_mpi_end =',nbp_mpi_end
    WRITE(numout,*) 'nbp_mpi=',nbp_mpi

    WRITE(numout,*) 'ij_begin=',ij_begin
    WRITE(numout,*) 'ij_end=',ij_end
    WRITE(numout,*) 'ij_nb=',ij_nb
    WRITE(numout,*) 'jj_begin=',jj_begin
    WRITE(numout,*) 'jj_end=',jj_end
    WRITE(numout,*) 'jj_nb=',jj_nb
    WRITE(numout,*) 'ii_begin=',ii_begin
    WRITE(numout,*) 'ii_end=',ii_end

    WRITE(numout,*) 'offset_mpi',offset_mpi
    WRITE(numout,*) 'nbp_mpi_para_begin=',nbp_mpi_para_begin
    WRITE(numout,*) 'nbp_mpi_para_end =',nbp_mpi_para_end
    WRITE(numout,*) 'nbp_mpi_para=',nbp_mpi_para

    WRITE(numout,*) 'ij_para_begin=',ij_para_begin
    WRITE(numout,*) 'ij_para_end=',ij_para_end
    WRITE(numout,*) 'ij_para_nb=',ij_para_nb
    WRITE(numout,*) 'jj_para_begin=',jj_para_begin
    WRITE(numout,*) 'jj_para_end=',jj_para_end
    WRITE(numout,*) 'jj_para_nb=',jj_para_nb
    WRITE(numout,*) 'ii_para_begin=',ii_para_begin
    WRITE(numout,*) 'ii_para_end=',ii_para_end

  END SUBROUTINE print_mpi_data

!!  =============================================================================================================================
!! SUBROUTINE:  Read_Load_balance
!!
!>\BRIEF       Read the load balance file.
!!
!! DESCRIPTION: Read the load balance file. This is only done in offline mode.
!!              The load balance file contains information about the MPI partitioning on the different processes.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Read_Load_balance(NbPoints,Nbpoints_loc)

    IMPLICIT NONE
    INTEGER,INTENT(IN)  :: NbPoints
    INTEGER,INTENT(OUT) :: Nbpoints_loc(0:mpi_size-1)
    INTEGER :: i,s
    INTEGER :: ierr

#ifdef CPP_PARA
    CHARACTER(len=255) :: filename='Load_balance_orchidee.dat'
    INTEGER :: j
    INTEGER :: unit_number=10
#endif

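    ! The file is expected to contain one line per MPI process: the rank and its number of land points,
    ! in the format written by Write_Load_balance.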
#ifdef CPP_PARA
    OPEN(UNIT=unit_number,FILE=trim(filename),STATUS='old',FORM='formatted',IOSTAT=ierr)
#else
    ierr=1
#endif
    Nbpoints_loc(:) = 0

    s=0
#ifdef CPP_PARA
    IF (ierr==0) THEN
       i=0
       !- Read the balancing file even if it has a bad structure
       DO WHILE (i < mpi_size .AND. ierr == 0)
          READ (unit_number,*,IOSTAT=ierr) j,Nbpoints_loc(i)
          s=s+Nbpoints_loc(i)
          i=i+1
       ENDDO
       CLOSE(unit_number)
    ENDIF
#endif

    !- Correct a bad (or empty) balancing file => same number of points for each process
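    ! For example, NbPoints=10 with mpi_size=4 gives Nbpoints_loc=(/3,3,2,2/).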
    IF (ierr/=0 .OR. s/=NbPoints) THEN
       DO i=0,mpi_size-1
          Nbpoints_loc(i)=NbPoints/mpi_size
          IF (MOD(NbPoints,mpi_size) > i) Nbpoints_loc(i)=Nbpoints_loc(i)+1
       ENDDO
    ENDIF

  END SUBROUTINE Read_Load_balance

!!  =============================================================================================================================
!! SUBROUTINE:  Write_Load_balance
!!
!>\BRIEF       Write the load balance file.
!!
!! DESCRIPTION: Write the load balance file. This is only done in offline mode.
!!              The load balance file contains information about the MPI partitioning on the different processes.
!!
!! \n
!_ ==============================================================================================================================
  SUBROUTINE Write_Load_balance(times)
    IMPLICIT NONE
    REAL,INTENT(IN) :: times

    CHARACTER(len=255) :: filename='Load_balance_orchidee.dat'
    INTEGER :: unit_number=10
    INTEGER :: i,ierr
    REAL :: All_Times(0:mpi_size-1)
    REAL :: average
    REAL :: efficiency
    INTEGER :: dp,S
    INTEGER :: New_nbpoints(0:mpi_size-1)

    IF (is_ok_mpi) THEN
#ifdef CPP_PARA
       CALL MPI_GATHER(times,1,MPI_REAL_ORCH,All_times,1,MPI_REAL_ORCH,mpi_rank_root,MPI_COMM_ORCH,ierr)
#endif
    ELSE
       All_times(:)=times
    ENDIF

    IF (is_mpi_root) WRITE(numout,*) 'All_times',All_times

    IF (is_mpi_root) THEN

       OPEN(UNIT=unit_number,FILE=trim(filename),STATUS='replace',FORM='formatted',IOSTAT=ierr)

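       ! Rebalancing heuristic: efficiency is the measured time per land point of process i, so
       !   New_nbpoints(i) = nbp_mpi_para(i) - (All_times(i)-average)/efficiency
       ! shrinks the domains that were slower than average and grows the faster ones,
       ! aiming at an equal predicted time on all processes.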
       average=sum(All_times(:))/mpi_size
       DO i=0,mpi_size-1
          efficiency=All_times(i)/nbp_mpi_para(i)
          New_nbpoints(i)=Nbp_mpi_para(i)-(All_times(i)-average)/efficiency
       ENDDO

       S=sum(New_nbpoints(:))
       dp=nbp_glo-S

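       ! Spread the rounding residual dp, one point at a time, so that the new distribution still sums to nbp_glo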
       IF ( dp > 0 ) THEN
          DO WHILE ( dp > 0 )
             New_nbpoints(MOD(dp,mpi_size))=New_nbpoints(MOD(dp,mpi_size))+1
             dp=dp-1
          ENDDO
       ELSE
          dp=-dp
          DO WHILE ( dp > 0 )
             New_nbpoints(MOD(dp,mpi_size))=New_nbpoints(MOD(dp,mpi_size))-1
             dp=dp-1
          ENDDO
       ENDIF


       ! If this algorithm diverges, use the previous distribution.
       IF ( ANY(New_nbpoints(:) .LE. 0) ) THEN
          New_nbpoints(:)=Nbp_mpi_para(:)
       ENDIF

       DO i=0,mpi_size-1
          WRITE(unit_number,*) i,New_nbpoints(i)
       ENDDO
       CLOSE(unit_number)
    ENDIF

  END SUBROUTINE Write_Load_balance

END MODULE mod_orchidee_mpi_data

#include "mpi_dummy.h"
---|