MODULE lib_mpp
   !!======================================================================
   !!                       ***  MODULE  lib_mpp  ***
   !! Ocean numerics:  massively parallel processing library
   !!=====================================================================
   !! History :  OPA  !  1994  (M. Guyon, J. Escobar, M. Imbard)  Original code
   !!            7.0  !  1997  (A.M. Treguier)  SHMEM additions
   !!            8.0  !  1998  (M. Imbard, J. Escobar, L. Colombet)  SHMEM and MPI
   !!                 !  1998  (J.M. Molines)  Open boundary conditions
   !!   NEMO    1.0   !  2003  (J.-M. Molines, G. Madec)  F90, free form
   !!                 !  2003  (J.M. Molines)  add mpp_ini_north(_3d,_2d)
   !!            -    !  2004  (R. Bourdalle Badie)  isend option in mpi
   !!                 !  2004  (J.M. Molines)  minloc, maxloc
   !!            -    !  2005  (G. Madec, S. Masson)  npolj=5,6 F-point & ice cases
   !!            -    !  2005  (R. Redler)  Replacement of MPI_COMM_WORLD except for MPI_Abort
   !!            -    !  2005  (R. Benshila, G. Madec)  add extra halo case
   !!            -    !  2008  (R. Benshila)  add mpp_ini_ice
   !!           3.2   !  2009  (R. Benshila)  SHMEM suppression, north fold in lbc_nfd
   !!           3.2   !  2009  (O. Marti)  add mpp_ini_znl
   !!----------------------------------------------------------------------
#if   defined key_mpp_mpi
   !!----------------------------------------------------------------------
   !!   'key_mpp_mpi'             MPI massively parallel processing library
   !!----------------------------------------------------------------------
   !!   mynode            : identify the processor unit
   !!   mpp_lnk           : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d)
   !!   mpp_lnk_3d_gather : message passing management for two 3D arrays
   !!   mpp_lnk_e         : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)
   !!   mpprecv           : receive an array from a neighbouring process
   !!   mppsend           : send an array to a neighbouring process
   !!   mppscatter        : scatter a level-distributed work array onto the subdomains
   !!   mppgather         : gather the subdomain arrays into a level-distributed work array
   !!   mpp_min           : generic interface for mppmin_int , mppmin_a_int , mppmin_real, mppmin_a_real
   !!   mpp_max           : generic interface for mppmax_int , mppmax_a_int , mppmax_real, mppmax_a_real
   !!   mpp_sum           : generic interface for mppsum_int , mppsum_a_int , mppsum_real, mppsum_a_real
   !!   mpp_minloc        : location of the global minimum of a 2D or 3D field
   !!   mpp_maxloc        : location of the global maximum of a 2D or 3D field
   !!   mppsync           : synchronize all the processes
   !!   mppstop           : stop all the processes
   !!   mppobc            : variant of mpp_lnk for open boundary condition
   !!   mpp_ini_north     : initialisation of north fold
   !!   mpp_lbc_north     : north fold processors gathering
   !!   mpp_lbc_north_e   : variant of mpp_lbc_north for extra outer halo
   !!----------------------------------------------------------------------
   !! * Modules used
   USE dom_oce          ! ocean space and time domain
   USE in_out_manager   ! I/O manager
   USE lbcnfd           ! north fold treatment

   IMPLICIT NONE
   PRIVATE

   PUBLIC   mynode, mppstop, mppsync, mpp_comm_free
   PUBLIC   mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e
   PUBLIC   mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc
   PUBLIC   mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e
   PUBLIC   mpprecv, mppsend, mppscatter, mppgather
   PUBLIC   mppobc, mpp_ini_ice, mpp_ini_znl
#if defined key_oasis3 || defined key_oasis4
   PUBLIC   mppsize, mpprank
#endif

   !! * Interfaces
   !! Define generic interfaces for these routines, as they are sometimes called
   !! with scalar arguments instead of array arguments, which causes problems
   !! for the compilation on AIX systems as well as NEC and SGI. OK on COMPAQ.
   INTERFACE mpp_min
      MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real
   END INTERFACE
   INTERFACE mpp_max
      MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real
   END INTERFACE
   INTERFACE mpp_sum
      MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real
   END INTERFACE
   INTERFACE mpp_lbc_north
      MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d
   END INTERFACE
   INTERFACE mpp_minloc
      MODULE PROCEDURE mpp_minloc2d, mpp_minloc3d
   END INTERFACE
   INTERFACE mpp_maxloc
      MODULE PROCEDURE mpp_maxloc2d, mpp_maxloc3d
   END INTERFACE

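   !! Usage sketch (editorial addition, illustrative variable names): through the
   !! generic interfaces above a single call name covers the scalar and array,
   !! INTEGER and REAL(wp) variants, e.g.
   !!      CALL mpp_sum( zarea )        ! REAL(wp) scalar  -> mppsum_real
   !!      CALL mpp_min( ztab, kdim )   ! REAL(wp) array   -> mppmin_a_real
   !!      CALL mpp_max( icount )       ! INTEGER scalar   -> mppmax_int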

   !! ========================= !!
   !!  MPI  variable definition !!
   !! ========================= !!
!$AGRIF_DO_NOT_TREAT
#  include <mpif.h>
!$AGRIF_END_DO_NOT_TREAT

   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .TRUE.    !: mpp flag

   INTEGER, PARAMETER         ::   nprocmax = 2**10   ! maximum dimension (required to be a power of 2)

   INTEGER ::   mppsize        ! number of processes
   INTEGER ::   mpprank        ! process rank [ 0 - size-1 ]
   INTEGER ::   mpi_comm_opa   ! opa local communicator

   ! variables used in case of sea-ice
   INTEGER, PUBLIC ::   ncomm_ice       !: communicator made by the processors with sea-ice
   INTEGER ::   ngrp_ice        ! group ID for the ice processors (for rheology)
   INTEGER ::   ndim_rank_ice   ! number of 'ice' processors
   INTEGER ::   n_ice_root      ! rank (in the ice communicator) of proc 0 in the ice comm
   INTEGER, DIMENSION(:), ALLOCATABLE ::   nrank_ice   ! dimension ndim_rank_ice

   ! variables used for zonal integration
   INTEGER, PUBLIC ::   ncomm_znl       !: communicator made by the processors on the same zonal average
   LOGICAL, PUBLIC ::   l_znl_root      ! True on the 'left'most processor on the same row
   INTEGER ::   ngrp_znl        ! group ID for the znl processors
   INTEGER ::   ndim_rank_znl   ! number of processors on the same zonal average
   INTEGER, DIMENSION(:), ALLOCATABLE ::   nrank_znl   ! dimension ndim_rank_znl, ranks of the procs in the same znl domain

   ! North fold condition in mpp_mpi with jpni > 1
   INTEGER ::   ngrp_world        ! group ID for the world processors
   INTEGER ::   ngrp_opa          ! group ID for the opa processors
   INTEGER ::   ngrp_north        ! group ID for the northern processors (to be folded)
   INTEGER ::   ncomm_north       ! communicator made by the processors belonging to ngrp_north
   INTEGER ::   ndim_rank_north   ! number of 'sea' processors in the northern line (can be /= jpni !)
   INTEGER ::   njmppmax          ! value of njmpp for the processors of the northern line
   INTEGER ::   north_root        ! rank (in the opa communicator) of proc 0 in the northern comm
   INTEGER, DIMENSION(:), ALLOCATABLE ::   nrank_north   ! dimension ndim_rank_north

   ! Type of send : standard, buffered, immediate
   CHARACTER(len=1) ::   cn_mpi_send = 'S'    ! type of mpi send/receive (S=standard, B=bsend, I=isend)
   LOGICAL          ::   l_isend   = .FALSE.  ! isend use indicator (T if cn_mpi_send='I')
   INTEGER          ::   nn_buffer = 0        ! size of the buffer in case of mpi_bsend

   REAL(wp), ALLOCATABLE, DIMENSION(:) ::   tampon   ! buffer in case of bsend

   ! message passing arrays
   REAL(wp), DIMENSION(jpi,jprecj,jpk,2,2) ::   t4ns, t4sn   ! 2 x 3d for north-south & south-north
   REAL(wp), DIMENSION(jpj,jpreci,jpk,2,2) ::   t4ew, t4we   ! 2 x 3d for east-west & west-east
   REAL(wp), DIMENSION(jpi,jprecj,jpk,2,2) ::   t4p1, t4p2   ! 2 x 3d for north fold
   REAL(wp), DIMENSION(jpi,jprecj,jpk,2)   ::   t3ns, t3sn   ! 3d for north-south & south-north
   REAL(wp), DIMENSION(jpj,jpreci,jpk,2)   ::   t3ew, t3we   ! 3d for east-west & west-east
   REAL(wp), DIMENSION(jpi,jprecj,jpk,2)   ::   t3p1, t3p2   ! 3d for north fold
   REAL(wp), DIMENSION(jpi,jprecj,2)       ::   t2ns, t2sn   ! 2d for north-south & south-north
   REAL(wp), DIMENSION(jpj,jpreci,2)       ::   t2ew, t2we   ! 2d for east-west & west-east
   REAL(wp), DIMENSION(jpi,jprecj,2)       ::   t2p1, t2p2   ! 2d for north fold
   REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,jprecj+jpr2dj,2) ::   tr2ns, tr2sn   ! 2d for north-south & south-north + extra outer halo
   REAL(wp), DIMENSION(1-jpr2dj:jpj+jpr2dj,jpreci+jpr2di,2) ::   tr2ew, tr2we   ! 2d for east-west & west-east + extra outer halo
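
   !! Editorial note: in every t* / tr* exchange array above the last index
   !! selects one of the two message buffers,
   !!      (...,1) : buffer packed from the inner domain and sent,
   !!      (...,2) : buffer received from a neighbour and copied into the halo,
   !! as in mpp_lnk_3d below:
   !!      t3ew(:,jl,:,1)     = ptab(jpreci+jl,:,:)   ! pack columns to send
   !!      ptab(iihom+jl,:,:) = t3ew(:,jl,:,2)        ! unpack received columns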
   !!----------------------------------------------------------------------
   !!  NEMO/OPA 3.2 , LOCEAN-IPSL (2009)
   !! $Id$
   !! Software governed by the CeCILL licence (modipsl/doc/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------

CONTAINS

   FUNCTION mynode( ldtxt, localComm )
      !!----------------------------------------------------------------------
      !!                  ***  routine mynode  ***
      !!
      !! ** Purpose :   Identify the processor unit
      !!
      !!----------------------------------------------------------------------
      CHARACTER(len=*), DIMENSION(:), INTENT(  out) ::   ldtxt
      INTEGER, OPTIONAL             , INTENT(in   ) ::   localComm
      INTEGER ::   mynode, ierr, code
      LOGICAL ::   mpi_was_called

      NAMELIST/nammpp/ cn_mpi_send, nn_buffer
      !!----------------------------------------------------------------------
      !
      WRITE(ldtxt(1),*)
      WRITE(ldtxt(2),*) 'mynode : mpi initialisation'
      WRITE(ldtxt(3),*) '~~~~~~ '
      !
      REWIND( numnam )               ! Namelist nammpp : mpp parameters
      READ  ( numnam, nammpp )
      !                              ! control print
      WRITE(ldtxt(4),*) '   Namelist nammpp'
      WRITE(ldtxt(5),*) '      mpi send type                      cn_mpi_send = ', cn_mpi_send
      WRITE(ldtxt(6),*) '      size in bytes of exported buffer   nn_buffer   = ', nn_buffer

#if defined key_agrif
      IF( Agrif_Root() ) THEN
#endif
         !!bug RB : should be cleaned to use AGRIF in coupled mode
#if ! defined key_agrif
         CALL mpi_initialized ( mpi_was_called, code )
         IF( code /= MPI_SUCCESS ) THEN
            WRITE(*, cform_err)
            WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
            CALL mpi_abort( mpi_comm_world, code, ierr )
         ENDIF

         IF( PRESENT(localComm) .AND. mpi_was_called ) THEN
            mpi_comm_opa = localComm
            SELECT CASE ( cn_mpi_send )
            CASE ( 'S' )                ! Standard mpi send (blocking)
               WRITE(ldtxt(7),*) '           Standard blocking mpi send (send)'
            CASE ( 'B' )                ! Buffer mpi send (blocking)
               WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
               CALL mpi_init_opa( ierr )
            CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
               WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
               l_isend = .TRUE.
            CASE DEFAULT
               WRITE(ldtxt(7),cform_err)
               WRITE(ldtxt(8),*) '           bad value for cn_mpi_send = ', cn_mpi_send
               nstop = nstop + 1
            END SELECT
         ELSE IF ( PRESENT(localComm) .AND. .NOT. mpi_was_called ) THEN
            WRITE(ldtxt(7),*) ' lib_mpp: You cannot provide a local communicator '
            WRITE(ldtxt(8),*) '          without calling MPI_Init before ! '
            nstop = nstop + 1
         ELSE
#endif
            SELECT CASE ( cn_mpi_send )
            CASE ( 'S' )                ! Standard mpi send (blocking)
               WRITE(ldtxt(7),*) '           Standard blocking mpi send (send)'
               CALL mpi_init( ierr )
            CASE ( 'B' )                ! Buffer mpi send (blocking)
               WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
               CALL mpi_init_opa( ierr )
            CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
               WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
               l_isend = .TRUE.
               CALL mpi_init( ierr )
            CASE DEFAULT
               WRITE(ldtxt(7),cform_err)
               WRITE(ldtxt(8),*) '           bad value for cn_mpi_send = ', cn_mpi_send
               nstop = nstop + 1
            END SELECT

#if ! defined key_agrif
            CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code )
            IF( code /= MPI_SUCCESS ) THEN
               WRITE(*, cform_err)
               WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
               CALL mpi_abort( mpi_comm_world, code, ierr )
            ENDIF
            !
         ENDIF
#endif
#if defined key_agrif
      ELSE
         SELECT CASE ( cn_mpi_send )
         CASE ( 'S' )                ! Standard mpi send (blocking)
            WRITE(ldtxt(7),*) '           Standard blocking mpi send (send)'
         CASE ( 'B' )                ! Buffer mpi send (blocking)
            WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
            WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
            l_isend = .TRUE.
         CASE DEFAULT
            WRITE(ldtxt(7),cform_err)
            WRITE(ldtxt(8),*) '           bad value for cn_mpi_send = ', cn_mpi_send
            nstop = nstop + 1
         END SELECT
      ENDIF

      mpi_comm_opa = mpi_comm_world
#endif
      CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr )
      CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr )
      mynode = mpprank
      !
   END FUNCTION mynode
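
   !! Illustrative sketch (editorial addition): the send strategy used by all the
   !! exchange routines below is selected at run time through the nammpp namelist
   !! read in mynode. A minimal namelist block would look like:
   !!      &nammpp
   !!         cn_mpi_send = 'I'   ! 'S' standard, 'B' buffered (bsend), 'I' immediate (isend)
   !!         nn_buffer   = 0     ! attached buffer size in bytes, used when cn_mpi_send = 'B'
   !!      /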


   SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_3d  ***
      !!
      !! ** Purpose :   Message passing management
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first  dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : rank of the local eastern  neighbouring processor
      !!                    nowe   : rank of the local western  neighbouring processor
      !!                    noso   : rank of the local southern neighbouring processor
      !!                    nono   : rank of the local northern neighbouring processor
      !!
      !! ** Action  :   ptab with updated values at its periphery
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab     ! 3D array on which the boundary condition is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type  ! define the nature of ptab array grid-points
      !                                                             ! = T , U , V , F , W points
      REAL(wp)                        , INTENT(in   ) ::   psgn     ! =-1 the sign change across the north fold boundary
      !                                                             ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL      , INTENT(in   ) ::   cd_mpp   ! fill the overlap area only
      REAL(wp)        , OPTIONAL      , INTENT(in   ) ::   pval     ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!----------------------------------------------------------------------

      IF( PRESENT( pval ) ) THEN   ;   zland = pval      ! set land value
      ELSE                         ;   zland = 0.e0      ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with non zero values
         !
         DO jj = nlcj+1, jpj                 ! added line(s)   (inner only)
            ptab(1:nlci, jj, :) = ptab(1:nlci, nlej, :)
         END DO
         DO ji = nlci+1, jpi                 ! added column(s) (full)
            ptab(ji    , :  , :) = ptab(nlei  , :  , :)
         END DO
         !
      ELSE                              ! standard close or cyclic treatment
         !
         !                                   ! East-West boundaries
         !                                        !* Cyclic east-west
         IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
            ptab( 1 ,:,:) = ptab(jpim1,:,:)
            ptab(jpi,:,:) = ptab(  2  ,:,:)
         ELSE                                     !* closed
            IF( .NOT. cd_type == 'F' )   ptab(     1       :jpreci,:,:) = zland    ! west except F-point
                                         ptab(nlci-jpreci+1:jpi   ,:,:) = zland    ! east
         ENDIF
         !                                   ! North-South boundaries (always closed)
         IF( .NOT. cd_type == 'F' )   ptab(:,     1       :jprecj,:) = zland       ! south except F-point
                                      ptab(:,nlcj-jprecj+1:jpj   ,:) = zland       ! north
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            t3ew(:,jl,:,1) = ptab(jpreci+jl,:,:)
            t3we(:,jl,:,1) = ptab(iihom +jl,:,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj * jpk
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, t3ew(1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, t3ew(1,1,1,2), imigr )
         CALL mpprecv( 2, t3we(1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, t3we(1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci-jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab(iihom+jl,:,:) = t3ew(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = t3we(:,jl,:,2)
            ptab(iihom+jl,:,:) = t3ew(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = t3we(:,jl,:,2)
         END DO
      END SELECT


      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            t3sn(:,jl,:,1) = ptab(:,ijhom +jl,:)
            t3ns(:,jl,:,1) = ptab(:,jprecj+jl,:)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi * jpk
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, t3sn(1,1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, t3ns(1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 3, t3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, t3sn(1,1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, t3ns(1,1,1,2), imigr )
         CALL mpprecv( 4, t3sn(1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 3, t3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, t3sn(1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj-jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab(:,ijhom+jl,:) = t3ns(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab(:,jl      ,:) = t3sn(:,jl,:,2)
            ptab(:,ijhom+jl,:) = t3ns(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab(:,jl,:) = t3sn(:,jl,:,2)
         END DO
      END SELECT


      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
   END SUBROUTINE mpp_lnk_3d
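
   !! Usage sketch (editorial addition, illustrative variable names): a typical
   !! halo refresh after updating a (jpi,jpj,jpk) prognostic field,
   !!      CALL mpp_lnk_3d( ztem, 'T',  1. )   ! scalar field : sign kept across the north fold
   !!      CALL mpp_lnk_3d( zu  , 'U', -1. )   ! vector component : sign flipped across the fold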


   SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d  ***
      !!
      !! ** Purpose :   Message passing management for 2d array
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first  dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : rank of the local eastern  neighbouring processor
      !!                    nowe   : rank of the local western  neighbouring processor
      !!                    noso   : rank of the local southern neighbouring processor
      !!                    nono   : rank of the local northern neighbouring processor
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) ::   pt2d     ! 2D array on which the boundary condition is applied
      CHARACTER(len=1)            , INTENT(in   ) ::   cd_type  ! define the nature of ptab array grid-points
      !                                                         ! = T , U , V , F , W and I points
      REAL(wp)                    , INTENT(in   ) ::   psgn     ! =-1 the sign change across the north fold boundary
      !                                                         ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL  , INTENT(in   ) ::   cd_mpp   ! fill the overlap area only
      REAL(wp)        , OPTIONAL  , INTENT(in   ) ::   pval     ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!----------------------------------------------------------------------

      IF( PRESENT( pval ) ) THEN   ;   zland = pval      ! set land value
      ELSE                         ;   zland = 0.e0      ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      !
      IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with non zero values
         !
         DO jj = nlcj+1, jpj                 ! last line (inner)
            pt2d(1:nlci, jj) = pt2d(1:nlci, nlej)
         END DO
         DO ji = nlci+1, jpi                 ! last column
            pt2d(ji    , : ) = pt2d(nlei  , : )
         END DO
         !
      ELSE                              ! standard close or cyclic treatment
         !
         !                                   ! East-West boundaries
         IF( nbondi == 2 .AND.   &                ! Cyclic east-west
            &    (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
            pt2d( 1 ,:) = pt2d(jpim1,:)                                          ! west
            pt2d(jpi,:) = pt2d(  2  ,:)                                          ! east
         ELSE                                     ! closed
            IF( .NOT. cd_type == 'F' )   pt2d(     1       :jpreci,:) = zland    ! west except F-point
                                         pt2d(nlci-jpreci+1:jpi   ,:) = zland    ! east
         ENDIF
         !                                   ! North-South boundaries (always closed)
         IF( .NOT. cd_type == 'F' )   pt2d(:,     1       :jprecj) = zland       ! south except F-point
                                      pt2d(:,nlcj-jprecj+1:jpj   ) = zland       ! north
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            t2ew(:,jl,1) = pt2d(jpreci+jl,:)
            t2we(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, t2ew(1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, t2ew(1,1,2), imigr )
         CALL mpprecv( 2, t2we(1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, t2we(1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            pt2d(iihom+jl,:) = t2ew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            pt2d(jl      ,:) = t2we(:,jl,2)
            pt2d(iihom+jl,:) = t2ew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            pt2d(jl      ,:) = t2we(:,jl,2)
         END DO
      END SELECT


      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            t2sn(:,jl,1) = pt2d(:,ijhom +jl)
            t2ns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, t2ns(1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, t2ns(1,1,2), imigr )
         CALL mpprecv( 4, t2sn(1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, t2sn(1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            pt2d(:,ijhom+jl) = t2ns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            pt2d(:,jl      ) = t2sn(:,jl,2)
            pt2d(:,ijhom+jl) = t2ns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            pt2d(:,jl      ) = t2sn(:,jl,2)
         END DO
      END SELECT


      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( pt2d, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( pt2d, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
   END SUBROUTINE mpp_lnk_2d
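
   !! Usage sketch (editorial addition, illustrative names and values): the two
   !! optional arguments modify the closed-boundary treatment,
   !!      CALL mpp_lnk_2d( zssh, 'T',  1. )                   ! plain halo exchange
   !!      CALL mpp_lnk_2d( zmsk, 'V', -1., pval = 1.e0 )      ! closed boundaries filled with 1. instead of 0.
   !!      CALL mpp_lnk_2d( zwrk, 'T',  1., cd_mpp = 'mpp' )   ! only duplicate the last inner line/column
   !! (only the presence of cd_mpp matters; its value is never examined)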


   SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_3d_gather  ***
      !!
      !! ** Purpose :   Message passing management for two 3D arrays
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first  dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : rank of the local eastern  neighbouring processor
      !!                    nowe   : rank of the local western  neighbouring processor
      !!                    noso   : rank of the local southern neighbouring processor
      !!                    nono   : rank of the local northern neighbouring processor
      !!
      !! ** Action  :   ptab1 and ptab2 with updated values at their periphery
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab1     ! first and second 3D array on which
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab2     ! the boundary condition is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type1  ! nature of ptab1 and ptab2 arrays
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type2  ! i.e. grid-points = T , U , V , F or W points
      REAL(wp)                        , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
      !!                                                             ! = 1. , the sign is kept
      INTEGER  ::   jl                         ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!----------------------------------------------------------------------

      ! 1. standard boundary treatment
      ! ------------------------------
      !                                   ! East-West boundaries
      !                                        !* Cyclic east-west
      IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
         ptab1( 1 ,:,:) = ptab1(jpim1,:,:)
         ptab1(jpi,:,:) = ptab1(  2  ,:,:)
         ptab2( 1 ,:,:) = ptab2(jpim1,:,:)
         ptab2(jpi,:,:) = ptab2(  2  ,:,:)
      ELSE                                     !* closed
         IF( .NOT. cd_type1 == 'F' )   ptab1(     1       :jpreci,:,:) = 0.e0    ! west except at F-point
         IF( .NOT. cd_type2 == 'F' )   ptab2(     1       :jpreci,:,:) = 0.e0
                                       ptab1(nlci-jpreci+1:jpi   ,:,:) = 0.e0    ! east
                                       ptab2(nlci-jpreci+1:jpi   ,:,:) = 0.e0
      ENDIF


      !                                   ! North-South boundaries
      IF( .NOT. cd_type1 == 'F' )   ptab1(:,     1       :jprecj,:) = 0.e0    ! south except at F-point
      IF( .NOT. cd_type2 == 'F' )   ptab2(:,     1       :jprecj,:) = 0.e0
                                    ptab1(:,nlcj-jprecj+1:jpj   ,:) = 0.e0    ! north
                                    ptab2(:,nlcj-jprecj+1:jpj   ,:) = 0.e0


      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            t4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:)
            t4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:)
            t4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:)
            t4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj * jpk * 2
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr )
         CALL mpprecv( 2, t4we(1,1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, t4we(1,1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab1(iihom+jl,:,:) = t4ew(:,jl,:,1,2)
            ptab2(iihom+jl,:,:) = t4ew(:,jl,:,2,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab1(jl      ,:,:) = t4we(:,jl,:,1,2)
            ptab1(iihom+jl,:,:) = t4ew(:,jl,:,1,2)
            ptab2(jl      ,:,:) = t4we(:,jl,:,2,2)
            ptab2(iihom+jl,:,:) = t4ew(:,jl,:,2,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab1(jl      ,:,:) = t4we(:,jl,:,1,2)
            ptab2(jl      ,:,:) = t4we(:,jl,:,2,2)
         END DO
      END SELECT


      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj - nrecj
         DO jl = 1, jprecj
            t4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:)
            t4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:)
            t4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:)
            t4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi * jpk * 2
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, t4sn(1,1,1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 3, t4ns(1,1,1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, t4sn(1,1,1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr )
         CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 3, t4ns(1,1,1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab1(:,ijhom+jl,:) = t4ns(:,jl,:,1,2)
            ptab2(:,ijhom+jl,:) = t4ns(:,jl,:,2,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab1(:,jl      ,:) = t4sn(:,jl,:,1,2)
            ptab1(:,ijhom+jl,:) = t4ns(:,jl,:,1,2)
            ptab2(:,jl      ,:) = t4sn(:,jl,:,2,2)
            ptab2(:,ijhom+jl,:) = t4ns(:,jl,:,2,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab1(:,jl,:) = t4sn(:,jl,:,1,2)
            ptab2(:,jl,:) = t4sn(:,jl,:,2,2)
         END DO
      END SELECT


      ! 4. north fold treatment
      ! -----------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )
            CALL lbc_nfd      ( ptab1, cd_type1, psgn )   ! only 1 northern proc, no mpp
            CALL lbc_nfd      ( ptab2, cd_type2, psgn )
         CASE DEFAULT
            CALL mpp_lbc_north( ptab1, cd_type1, psgn )   ! for all northern procs.
            CALL mpp_lbc_north( ptab2, cd_type2, psgn )
         END SELECT
         !
      ENDIF
      !
   END SUBROUTINE mpp_lnk_3d_gather
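
   !! Usage sketch (editorial addition, illustrative names): mpp_lnk_3d_gather
   !! refreshes the halos of two 3D fields with a single pair of messages,
   !! halving the number of MPI calls with respect to two mpp_lnk_3d calls:
   !!      CALL mpp_lnk_3d_gather( zu, 'U', zv, 'V', -1. )
   !! instead of
   !!      CALL mpp_lnk_3d( zu, 'U', -1. )   ;   CALL mpp_lnk_3d( zv, 'V', -1. )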


   SUBROUTINE mpp_lnk_2d_e( pt2d, cd_type, psgn )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_e  ***
      !!
      !! ** Purpose :   Message passing management for 2d array (with extra halo)
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first  dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    jpr2di : size of the extra outer halo in the i-direction
      !!                    jpr2dj : size of the extra outer halo in the j-direction
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : rank of the local eastern  neighbouring processor
      !!                    nowe   : rank of the local western  neighbouring processor
      !!                    noso   : rank of the local southern neighbouring processor
      !!                    nono   : rank of the local northern neighbouring processor
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) ::   pt2d     ! 2D array with extra halo
      CHARACTER(len=1)                                            , INTENT(in   ) ::   cd_type  ! nature of ptab array grid-points
      !                                                                                         ! = T , U , V , F , W and I points
      REAL(wp)                                                    , INTENT(in   ) ::   psgn     ! =-1 the sign change across the
      !!                                                                                        ! north boundary, = 1. otherwise
      INTEGER  ::   jl                         ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ipreci, iprecj             ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!----------------------------------------------------------------------

      ipreci = jpreci + jpr2di      ! take into account outer extra 2D overlap area
      iprecj = jprecj + jpr2dj


      ! 1. standard boundary treatment
      ! ------------------------------
      ! Order matters here !
      !
      !                                      !* North-South boundaries (always closed)
      IF( .NOT. cd_type == 'F' )   pt2d(:,  1-jpr2dj   :  jprecj  ) = 0.e0    ! south except at F-point
                                   pt2d(:,nlcj-jprecj+1:jpj+jpr2dj) = 0.e0    ! north

      !                                      ! East-West boundaries
      !                                           !* Cyclic east-west
      IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
         pt2d(1-jpr2di:     1    ,:) = pt2d(jpim1-jpr2di:  jpim1 ,:)          ! west
         pt2d(   jpi  :jpi+jpr2di,:) = pt2d(     2      :2+jpr2di,:)          ! east
         !
      ELSE                                        !* closed
         IF( .NOT. cd_type == 'F' )   pt2d(  1-jpr2di   :jpreci    ,:) = 0.e0    ! west except at F-point
                                      pt2d(nlci-jpreci+1:jpi+jpr2di,:) = 0.e0    ! east
      ENDIF
      !

      ! north fold treatment
      ! -----------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd        ( pt2d(1:jpi,1:jpj+jpr2dj), cd_type, psgn, pr2dj=jpr2dj )
         CASE DEFAULT   ;   CALL mpp_lbc_north_e( pt2d                    , cd_type, psgn               )
         END SELECT
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
         iihom = nlci-nreci-jpr2di
         DO jl = 1, ipreci
            tr2ew(:,jl,1) = pt2d(jpreci+jl,:)
            tr2we(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = ipreci * ( jpj + 2*jpr2dj )
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr )
         CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, ipreci
            pt2d(iihom+jl,:) = tr2ew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, ipreci
            pt2d(jl-jpr2di,:) = tr2we(:,jl,2)
            pt2d( iihom+jl,:) = tr2ew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, ipreci
            pt2d(jl-jpr2di,:) = tr2we(:,jl,2)
         END DO
      END SELECT


      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj-jpr2dj
         DO jl = 1, iprecj
            tr2sn(:,jl,1) = pt2d(:,ijhom +jl)
            tr2ns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = iprecj * ( jpi + 2*jpr2di )
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, tr2sn(1-jpr2di,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, tr2ns(1-jpr2di,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, tr2sn(1-jpr2di,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr )
         CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, tr2ns(1-jpr2di,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, iprecj
            pt2d(:,ijhom+jl) = tr2ns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, iprecj
            pt2d(:,jl-jpr2dj) = tr2sn(:,jl,2)
            pt2d(:,ijhom+jl ) = tr2ns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, iprecj
            pt2d(:,jl-jpr2dj) = tr2sn(:,jl,2)
         END DO
      END SELECT

   END SUBROUTINE mpp_lnk_2d_e
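
   !! Usage sketch (editorial addition, illustrative name): mpp_lnk_2d_e expects
   !! an array declared with the extra outer halo,
   !!      REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj) ::   zwrk
   !!      CALL mpp_lnk_2d_e( zwrk, 'T', 1. )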


   SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsend  ***
      !!
      !! ** Purpose :   Send a message passing array
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout) ::   pmess(*)   ! array of real
      INTEGER , INTENT(in   ) ::   kbytes     ! size of the array pmess (number of elements)
      INTEGER , INTENT(in   ) ::   kdest      ! rank of the receiving process
      INTEGER , INTENT(in   ) ::   ktyp       ! tag of the message
      INTEGER , INTENT(inout) ::   md_req     ! mpi request identifier (written by isend)
      !!
      INTEGER ::   iflag
      !!----------------------------------------------------------------------
      !
      SELECT CASE ( cn_mpi_send )
      CASE ( 'S' )                ! Standard mpi send (blocking)
         CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest, ktyp, mpi_comm_opa        , iflag )
      CASE ( 'B' )                ! Buffer mpi send (blocking)
         CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest, ktyp, mpi_comm_opa        , iflag )
      CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
         ! be careful: one more argument here, the mpi request identifier
         CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest, ktyp, mpi_comm_opa, md_req, iflag )
      END SELECT
      !
   END SUBROUTINE mppsend
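
   !! Editorial note: with cn_mpi_send = 'I' the call returns before the message
   !! buffer can be reused, so callers must complete the request themselves, as
   !! the exchange routines above do:
   !!      CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req1 )
   !!      IF( l_isend )   CALL mpi_wait( ml_req1, ml_stat, ml_err )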


   SUBROUTINE mpprecv( ktyp, pmess, kbytes )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpprecv  ***
      !!
      !! ** Purpose :   Receive a message passing array
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout) ::   pmess(*)   ! array of real
      INTEGER , INTENT(in   ) ::   kbytes     ! size of the array pmess (number of elements)
      INTEGER , INTENT(in   ) ::   ktyp       ! tag of the received message
      !!
      INTEGER ::   istatus(mpi_status_size)
      INTEGER ::   iflag
      !!----------------------------------------------------------------------
      !
      CALL mpi_recv( pmess, kbytes, mpi_double_precision, mpi_any_source, ktyp, mpi_comm_opa, istatus, iflag )
      !
   END SUBROUTINE mpprecv


   SUBROUTINE mppgather( ptab, kp, pio )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppgather  ***
      !!
      !! ** Purpose :   Transfer between a local subdomain array and a work
      !!      array which is distributed following the vertical level.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj)      , INTENT(in   ) ::   ptab   ! subdomain input array
      INTEGER                           , INTENT(in   ) ::   kp     ! rank of the receiving (root) process
      REAL(wp), DIMENSION(jpi,jpj,jpnij), INTENT(  out) ::   pio    ! gathered output array
      !!
      INTEGER ::   itaille, ierror   ! temporary integers
      !!---------------------------------------------------------------------
      !
      itaille = jpi * jpj
      CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille,   &
         &             mpi_double_precision, kp, mpi_comm_opa, ierror )
      !
   END SUBROUTINE mppgather


   SUBROUTINE mppscatter( pio, kp, ptab )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppscatter  ***
      !!
      !! ** Purpose :   Transfer between a work array which is distributed
      !!      following the vertical level and the local subdomain array.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpnij) ::   pio    ! distributed input array
      INTEGER                            ::   kp     ! rank of the sending (root) process
      REAL(wp), DIMENSION(jpi,jpj)       ::   ptab   ! subdomain output array
      !!
      INTEGER ::   itaille, ierror   ! temporary integers
      !!---------------------------------------------------------------------
      !
      itaille = jpi * jpj
      !
      CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille,   &
         &              mpi_double_precision, kp, mpi_comm_opa, ierror )
      !
   END SUBROUTINE mppscatter
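
   !! Usage sketch (editorial addition, illustrative names): mppgather and
   !! mppscatter form a round trip through a work array dimensioned (jpi,jpj,jpnij),
   !!      CALL mppgather ( ptab, 0, zio )    ! collect every subdomain slice of ptab on process 0
   !!      CALL mppscatter( zio , 0, ptab )   ! hand each subdomain its slice back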
---|
1122 | |
---|
1123 | |
---|
1124 | SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) |
---|
1125 | !!---------------------------------------------------------------------- |
---|
1126 | !! *** routine mppmax_a_int *** |
---|
1127 | !! |
---|
1128 | !! ** Purpose : Find maximum value in an integer layout array |
---|
1129 | !! |
---|
1130 | !!---------------------------------------------------------------------- |
---|
1131 | INTEGER , INTENT(in ) :: kdim ! size of array |
---|
1132 | INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array |
---|
1133 | INTEGER , INTENT(in ), OPTIONAL :: kcom ! |
---|
1134 | !! |
---|
1135 | INTEGER :: ierror, localcomm ! temporary integer |
---|
1136 | INTEGER, DIMENSION(kdim) :: iwork |
---|
1137 | !!---------------------------------------------------------------------- |
---|
1138 | ! |
---|
1139 | localcomm = mpi_comm_opa |
---|
1140 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1141 | ! |
---|
1142 | CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror ) |
---|
1143 | ! |
---|
1144 | ktab(:) = iwork(:) |
---|
1145 | ! |
---|
1146 | END SUBROUTINE mppmax_a_int |
---|
1147 | |
---|
1148 | |
---|
1149 | SUBROUTINE mppmax_int( ktab, kcom ) |
---|
1150 | !!---------------------------------------------------------------------- |
---|
1151 | !! *** routine mppmax_int *** |
---|
1152 | !! |
---|
1153 | !! ** Purpose : Find maximum value in an integer layout array |
---|
1154 | !! |
---|
1155 | !!---------------------------------------------------------------------- |
---|
1156 | INTEGER, INTENT(inout) :: ktab ! ??? |
---|
1157 | INTEGER, INTENT(in ), OPTIONAL :: kcom ! ??? |
---|
1158 | !! |
---|
1159 | INTEGER :: ierror, iwork, localcomm ! temporary integer |
---|
1160 | !!---------------------------------------------------------------------- |
---|
1161 | ! |
---|
1162 | localcomm = mpi_comm_opa |
---|
1163 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1164 | ! |
---|
1165 | CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror) |
---|
1166 | ! |
---|
1167 | ktab = iwork |
---|
1168 | ! |
---|
1169 | END SUBROUTINE mppmax_int |
---|
1170 | |
---|
1171 | |
---|
1172 | SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) |
---|
1173 | !!---------------------------------------------------------------------- |
---|
1174 | !! *** routine mppmin_a_int *** |
---|
1175 | !! |
---|
1176 | !! ** Purpose : Find minimum value in an integer layout array |
---|
1177 | !! |
---|
1178 | !!---------------------------------------------------------------------- |
---|
1179 | INTEGER , INTENT( in ) :: kdim ! size of array |
---|
1180 | INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array |
---|
1181 | INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array |
---|
1182 | !! |
---|
1183 | INTEGER :: ierror, localcomm ! temporary integer |
---|
1184 | INTEGER, DIMENSION(kdim) :: iwork |
---|
1185 | !!---------------------------------------------------------------------- |
---|
1186 | ! |
---|
1187 | localcomm = mpi_comm_opa |
---|
1188 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1189 | ! |
---|
1190 | CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror ) |
---|
1191 | ! |
---|
1192 | ktab(:) = iwork(:) |
---|
1193 | ! |
---|
1194 | END SUBROUTINE mppmin_a_int |
---|


   SUBROUTINE mppmin_int( ktab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmin_int  ***
      !!
      !! ** Purpose :   Find the global minimum of a scalar integer
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(inout)           ::   ktab   ! input scalar
      INTEGER, INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, iwork, localcomm
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror )
      !
      ktab = iwork
      !
   END SUBROUTINE mppmin_int


   SUBROUTINE mppsum_a_int( ktab, kdim )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_a_int  ***
      !!
      !! ** Purpose :   Global integer sum, 1D array case
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in   )                  ::   kdim   ! size of array
      INTEGER, INTENT(inout), DIMENSION(kdim) ::   ktab   ! input array
      !!
      INTEGER ::   ierror
      INTEGER, DIMENSION(kdim) ::   iwork
      !!----------------------------------------------------------------------
      !
      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror )
      !
      ktab(:) = iwork(:)
      !
   END SUBROUTINE mppsum_a_int


   SUBROUTINE mppsum_int( ktab )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_int  ***
      !!
      !! ** Purpose :   Global integer sum
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(inout) ::   ktab   ! input scalar
      !!
      INTEGER ::   ierror, iwork
      !!----------------------------------------------------------------------
      !
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror )
      !
      ktab = iwork
      !
   END SUBROUTINE mppsum_int
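
   !! A minimal sketch of a global diagnostic built on these sums (assuming the
   !! generic mpp_sum interface resolves to the routines above; "icount" and the
   !! mask test are hypothetical):
   !!      icount = COUNT( zmask(:,:) == 1. )   ! local number of ocean points
   !!      CALL mpp_sum( icount )               ! icount becomes the global count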


   SUBROUTINE mppmax_a_real( ptab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmax_a_real  ***
      !!
      !! ** Purpose :   Find the global maximum, element by element, of a real array
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT(in   )                  ::   kdim   ! size of array
      REAL(wp), INTENT(inout), DIMENSION(kdim) ::   ptab   ! input array
      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, localcomm
      REAL(wp), DIMENSION(kdim) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror )
      ptab(:) = zwork(:)
      !
   END SUBROUTINE mppmax_a_real


   SUBROUTINE mppmax_real( ptab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmax_real  ***
      !!
      !! ** Purpose :   Find the global maximum of a real scalar
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout)           ::   ptab   ! input scalar
      INTEGER , INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER  ::   ierror, localcomm
      REAL(wp) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror )
      ptab = zwork
      !
   END SUBROUTINE mppmax_real


   SUBROUTINE mppmin_a_real( ptab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmin_a_real  ***
      !!
      !! ** Purpose :   Minimum of REAL, array case
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT(in   )                  ::   kdim
      REAL(wp), INTENT(inout), DIMENSION(kdim) ::   ptab
      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom
      !!
      INTEGER ::   ierror, localcomm
      REAL(wp), DIMENSION(kdim) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror )
      ptab(:) = zwork(:)
      !
   END SUBROUTINE mppmin_a_real


   SUBROUTINE mppmin_real( ptab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmin_real  ***
      !!
      !! ** Purpose :   minimum of REAL, scalar case
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout)           ::   ptab   ! input scalar
      INTEGER , INTENT(in   ), OPTIONAL ::   kcom
      !!
      INTEGER  ::   ierror, localcomm
      REAL(wp) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror )
      ptab = zwork
      !
   END SUBROUTINE mppmin_real


   SUBROUTINE mppsum_a_real( ptab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_a_real  ***
      !!
      !! ** Purpose :   global sum, REAL ARRAY argument case
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT(in   )                  ::   kdim    ! size of ptab
      REAL(wp), INTENT(inout), DIMENSION(kdim) ::   ptab    ! input array
      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom    ! optional communicator
      !!
      INTEGER ::   ierror, localcomm             ! temporary integers
      REAL(wp), DIMENSION(kdim) ::   zwork       ! temporary workspace
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror )
      ptab(:) = zwork(:)
      !
   END SUBROUTINE mppsum_a_real


   SUBROUTINE mppsum_real( ptab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_real  ***
      !!
      !! ** Purpose :   global sum, SCALAR argument case
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout)           ::   ptab   ! input scalar
      INTEGER , INTENT(in   ), OPTIONAL ::   kcom
      !!
      INTEGER  ::   ierror, localcomm
      REAL(wp) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror )
      ptab = zwork
      !
   END SUBROUTINE mppsum_real
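
   !! A hedged sketch of a reduction restricted to a sub-communicator (assuming
   !! a communicator such as ncomm_znl has already been created by mpp_ini_znl
   !! below; "zdiag" is a hypothetical local scalar):
   !!      CALL mpp_sum( zdiag, kcom = ncomm_znl )   ! sum over one processor row only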


   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_minloc2d  ***
      !!
      !! ** Purpose :   Compute the global minimum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MINLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   ptab     ! Local 2D array
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   pmask    ! Local mask
      REAL(wp)                     , INTENT(  out) ::   pmin     ! Global minimum of ptab
      INTEGER                      , INTENT(  out) ::   ki, kj   ! index of minimum in global frame
      !!
      INTEGER , DIMENSION(2)   ::   ilocs
      INTEGER                  ::   ierror
      REAL(wp)                 ::   zmin   ! local minimum
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      !!-----------------------------------------------------------------------
      !
      zmin  = MINVAL( ptab(:,:) , mask= pmask == 1.e0 )
      ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      !
      zain(1,:) = zmin
      zain(2,:) = ki + 10000. * kj   ! encode the global (i,j) position in one real
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_OPA, ierror )
      !
      pmin = zaout(1,1)
      kj = INT( zaout(2,1) / 10000. )        ! decode the position
      ki = INT( zaout(2,1) - 10000. * kj )
      !
   END SUBROUTINE mpp_minloc2d
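
   !! Worked example of the (i,j) packing used above (valid as long as the
   !! global i-index stays below the 10000 packing base): for ki = 123 and
   !! kj = 45 the packed value is 123 + 10000*45 = 450123; decoding gives
   !! kj = INT(450123/10000.) = 45 and ki = 450123 - 10000*45 = 123.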


   SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_minloc3d  ***
      !!
      !! ** Purpose :   Compute the global minimum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MINLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   ptab         ! Local 3D array
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   pmask        ! Local mask
      REAL(wp)                         , INTENT(  out) ::   pmin         ! Global minimum of ptab
      INTEGER                          , INTENT(  out) ::   ki, kj, kk   ! index of minimum in global frame
      !!
      INTEGER                  ::   ierror
      REAL(wp)                 ::   zmin   ! local minimum
      INTEGER , DIMENSION(3)   ::   ilocs
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      !!-----------------------------------------------------------------------
      !
      zmin  = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 )
      ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      kk = ilocs(3)
      !
      zain(1,:) = zmin
      zain(2,:) = ki + 10000.*kj + 100000000.*kk   ! encode the global (i,j,k) position
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_OPA, ierror )
      !
      pmin = zaout(1,1)
      kk = INT( zaout(2,1) / 100000000. )                        ! decode the position
      kj = INT( zaout(2,1) - kk * 100000000. ) / 10000
      ki = INT( zaout(2,1) - kk * 100000000. - kj * 10000. )
      !
   END SUBROUTINE mpp_minloc3d


   SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_maxloc2d  ***
      !!
      !! ** Purpose :   Compute the global maximum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MAXLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   ptab     ! Local 2D array
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   pmask    ! Local mask
      REAL(wp)                     , INTENT(  out) ::   pmax     ! Global maximum of ptab
      INTEGER                      , INTENT(  out) ::   ki, kj   ! index of maximum in global frame
      !!
      INTEGER                  ::   ierror
      INTEGER , DIMENSION(2)   ::   ilocs
      REAL(wp)                 ::   zmax   ! local maximum
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      !!-----------------------------------------------------------------------
      !
      zmax  = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 )
      ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      !
      zain(1,:) = zmax
      zain(2,:) = ki + 10000. * kj   ! encode the global (i,j) position in one real
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_OPA, ierror )
      !
      pmax = zaout(1,1)
      kj = INT( zaout(2,1) / 10000. )        ! decode the position
      ki = INT( zaout(2,1) - 10000. * kj )
      !
   END SUBROUTINE mpp_maxloc2d


   SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_maxloc3d  ***
      !!
      !! ** Purpose :   Compute the global maximum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MAXLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   ptab         ! Local 3D array
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   pmask        ! Local mask
      REAL(wp)                         , INTENT(  out) ::   pmax         ! Global maximum of ptab
      INTEGER                          , INTENT(  out) ::   ki, kj, kk   ! index of maximum in global frame
      !!
      REAL(wp)                 ::   zmax   ! local maximum
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      INTEGER , DIMENSION(3)   ::   ilocs
      INTEGER                  ::   ierror
      !!-----------------------------------------------------------------------
      !
      zmax  = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 )
      ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      kk = ilocs(3)
      !
      zain(1,:) = zmax
      zain(2,:) = ki + 10000.*kj + 100000000.*kk   ! encode the global (i,j,k) position
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_OPA, ierror )
      !
      pmax = zaout(1,1)
      kk = INT( zaout(2,1) / 100000000. )                        ! decode the position
      kj = INT( zaout(2,1) - kk * 100000000. ) / 10000
      ki = INT( zaout(2,1) - kk * 100000000. - kj * 10000. )
      !
   END SUBROUTINE mpp_maxloc3d


   SUBROUTINE mppsync()
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsync  ***
      !!
      !! ** Purpose :   Massively parallel processors, synchronization
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   ierror
      !!-----------------------------------------------------------------------
      !
      CALL mpi_barrier( mpi_comm_opa, ierror )
      !
   END SUBROUTINE mppsync


   SUBROUTINE mppstop
      !!----------------------------------------------------------------------
      !!                  ***  routine mppstop  ***
      !!
      !! ** purpose :   Stop massively parallel processors method
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   info
      !!----------------------------------------------------------------------
      !
      CALL mppsync
      CALL mpi_finalize( info )
      !
   END SUBROUTINE mppstop


   SUBROUTINE mppobc( ptab, kd1, kd2, kl, kk, ktype, kij )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppobc  ***
      !!
      !! ** Purpose :   Message passing management for open boundary
      !!              conditions array
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!              between processors following neighboring subdomains.
      !!              domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT(in   )                    ::   kd1, kd2   ! starting and ending indices
      INTEGER , INTENT(in   )                    ::   kl         ! index of open boundary
      INTEGER , INTENT(in   )                    ::   kk         ! vertical dimension
      INTEGER , INTENT(in   )                    ::   ktype      ! define north/south or east/west cdt
      !                                                          !  = 1  north/south  ;  = 2  east/west
      INTEGER , INTENT(in   )                    ::   kij        ! horizontal dimension
      REAL(wp), INTENT(inout), DIMENSION(kij,kk) ::   ptab       ! variable array
      !!
      INTEGER ::   ji, jj, jk, jl        ! dummy loop indices
      INTEGER ::   iipt0, iipt1, ilpt1   ! temporary integers
      INTEGER ::   ijpt0, ijpt1          !    -          -
      INTEGER ::   imigr, iihom, ijhom   !    -          -
      INTEGER ::   ml_req1, ml_req2, ml_err    ! for key_mpi_isend
      INTEGER ::   ml_stat(MPI_STATUS_SIZE)    ! for key_mpi_isend
      REAL(wp), DIMENSION(jpi,jpj) ::   ztab    ! temporary workspace
      !!----------------------------------------------------------------------

      ! boundary condition initialization
      ! ---------------------------------
      ztab(:,:) = 0.e0
      !
      IF( ktype == 1 ) THEN                                  ! north/south boundaries
         iipt0 = MAX( 1, MIN(kd1 - nimpp+1, nlci     ) )
         iipt1 = MAX( 0, MIN(kd2 - nimpp+1, nlci - 1 ) )
         ilpt1 = MAX( 1, MIN(kd2 - nimpp+1, nlci     ) )
         ijpt0 = MAX( 1, MIN(kl  - njmpp+1, nlcj     ) )
         ijpt1 = MAX( 0, MIN(kl  - njmpp+1, nlcj - 1 ) )
      ELSEIF( ktype == 2 ) THEN                              ! east/west boundaries
         iipt0 = MAX( 1, MIN(kl  - nimpp+1, nlci     ) )
         iipt1 = MAX( 0, MIN(kl  - nimpp+1, nlci - 1 ) )
         ijpt0 = MAX( 1, MIN(kd1 - njmpp+1, nlcj     ) )
         ijpt1 = MAX( 0, MIN(kd2 - njmpp+1, nlcj - 1 ) )
         ilpt1 = MAX( 1, MIN(kd2 - njmpp+1, nlcj     ) )
      ELSE
         CALL ctl_stop( 'mppobc: bad ktype' )
      ENDIF

      ! Communication level by level
      ! ----------------------------
!!gm Remark : this is very time consuming!!!
      !                                         ! ------------------------ !
      DO jk = 1, kk                             !   Loop over the levels   !
         !                                      ! ------------------------ !
         !
         IF( ktype == 1 ) THEN                               ! north/south boundaries
            DO jj = ijpt0, ijpt1
               DO ji = iipt0, iipt1
                  ztab(ji,jj) = ptab(ji,jk)
               END DO
            END DO
         ELSEIF( ktype == 2 ) THEN                           ! east/west boundaries
            DO jj = ijpt0, ijpt1
               DO ji = iipt0, iipt1
                  ztab(ji,jj) = ptab(jj,jk)
               END DO
            END DO
         ENDIF


         ! 1. East and west directions
         ! ---------------------------
         !
         IF( nbondi /= 2 ) THEN         ! Read Dirichlet lateral conditions
            iihom = nlci-nreci
            DO jl = 1, jpreci
               t2ew(:,jl,1) = ztab(jpreci+jl,:)
               t2we(:,jl,1) = ztab(iihom +jl,:)
            END DO
         ENDIF
         !
         !                              ! Migrations
         imigr = jpreci*jpj
         !
         IF( nbondi == -1 ) THEN
            CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 )
            CALL mpprecv( 1, t2ew(1,1,2), imigr )
            IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         ELSEIF( nbondi == 0 ) THEN
            CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
            CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 )
            CALL mpprecv( 1, t2ew(1,1,2), imigr )
            CALL mpprecv( 2, t2we(1,1,2), imigr )
            IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
            IF(l_isend)   CALL mpi_wait( ml_req2, ml_stat, ml_err )
         ELSEIF( nbondi == 1 ) THEN
            CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 )
            CALL mpprecv( 2, t2we(1,1,2), imigr )
            IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         ENDIF
         !
         !                              ! Write Dirichlet lateral conditions
         iihom = nlci-jpreci
         !
         IF( nbondi == 0 .OR. nbondi == 1 ) THEN
            DO jl = 1, jpreci
               ztab(jl,:) = t2we(:,jl,2)
            END DO
         ENDIF
         IF( nbondi == -1 .OR. nbondi == 0 ) THEN
            DO jl = 1, jpreci
               ztab(iihom+jl,:) = t2ew(:,jl,2)
            END DO
         ENDIF


         ! 2. North and south directions
         ! -----------------------------
         !
         IF( nbondj /= 2 ) THEN         ! Read Dirichlet lateral conditions
            ijhom = nlcj-nrecj
            DO jl = 1, jprecj
               t2sn(:,jl,1) = ztab(:,ijhom +jl)
               t2ns(:,jl,1) = ztab(:,jprecj+jl)
            END DO
         ENDIF
         !
         !                              ! Migrations
         imigr = jprecj * jpi
         !
         IF( nbondj == -1 ) THEN
            CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 )
            CALL mpprecv( 3, t2ns(1,1,2), imigr )
            IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         ELSEIF( nbondj == 0 ) THEN
            CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
            CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 )
            CALL mpprecv( 3, t2ns(1,1,2), imigr )
            CALL mpprecv( 4, t2sn(1,1,2), imigr )
            IF( l_isend )   CALL mpi_wait( ml_req1, ml_stat, ml_err )
            IF( l_isend )   CALL mpi_wait( ml_req2, ml_stat, ml_err )
         ELSEIF( nbondj == 1 ) THEN
            CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 )
            CALL mpprecv( 4, t2sn(1,1,2), imigr )
            IF( l_isend )   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         ENDIF
         !
         !                              ! Write Dirichlet lateral conditions
         ijhom = nlcj - jprecj
         IF( nbondj == 0 .OR. nbondj == 1 ) THEN
            DO jl = 1, jprecj
               ztab(:,jl) = t2sn(:,jl,2)
            END DO
         ENDIF
         IF( nbondj == 0 .OR. nbondj == -1 ) THEN
            DO jl = 1, jprecj
               ztab(:,ijhom+jl) = t2ns(:,jl,2)
            END DO
         ENDIF
         IF( ktype==1 .AND. kd1 <= jpi+nimpp-1 .AND. nimpp <= kd2 ) THEN
            DO jj = ijpt0, ijpt1                  ! north/south boundaries
               DO ji = iipt0, ilpt1
                  ptab(ji,jk) = ztab(ji,jj)
               END DO
            END DO
         ELSEIF( ktype==2 .AND. kd1 <= jpj+njmpp-1 .AND. njmpp <= kd2 ) THEN
            DO jj = ijpt0, ilpt1                  ! east/west boundaries
               DO ji = iipt0, iipt1
                  ptab(jj,jk) = ztab(ji,jj)
               END DO
            END DO
         ENDIF
         !
      END DO
      !
   END SUBROUTINE mppobc
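
   !! A hedged calling sketch (argument meanings as in the header above; the
   !! array "zobc" and the index bounds are hypothetical): exchange a
   !! north/south open-boundary array lying on global row jpjglo, spanning
   !! global i-indices 2 to jpiglo-1, with jpk vertical levels:
   !!      CALL mppobc( zobc, 2, jpiglo-1, jpjglo, jpk, 1, jpi )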


   SUBROUTINE mpp_comm_free( kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_comm_free  ***
      !!
      !! ** Purpose :   Free an MPI communicator created by this module
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in) ::   kcom
      !!
      INTEGER ::   ierr
      !!----------------------------------------------------------------------
      !
      CALL MPI_COMM_FREE( kcom, ierr )
      !
   END SUBROUTINE mpp_comm_free


   SUBROUTINE mpp_ini_ice( pindic )
      !!----------------------------------------------------------------------
      !!               ***  routine mpp_ini_ice  ***
      !!
      !! ** Purpose :   Initialize special communicator for ice areas
      !!      condition together with global variables needed in the ddmpp folding
      !!
      !! ** Method  : - Look for ice processors in ice routines
      !!              - Put their number in nrank_ice
      !!              - Create groups for the world processors and the ice processors
      !!              - Create a communicator for ice processors
      !!
      !! ** output
      !!      ndim_rank_ice = number of processors with ice
      !!      nrank_ice (ndim_rank_ice) = ice processors
      !!      ngrp_world = group ID for the world processors
      !!      ngrp_ice = group ID for the ice processors
      !!      ncomm_ice = communicator for the ice procs.
      !!      n_ice_root = number (in the world) of proc 0 in the ice comm.
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in) ::   pindic
      !!
      INTEGER ::   ierr
      INTEGER ::   jjproc
      INTEGER ::   ii
      INTEGER, DIMENSION(jpnij) ::   kice
      INTEGER, DIMENSION(jpnij) ::   zwork
      !!----------------------------------------------------------------------
      !
      ! Look for how many procs with sea-ice
      !
      kice = 0
      DO jjproc = 1, jpnij
         IF( jjproc == narea .AND. pindic .GT. 0 )   kice(jjproc) = 1
      END DO
      !
      zwork = 0
      CALL MPI_ALLREDUCE( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr )
      ndim_rank_ice = SUM( zwork )

      ! Allocate the right size to nrank_ice
#if ! defined key_agrif
      IF( ALLOCATED ( nrank_ice ) )   DEALLOCATE( nrank_ice )
#else
      IF( ASSOCIATED( nrank_ice ) )   DEALLOCATE( nrank_ice )
#endif
      ALLOCATE( nrank_ice(ndim_rank_ice) )
      !
      ii = 0
      nrank_ice = 0
      DO jjproc = 1, jpnij
         IF( zwork(jjproc) == 1 ) THEN
            ii = ii + 1
            nrank_ice(ii) = jjproc - 1
         ENDIF
      END DO

      ! Create the world group
      CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_world, ierr )

      ! Create the ice group from the world group
      CALL MPI_GROUP_INCL( ngrp_world, ndim_rank_ice, nrank_ice, ngrp_ice, ierr )

      ! Create the ice communicator , ie the pool of procs with sea-ice
      CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr )

      ! Find proc number in the world of proc 0 in the ice communicator
      ! The following line seems to be useless, we just comment & keep it as reminder
      ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_world,n_ice_root,ierr)
      !
   END SUBROUTINE mpp_ini_ice
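
   !! A hedged usage sketch (assuming the caller passes a positive pindic on
   !! processors holding sea-ice, and that mpp_sum accepts the optional kcom
   !! as in mppsum_real above; "zicevol" is hypothetical):
   !!      CALL mpp_ini_ice( icount )                ! icount > 0 on ice procs
   !!      IF( icount > 0 )   CALL mpp_sum( zicevol, kcom = ncomm_ice )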


   SUBROUTINE mpp_ini_znl
      !!----------------------------------------------------------------------
      !!               ***  routine mpp_ini_znl  ***
      !!
      !! ** Purpose :   Initialize special communicator for computing zonal sum
      !!
      !! ** Method  : - Look for processors in the same row
      !!              - Put their number in nrank_znl
      !!              - Create group for the znl processors
      !!              - Create a communicator for znl processors
      !!              - Determine if processor should write znl files
      !!
      !! ** output
      !!      ndim_rank_znl = number of processors on the same row
      !!      ngrp_znl = group ID for the znl processors
      !!      ncomm_znl = communicator for the znl procs.
      !!      l_znl_root = true on the first proc (starting from i=1) of the row
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   ierr
      INTEGER ::   jproc
      INTEGER ::   ii
      INTEGER, DIMENSION(jpnij) ::   kwork
      !
      !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world     : ', ngrp_world
      !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world
      !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_opa   : ', mpi_comm_opa
      !
      IF ( jpnj == 1 ) THEN
         ngrp_znl  = ngrp_world
         ncomm_znl = mpi_comm_opa
      ELSE
         !
         CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_opa, ierr )
         !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork
         !-$$     CALL flush(numout)
         !
         ! Count number of processors on the same row
         ndim_rank_znl = 0
         DO jproc = 1, jpnij
            IF ( kwork(jproc) == njmpp ) THEN
               ndim_rank_znl = ndim_rank_znl + 1
            ENDIF
         END DO
         !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ndim_rank_znl : ', ndim_rank_znl
         !-$$     CALL flush(numout)
         ! Allocate the right size to nrank_znl
#if ! defined key_agrif
         IF (ALLOCATED (nrank_znl))   DEALLOCATE(nrank_znl)
#else
         IF (ASSOCIATED(nrank_znl))   DEALLOCATE(nrank_znl)
#endif
         ALLOCATE(nrank_znl(ndim_rank_znl))
         ii = 0
         nrank_znl (:) = 0
         DO jproc = 1, jpnij
            IF ( kwork(jproc) == njmpp) THEN
               ii = ii + 1
               nrank_znl(ii) = jproc - 1
            ENDIF
         END DO
         !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - nrank_znl : ', nrank_znl
         !-$$     CALL flush(numout)

         ! Create the opa group
         CALL MPI_COMM_GROUP(mpi_comm_opa,ngrp_opa,ierr)
         !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa
         !-$$     CALL flush(numout)

         ! Create the znl group from the opa group
         CALL MPI_GROUP_INCL ( ngrp_opa, ndim_rank_znl, nrank_znl, ngrp_znl, ierr )
         !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_znl ', ngrp_znl
         !-$$     CALL flush(numout)

         ! Create the znl communicator from the opa communicator, ie the pool of procs in the same row
         CALL MPI_COMM_CREATE ( mpi_comm_opa, ngrp_znl, ncomm_znl, ierr )
         !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl
         !-$$     CALL flush(numout)
         !
      END IF

      ! Determine whether the processor is the first (starting from i=1) on its row
      IF ( jpni == 1 ) THEN
         l_znl_root = .TRUE.
      ELSE
         l_znl_root = .FALSE.
         kwork(1) = nimpp
         CALL mpp_min ( kwork(1), kcom = ncomm_znl )
         IF ( nimpp == kwork(1) )   l_znl_root = .TRUE.
      END IF

   END SUBROUTINE mpp_ini_znl
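
   !! A hedged sketch of a zonal diagnostic built on this communicator
   !! (assuming mpp_sum resolves to mppsum_real with its optional kcom;
   !! "zzon" is a hypothetical per-row partial sum):
   !!      CALL mpp_sum( zzon, kcom = ncomm_znl )    ! sum over the processor row
   !!      IF( l_znl_root )   WRITE(numout,*) 'zonal sum = ', zzon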


   SUBROUTINE mpp_ini_north
      !!----------------------------------------------------------------------
      !!               ***  routine mpp_ini_north  ***
      !!
      !! ** Purpose :   Initialize special communicator for north folding
      !!      condition together with global variables needed in the mpp folding
      !!
      !! ** Method  : - Look for northern processors
      !!              - Put their number in nrank_north
      !!              - Create groups for the world processors and the north processors
      !!              - Create a communicator for northern processors
      !!
      !! ** output
      !!      njmppmax = njmpp for northern procs
      !!      ndim_rank_north = number of processors in the northern line
      !!      nrank_north (ndim_rank_north) = ranks of the northern procs.
      !!      ngrp_world = group ID for the world processors
      !!      ngrp_north = group ID for the northern processors
      !!      ncomm_north = communicator for the northern procs.
      !!      north_root = number (in the world) of proc 0 in the northern comm.
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   ierr
      INTEGER ::   jjproc
      INTEGER ::   ii, ji
      !!----------------------------------------------------------------------
      !
      njmppmax = MAXVAL( njmppt )
      !
      ! Look for how many procs on the northern boundary
      ndim_rank_north = 0
      DO jjproc = 1, jpnij
         IF( njmppt(jjproc) == njmppmax )   ndim_rank_north = ndim_rank_north + 1
      END DO
      !
      ! Allocate the right size to nrank_north
#if ! defined key_agrif
      IF (ALLOCATED (nrank_north))   DEALLOCATE(nrank_north)
#else
      IF (ASSOCIATED(nrank_north))   DEALLOCATE(nrank_north)
#endif
      ALLOCATE( nrank_north(ndim_rank_north) )

      ! Fill the nrank_north array with proc. number of northern procs.
      ! Note : ranks start at 0 in MPI
      ii = 0
      DO ji = 1, jpnij
         IF ( njmppt(ji) == njmppmax ) THEN
            ii = ii + 1
            nrank_north(ii) = ji - 1
         END IF
      END DO
      !
      ! create the world group
      CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_world, ierr )
      !
      ! Create the North group from the world group
      CALL MPI_GROUP_INCL( ngrp_world, ndim_rank_north, nrank_north, ngrp_north, ierr )
      !
      ! Create the North communicator , ie the pool of procs in the north group
      CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_north, ncomm_north, ierr )
      !
   END SUBROUTINE mpp_ini_north
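
   !! Note: ncomm_north created here is the communicator over which the
   !! mpp_lbc_north_* routines below gather the last four j-lines of the
   !! global domain before applying the lbc_nfd north-fold condition.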


   SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn )
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_3d  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4 northern lines of the global domain on 1 processor
      !!              and apply lbc north-fold on this sub array. Then we
      !!              scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   pt3d      ! 3D array on which the b.c. is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type   ! nature of pt3d grid-points
      !                                                              !   = T , U , V , F or W  gridpoints
      REAL(wp)                        , INTENT(in   ) ::   psgn      ! = -1. the sign change across the north fold
      !!                                                             ! =  1. , the sign is kept
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ijpjm1, ij, iproc
      REAL(wp), DIMENSION(jpiglo,4,jpk)      ::   ztab
      REAL(wp), DIMENSION(jpi   ,4,jpk)      ::   znorthloc
      REAL(wp), DIMENSION(jpi   ,4,jpk,jpni) ::   znorthgloio
      !!----------------------------------------------------------------------
      !
      ijpj   = 4
      ijpjm1 = 3
      !
      DO jj = nlcj - ijpj +1, nlcj          ! put in znorthloc the last 4 jlines of pt3d
         ij = jj - nlcj + ijpj
         znorthloc(:,ij,:) = pt3d(:,jj,:)
      END DO
      !
      !                                     ! Build in procs of ncomm_north the znorthgloio
      itaille = jpi * jpk * ijpj
      CALL MPI_ALLGATHER( znorthloc  , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
      !
      !                                     ! recover the global north array
      DO jr = 1, ndim_rank_north
         iproc = nrank_north(jr) + 1
         ildi  = nldit (iproc)
         ilei  = nleit (iproc)
         iilb  = nimppt(iproc)
         DO jj = 1, 4
            DO ji = ildi, ilei
               ztab(ji+iilb-1,jj,:) = znorthgloio(ji,jj,:,jr)
            END DO
         END DO
      END DO
      !
      CALL lbc_nfd( ztab, cd_type, psgn )   ! North fold boundary condition
      !
      DO jj = nlcj-ijpj+1, nlcj             ! Scatter back to pt3d
         ij = jj - nlcj + ijpj
         DO ji= 1, nlci
            pt3d(ji,jj,:) = ztab(ji+nimpp-1,ij,:)
         END DO
      END DO
      !
   END SUBROUTINE mpp_lbc_north_3d
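
   !! Size check for the gather above (a hedged, illustrative calculation with
   !! hypothetical values jpi = 182 and jpk = 31): each processor contributes
   !! itaille = jpi*jpk*ijpj = 182*31*4 = 22568 double-precision values, and
   !! MPI_ALLGATHER fills znorthgloio with jpni such blocks.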


   SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn)
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_2d  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1 (for 2d array )
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4 northern lines of the global domain on 1 processor
      !!              and apply lbc north-fold on this sub array. Then we
      !!              scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) ::   pt2d      ! 2D array on which the b.c. is applied
      CHARACTER(len=1)            , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points
      !                                                          !   = T , U , V , F or W  gridpoints
      REAL(wp)                    , INTENT(in   ) ::   psgn      ! = -1. the sign change across the north fold
      !!                                                         ! =  1. , the sign is kept
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ijpjm1, ij, iproc
      REAL(wp), DIMENSION(jpiglo,4)      ::   ztab
      REAL(wp), DIMENSION(jpi   ,4)      ::   znorthloc
      REAL(wp), DIMENSION(jpi   ,4,jpni) ::   znorthgloio
      !!----------------------------------------------------------------------
      !
      ijpj   = 4
      ijpjm1 = 3
      !
      DO jj = nlcj-ijpj+1, nlcj             ! put in znorthloc the last 4 jlines of pt2d
         ij = jj - nlcj + ijpj
         znorthloc(:,ij) = pt2d(:,jj)
      END DO

      !                                     ! Build in procs of ncomm_north the znorthgloio
      itaille = jpi * ijpj
      CALL MPI_ALLGATHER( znorthloc  , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
      !
      DO jr = 1, ndim_rank_north            ! recover the global north array
         iproc = nrank_north(jr) + 1
         ildi = nldit (iproc)
         ilei = nleit (iproc)
         iilb = nimppt(iproc)
         DO jj = 1, 4
            DO ji = ildi, ilei
               ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr)
            END DO
         END DO
      END DO
      !
      CALL lbc_nfd( ztab, cd_type, psgn )   ! North fold boundary condition
      !
      !
      DO jj = nlcj-ijpj+1, nlcj             ! Scatter back to pt2d
         ij = jj - nlcj + ijpj
         DO ji = 1, nlci
            pt2d(ji,jj) = ztab(ji+nimpp-1,ij)
         END DO
      END DO
      !
   END SUBROUTINE mpp_lbc_north_2d


   SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn)
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_e  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1 and for 2d
      !!              array with outer extra halo
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4+2*jpr2dj northern lines of the global domain on 1
      !!              processor and apply lbc north-fold on this sub array.
      !!              Then we scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)                                            , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points
      !                                                                                          !   = T , U , V , F or W -points
      REAL(wp)                                                    , INTENT(in   ) ::   psgn      ! = -1. the sign change across the
      !!                                                                                         ! north fold, =  1. otherwise
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ij, iproc
      REAL(wp), DIMENSION(jpiglo,4+2*jpr2dj)      ::   ztab
      REAL(wp), DIMENSION(jpi   ,4+2*jpr2dj)      ::   znorthloc
      REAL(wp), DIMENSION(jpi   ,4+2*jpr2dj,jpni) ::   znorthgloio
      !!----------------------------------------------------------------------
      !
      ijpj = 4

      ij = 0
      ! put in znorthloc the last 4+2*jpr2dj jlines of pt2d
      DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj + jpr2dj
         ij = ij + 1
         DO ji = 1, jpi
            znorthloc(ji,ij) = pt2d(ji,jj)
         END DO
      END DO
      !
      itaille = jpi * ( ijpj + 2 * jpr2dj )
      CALL MPI_ALLGATHER( znorthloc(1,1)    , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthgloio(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
      !
      DO jr = 1, ndim_rank_north            ! recover the global north array
         iproc = nrank_north(jr) + 1
         ildi = nldit (iproc)
         ilei = nleit (iproc)
         iilb = nimppt(iproc)
         DO jj = 1, ijpj+2*jpr2dj
            DO ji = ildi, ilei
               ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr)
            END DO
         END DO
      END DO


      ! 2. North-Fold boundary conditions
      ! ----------------------------------
      CALL lbc_nfd( ztab(:,:), cd_type, psgn, pr2dj = jpr2dj )

      ij = jpr2dj
      !! Scatter back to pt2d
      DO jj = nlcj - ijpj + 1 , nlcj + jpr2dj
         ij = ij + 1
         DO ji = 1, nlci
            pt2d(ji,jj) = ztab(ji+nimpp-1,ij)
         END DO
      END DO
      !
   END SUBROUTINE mpp_lbc_north_e


   SUBROUTINE mpi_init_opa( code )
      !!---------------------------------------------------------------------
      !!                   ***  routine mpi_init_opa  ***
      !!
      !! ** Purpose :   define and attach an MPI buffer for mpi_bsend
      !!
      !! ** Method  :   buffer size defined in the namelist (nn_buffer);
      !!              if it is 0, no buffer is attached and a classical
      !!              mpi_init is performed
      !!
      !! History :   01/11  :  IDRIS : initial version for IBM only
      !!             08/04  :  R. Benshila : generalisation
      !!---------------------------------------------------------------------
      INTEGER ::   code, ierr
      LOGICAL ::   mpi_was_called
      !!---------------------------------------------------------------------
      !
      CALL mpi_initialized( mpi_was_called, code )      ! MPI initialization
      IF ( code /= MPI_SUCCESS ) THEN
         CALL ctl_stop( ' lib_mpp: Error in routine mpi_initialized' )
         CALL mpi_abort( mpi_comm_world, code, ierr )
      ENDIF
      !
      IF( .NOT. mpi_was_called ) THEN
         CALL mpi_init( code )
         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code )
         IF ( code /= MPI_SUCCESS ) THEN
            CALL ctl_stop( ' lib_mpp: Error in routine mpi_comm_dup' )
            CALL mpi_abort( mpi_comm_world, code, ierr )
         ENDIF
      ENDIF
      !
      IF( nn_buffer > 0 ) THEN
         IF ( lwp )   WRITE(numout,*) 'mpi_bsend, buffer allocation of : ', nn_buffer
         ! Buffer allocation and attachment
         ALLOCATE( tampon(nn_buffer) )
         CALL mpi_buffer_attach( tampon, nn_buffer, code )
      ENDIF
      !
   END SUBROUTINE mpi_init_opa
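
   !! A hedged note on the buffered-send path: once the buffer is attached,
   !! mpi_bsend completes locally provided all outstanding buffered messages
   !! fit in the attached buffer, so nn_buffer should be sized for the largest
   !! exchanged array plus MPI_BSEND_OVERHEAD per pending message (a standard
   !! MPI sizing rule, not something checked by this routine).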

#else
   !!----------------------------------------------------------------------
   !!   Default case:            Dummy module        share memory computing
   !!----------------------------------------------------------------------
   INTERFACE mpp_sum
      MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i
   END INTERFACE
   INTERFACE mpp_max
      MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real
   END INTERFACE
   INTERFACE mpp_min
      MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real
   END INTERFACE
   INTERFACE mppobc
      MODULE PROCEDURE mppobc_1d, mppobc_2d, mppobc_3d, mppobc_4d
   END INTERFACE
   INTERFACE mpp_minloc
      MODULE PROCEDURE mpp_minloc2d, mpp_minloc3d
   END INTERFACE
   INTERFACE mpp_maxloc
      MODULE PROCEDURE mpp_maxloc2d, mpp_maxloc3d
   END INTERFACE


   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .FALSE.      !: mpp flag
   INTEGER ::   ncomm_ice

CONTAINS

   FUNCTION mynode( ldtxt, localComm ) RESULT (function_value)
      CHARACTER(len=*), DIMENSION(:), INTENT(  out) ::   ldtxt
      INTEGER, OPTIONAL             , INTENT(in   ) ::   localComm
      INTEGER ::   function_value   ! result declaration added so the dummy matches the MPI version
      IF( PRESENT( localComm ) .OR. .NOT.PRESENT( localComm ) )   function_value = 0
      IF( .FALSE. )   ldtxt(:) = 'never done'
   END FUNCTION mynode

   SUBROUTINE mppsync                            ! Dummy routine
   END SUBROUTINE mppsync

   SUBROUTINE mpp_sum_as( parr, kdim, kcom )     ! Dummy routine
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mpp_sum_as: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mpp_sum_as

   SUBROUTINE mpp_sum_a2s( parr, kdim, kcom )    ! Dummy routine
      REAL   , DIMENSION(:,:) ::   parr
      INTEGER                 ::   kdim
      INTEGER, OPTIONAL       ::   kcom
      WRITE(*,*) 'mpp_sum_a2s: You should not have seen this print! error?', kdim, parr(1,1), kcom
   END SUBROUTINE mpp_sum_a2s

   SUBROUTINE mpp_sum_ai( karr, kdim, kcom )     ! Dummy routine
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mpp_sum_ai: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mpp_sum_ai

   SUBROUTINE mpp_sum_s( psca, kcom )            ! Dummy routine
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mpp_sum_s

   SUBROUTINE mpp_sum_i( kint, kcom )            ! Dummy routine
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mpp_sum_i

   SUBROUTINE mppmax_a_real( parr, kdim, kcom )
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmax_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mppmax_a_real

   SUBROUTINE mppmax_real( psca, kcom )
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmax_real: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mppmax_real

   SUBROUTINE mppmin_a_real( parr, kdim, kcom )
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmin_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mppmin_a_real

   SUBROUTINE mppmin_real( psca, kcom )
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmin_real: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mppmin_real

   SUBROUTINE mppmax_a_int( karr, kdim, kcom )
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmax_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mppmax_a_int

   SUBROUTINE mppmax_int( kint, kcom )
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmax_int: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mppmax_int

   SUBROUTINE mppmin_a_int( karr, kdim, kcom )
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmin_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mppmin_a_int

   SUBROUTINE mppmin_int( kint, kcom )
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmin_int: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mppmin_int

   SUBROUTINE mppobc_1d( parr, kd1, kd2, kl, kk, ktype, kij )
      INTEGER            ::   kd1, kd2, kl, kk, ktype, kij
      REAL, DIMENSION(:) ::   parr           ! variable array
      WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1), kd1, kd2, kl, kk, ktype, kij
   END SUBROUTINE mppobc_1d

   SUBROUTINE mppobc_2d( parr, kd1, kd2, kl, kk, ktype, kij )
      INTEGER              ::   kd1, kd2, kl, kk, ktype, kij
      REAL, DIMENSION(:,:) ::   parr         ! variable array
      WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1), kd1, kd2, kl, kk, ktype, kij
   END SUBROUTINE mppobc_2d

   SUBROUTINE mppobc_3d( parr, kd1, kd2, kl, kk, ktype, kij )
      INTEGER                ::   kd1, kd2, kl, kk, ktype, kij
      REAL, DIMENSION(:,:,:) ::   parr       ! variable array
      WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1,1), kd1, kd2, kl, kk, ktype, kij
   END SUBROUTINE mppobc_3d

   SUBROUTINE mppobc_4d( parr, kd1, kd2, kl, kk, ktype, kij )
      INTEGER                  ::   kd1, kd2, kl, kk, ktype, kij
      REAL, DIMENSION(:,:,:,:) ::   parr     ! variable array
      WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1,1,1), kd1, kd2, kl, kk, ktype, kij
   END SUBROUTINE mppobc_4d

   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj )
      REAL                   ::   pmin
      REAL , DIMENSION (:,:) ::   ptab, pmask
      INTEGER                ::   ki, kj
      WRITE(*,*) 'mpp_minloc2d: You should not have seen this print! error?', pmin, ki, kj, ptab(1,1), pmask(1,1)
   END SUBROUTINE mpp_minloc2d

   SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk )
      REAL                     ::   pmin
      REAL , DIMENSION (:,:,:) ::   ptab, pmask
      INTEGER                  ::   ki, kj, kk
      WRITE(*,*) 'mpp_minloc3d: You should not have seen this print! error?', pmin, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
   END SUBROUTINE mpp_minloc3d

   SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
      REAL                   ::   pmax
      REAL , DIMENSION (:,:) ::   ptab, pmask
      INTEGER                ::   ki, kj
      WRITE(*,*) 'mpp_maxloc2d: You should not have seen this print! error?', pmax, ki, kj, ptab(1,1), pmask(1,1)
   END SUBROUTINE mpp_maxloc2d

   SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
      REAL                     ::   pmax
      REAL , DIMENSION (:,:,:) ::   ptab, pmask
      INTEGER                  ::   ki, kj, kk
      WRITE(*,*) 'mpp_maxloc3d: You should not have seen this print! error?', pmax, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
   END SUBROUTINE mpp_maxloc3d

   SUBROUTINE mppstop
      WRITE(*,*) 'mppstop: You should not have seen this print! error?'
   END SUBROUTINE mppstop

   SUBROUTINE mpp_ini_ice( kcom )
      INTEGER ::   kcom
      WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom
   END SUBROUTINE mpp_ini_ice

   SUBROUTINE mpp_ini_znl
      WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?'
   END SUBROUTINE mpp_ini_znl

   SUBROUTINE mpp_comm_free( kcom )
      INTEGER ::   kcom
      WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom
   END SUBROUTINE mpp_comm_free

#endif
   !!----------------------------------------------------------------------
END MODULE lib_mpp