1 | MODULE nemogcm |
---|
   !!======================================================================
   !!                       ***  MODULE nemogcm  ***
   !! Ocean system   : NEMO GCM (ocean dynamics, on-line tracers, biochemistry and sea-ice)
   !!======================================================================
   !! History :  OPA  ! 1990-10  (C. Levy, G. Madec)  Original code
   !!            7.0  ! 1991-11  (M. Imbard, C. Levy, G. Madec)
   !!            7.1  ! 1993-03  (M. Imbard, C. Levy, G. Madec, O. Marti, M. Guyon, A. Lazar,
   !!                             P. Delecluse, C. Perigaud, G. Caniaux, B. Colot, C. Maes) release 7.1
   !!             -   ! 1992-06  (L. Terray)  coupling implementation
   !!             -   ! 1993-11  (M.A. Filiberti)  IGLOO sea-ice
   !!            8.0  ! 1996-03  (M. Imbard, C. Levy, G. Madec, O. Marti, M. Guyon, A. Lazar,
   !!                             P. Delecluse, L. Terray, M.A. Filiberti, J. Vialar, A.M. Treguier, M. Levy) release 8.0
   !!            8.1  ! 1997-06  (M. Imbard, G. Madec)
   !!            8.2  ! 1999-11  (M. Imbard, H. Goosse)  LIM sea-ice model
   !!                 ! 1999-12  (V. Thierry, A-M. Treguier, M. Imbard, M-A. Foujols)  OPEN-MP
   !!                 ! 2000-07  (J-M Molines, M. Imbard)  Open Boundary Conditions  (CLIPPER)
   !!   NEMO    1.0  ! 2002-08  (G. Madec)  F90: Free form and modules
   !!            -   ! 2004-06  (R. Redler, NEC CCRLE, Germany)  add OASIS[3/4] coupled interfaces
   !!            -   ! 2004-08  (C. Talandier)  New trends organization
   !!            -   ! 2005-06  (C. Ethe)  Add the 1D configuration possibility
   !!            -   ! 2005-11  (V. Garnier)  Surface pressure gradient organization
   !!            -   ! 2006-03  (L. Debreu, C. Mazauric)  Agrif implementation
   !!            -   ! 2006-04  (G. Madec, R. Benshila)  Step reorganization
   !!            -   ! 2007-07  (J. Chanut, A. Sellar)  Unstructured open boundaries (BDY)
   !!           3.2  ! 2009-08  (S. Masson)  open/write in the listing file in mpp
   !!           3.3  ! 2010-05  (K. Mogensen, A. Weaver, M. Martin, D. Lea)  Assimilation interface
   !!            -   ! 2010-10  (C. Ethe, G. Madec)  reorganisation of initialisation phase
   !!           4.0  ! 2011-01  (A. R. Porter, STFC Daresbury)  dynamical allocation
   !!----------------------------------------------------------------------

   !!----------------------------------------------------------------------
   !!   nemo_gcm       : solve ocean dynamics, tracer, biogeochemistry and/or sea-ice
   !!   nemo_init      : initialization of the NEMO system
   !!   nemo_ctl       : initialisation of the control print
   !!   nemo_closefile : close remaining open files
   !!   nemo_alloc     : dynamical allocation
   !!   nemo_partition : calculate MPP domain decomposition
   !!   sqfact         : calculate factors of the no. of MPI processes
   !!----------------------------------------------------------------------
41 | USE step_oce ! module used in the ocean time stepping module |
---|
42 | USE sbc_oce ! surface boundary condition: ocean |
---|
43 | USE cla ! cross land advection (tra_cla routine) |
---|
44 | USE domcfg ! domain configuration (dom_cfg routine) |
---|
45 | USE mppini ! shared/distributed memory setting (mpp_init routine) |
---|
46 | USE domain ! domain initialization (dom_init routine) |
---|
47 | USE obcini ! open boundary cond. initialization (obc_ini routine) |
---|
48 | USE bdyini ! unstructured open boundary cond. initialization (bdy_init routine) |
---|
49 | USE istate ! initial state setting (istate_init routine) |
---|
50 | USE ldfdyn ! lateral viscosity setting (ldfdyn_init routine) |
---|
51 | USE ldftra ! lateral diffusivity setting (ldftra_init routine) |
---|
52 | USE zdfini ! vertical physics setting (zdf_init routine) |
---|
53 | USE phycst ! physical constant (par_cst routine) |
---|
54 | USE trdmod ! momentum/tracers trends (trd_mod_init routine) |
---|
55 | USE asminc ! assimilation increments (asm_inc_init routine) |
---|
56 | USE asmtrj ! writing out state trajectory |
---|
57 | USE sshwzv ! vertical velocity used in asm |
---|
58 | USE diaptr ! poleward transports (dia_ptr_init routine) |
---|
59 | USE diaobs ! Observation diagnostics (dia_obs_init routine) |
---|
60 | USE step ! NEMO time-stepping (stp routine) |
---|
61 | #if defined key_oasis3 |
---|
62 | USE cpl_oasis3 ! OASIS3 coupling |
---|
63 | #elif defined key_oasis4 |
---|
64 | USE cpl_oasis4 ! OASIS4 coupling (not working) |
---|
65 | #endif |
---|
66 | USE c1d ! 1D configuration |
---|
67 | USE step_c1d ! Time stepping loop for the 1D configuration |
---|
68 | #if defined key_top |
---|
69 | USE trcini ! passive tracer initialisation |
---|
70 | #endif |
---|
71 | USE lib_mpp ! distributed memory computing |
---|
72 | #if defined key_iomput |
---|
73 | USE mod_ioclient |
---|
74 | #endif |
---|
75 | USE partition_mod ! irregular domain partitioning |
---|
76 | USE timing, ONLY: timing_init, timing_finalize, timing_disable, timing_enable |
---|
77 | |
---|
78 | #define ARPDEBUG |
---|
79 | |
---|
80 | IMPLICIT NONE |
---|
81 | PRIVATE |
---|
82 | |
---|
83 | PUBLIC nemo_gcm ! called by model.F90 |
---|
84 | PUBLIC nemo_init ! needed by AGRIF |
---|
85 | |
---|
86 | CHARACTER(lc) :: cform_aaa="( /, 'AAAAAAAA', / ) " ! flag for output listing |
---|
87 | |
---|
88 | !!---------------------------------------------------------------------- |
---|
89 | !! NEMO/OPA 4.0 , NEMO Consortium (2011) |
---|
90 | !! $Id$ |
---|
91 | !! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt) |
---|
92 | !!---------------------------------------------------------------------- |
---|
93 | CONTAINS |
---|
94 | |
---|
95 | SUBROUTINE nemo_gcm |
---|
96 | !!---------------------------------------------------------------------- |
---|
97 | !! *** ROUTINE nemo_gcm *** |
---|
98 | !! |
---|
99 | !! ** Purpose : NEMO solves the primitive equations on an orthogonal |
---|
100 | !! curvilinear mesh on the sphere. |
---|
101 | !! |
---|
102 | !! ** Method : - model general initialization |
---|
103 | !! - launch the time-stepping (stp routine) |
---|
104 | !! - finalize the run by closing files and communications |
---|
105 | !! |
---|
106 | !! References : Madec, Delecluse, Imbard, and Levy, 1997: internal report, IPSL. |
---|
107 | !! Madec, 2008, internal report, IPSL. |
---|
108 | !!---------------------------------------------------------------------- |
---|
109 | INTEGER :: istp ! time step index |
---|
110 | !!---------------------------------------------------------------------- |
---|
111 | ! |
---|
112 | #if defined key_agrif |
---|
113 | CALL Agrif_Init_Grids() ! AGRIF: set the meshes |
---|
114 | #endif |
---|
115 | |
---|
116 | ! !-----------------------! |
---|
117 | CALL nemo_init !== Initialisations ==! |
---|
118 | ! !-----------------------! |
---|
119 | #if defined key_agrif |
---|
120 | CALL Agrif_Declare_Var ! AGRIF: set the meshes |
---|
121 | # if defined key_top |
---|
122 | CALL Agrif_Declare_Var_Top ! AGRIF: set the meshes |
---|
123 | # endif |
---|
124 | #endif |
---|
125 | ! check that all process are still there... If some process have an error, |
---|
126 | ! they will never enter in step and other processes will wait until the end of the cpu time! |
---|
127 | IF( lk_mpp ) CALL mpp_max( nstop ) |
---|
128 | |
---|
129 | IF(lwp) WRITE(numout,cform_aaa) ! Flag AAAAAAA |
---|
130 | |
---|
131 | CALL timing_enable() |
---|
132 | ! !-----------------------! |
---|
133 | ! !== time stepping ==! |
---|
134 | ! !-----------------------! |
---|
135 | istp = nit000 |
---|
136 | #if defined key_c1d |
---|
137 | DO WHILE ( istp <= nitend .AND. nstop == 0 ) |
---|
138 | CALL stp_c1d( istp ) |
---|
139 | istp = istp + 1 |
---|
140 | END DO |
---|
141 | #else |
---|
142 | IF( lk_asminc ) THEN |
---|
143 | IF( ln_bkgwri ) CALL asm_bkg_wri( nit000 - 1 ) ! Output background fields |
---|
144 | IF( ln_trjwri ) CALL asm_trj_wri( nit000 - 1 ) ! Output trajectory fields |
---|
145 | IF( ln_asmdin ) THEN ! Direct initialization |
---|
146 | IF( ln_trainc ) CALL tra_asm_inc( nit000 - 1 ) ! Tracers |
---|
147 | IF( ln_dyninc ) THEN |
---|
148 | CALL dyn_asm_inc( nit000 - 1 ) ! Dynamics |
---|
149 | IF ( ln_asmdin ) CALL ssh_wzv ( nit000 - 1 ) ! update vertical velocity |
---|
150 | ENDIF |
---|
151 | IF( ln_sshinc ) CALL ssh_asm_inc( nit000 - 1 ) ! SSH |
---|
152 | ENDIF |
---|
153 | ENDIF |
---|
154 | |
---|
155 | DO WHILE ( istp <= nitend .AND. nstop == 0 ) |
---|
156 | #if defined key_agrif |
---|
157 | CALL Agrif_Step( stp ) ! AGRIF: time stepping |
---|
158 | #else |
---|
159 | CALL stp( istp ) ! standard time stepping |
---|
160 | #endif |
---|
161 | istp = istp + 1 |
---|
162 | IF( lk_mpp ) CALL mpp_max( nstop ) |
---|
163 | END DO |
---|
164 | #endif |
---|
165 | |
---|
166 | IF( lk_diaobs ) CALL dia_obs_wri |
---|
167 | |
---|
168 | ! !------------------------! |
---|
169 | ! !== finalize the run ==! |
---|
170 | ! !------------------------! |
---|
171 | IF(lwp) WRITE(numout,cform_aaa) ! Flag AAAAAAA |
---|
172 | ! |
---|
173 | IF( nstop /= 0 .AND. lwp ) THEN ! error print |
---|
174 | WRITE(numout,cform_err) |
---|
175 | WRITE(numout,*) nstop, ' error have been found' |
---|
176 | ENDIF |
---|
177 | ! |
---|
178 | CALL timing_finalize ! Timing report |
---|
179 | |
---|
180 | CALL nemo_closefile |
---|
181 | #if defined key_oasis3 || defined key_oasis4 |
---|
182 | CALL cpl_prism_finalize ! end coupling and mpp communications with OASIS |
---|
183 | #else |
---|
184 | IF( lk_mpp ) CALL mppstop ! end mpp communications |
---|
185 | #endif |
---|
186 | ! |
---|
187 | END SUBROUTINE nemo_gcm |
---|
188 | |
---|
189 | |
---|
190 | SUBROUTINE nemo_init |
---|
191 | !!---------------------------------------------------------------------- |
---|
192 | !! *** ROUTINE nemo_init *** |
---|
193 | !! |
---|
194 | !! ** Purpose : initialization of the NEMO GCM |
---|
195 | !!---------------------------------------------------------------------- |
---|
196 | INTEGER :: ji ! dummy loop indices |
---|
197 | INTEGER :: ilocal_comm ! local integer |
---|
198 | CHARACTER(len=80), DIMENSION(24) :: cltxt |
---|
199 | !! |
---|
200 | NAMELIST/namctl/ ln_ctl , nn_print, nn_ictls, nn_ictle, & |
---|
201 | & nn_isplt, nn_jsplt, nn_jctls, nn_jctle, nn_bench |
---|
202 | !!---------------------------------------------------------------------- |
---|
203 | ! |
---|
204 | cltxt(:) = '' |
---|
205 | ! |
---|
206 | ! ! open Namelist file |
---|
207 | CALL ctl_opn( numnam, 'namelist', 'OLD', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. ) |
---|
208 | ! |
---|
209 | READ( numnam, namctl ) ! Namelist namctl : Control prints & Benchmark |
---|
210 | ! |
---|
211 | ! !--------------------------------------------! |
---|
212 | ! ! set communicator & select the local node ! |
---|
213 | ! !--------------------------------------------! |
---|
214 | #if defined key_iomput |
---|
215 | IF( Agrif_Root() ) THEN |
---|
216 | # if defined key_oasis3 || defined key_oasis4 |
---|
217 | CALL cpl_prism_init( ilocal_comm ) ! nemo local communicator given by oasis |
---|
218 | # endif |
---|
219 | CALL init_ioclient( ilocal_comm ) ! exchange io_server nemo local communicator with the io_server |
---|
220 | ENDIF |
---|
221 | narea = mynode( cltxt, numnam, nstop, ilocal_comm ) ! Nodes selection |
---|
222 | #else |
---|
223 | # if defined key_oasis3 || defined key_oasis4 |
---|
224 | IF( Agrif_Root() ) THEN |
---|
225 | CALL cpl_prism_init( ilocal_comm ) ! nemo local communicator given by oasis |
---|
226 | ENDIF |
---|
227 | narea = mynode( cltxt, numnam, nstop, ilocal_comm ) ! Nodes selection (control print return in cltxt) |
---|
228 | # else |
---|
229 | ilocal_comm = 0 |
---|
230 | narea = mynode( cltxt, numnam, nstop ) ! Nodes selection (control print return in cltxt) |
---|
231 | # endif |
---|
232 | #endif |
---|
233 | narea = narea + 1 ! mynode return the rank of proc (0 --> jpnij -1 ) |
---|
234 | |
---|
235 | lwp = (narea == 1) .OR. ln_ctl ! control of all listing output print |
---|
236 | |
---|
237 | ! Calculate domain z dimensions as needed when partitioning. |
---|
238 | ! This used to be done in par_oce.F90 when they were parameters rather |
---|
239 | ! than variables |
---|
240 | IF( Agrif_Root() ) THEN |
---|
241 | jpk = jpkdta ! third dim |
---|
242 | jpkm1 = jpk-1 ! inner domain indices |
---|
243 | jpkorig = jpk ! Copy of jpk that is NOT modified |
---|
244 | jpkf = jpk ! Max depth of this sub-domain. Initially set to jpk here |
---|
245 | ! but altered later in domzgr |
---|
246 | ENDIF |
---|
247 | |
---|
248 | CALL timing_init ! Init timing module |
---|
249 | CALL timing_disable ! but disable during startup |
---|
250 | |
---|
251 | ! If dimensions of processor grid weren't specified in the namelist file |
---|
252 | ! then we calculate them here now that we have our communicator size |
---|
253 | IF( (jpni < 1) .OR. (jpnj < 1) )THEN |
---|
254 | #if defined key_mpp_mpi |
---|
255 | #if defined key_mpp_rkpart |
---|
256 | IF( Agrif_Root() ) CALL nemo_recursive_partition(mppsize) |
---|
257 | #else |
---|
258 | IF( Agrif_Root() ) CALL nemo_partition(mppsize) |
---|
259 | #endif |
---|
260 | #else |
---|
261 | jpni = 1 |
---|
262 | jpnj = 1 |
---|
263 | jpnij = jpni*jpnj |
---|
264 | #endif |
---|
265 | |
---|
266 | #if defined key_mpp_rkpart |
---|
267 | ELSE |
---|
268 | CALL ctl_stop( 'STOP', & |
---|
269 | 'nemo_init : invalid inputs in namelist - cannot specify jpn{i,j}>0 & |
---|
270 | & when using recursive k-section paritioning!' ) |
---|
271 | #endif |
---|
272 | END IF |
---|
273 | |
---|
274 | ! Calculate domain dimensions given calculated jpni and jpnj |
---|
275 | ! This used to be done in par_oce.F90 when they were parameters rather |
---|
276 | ! than variables |
---|
277 | IF( Agrif_Root() ) THEN |
---|
278 | #if ! defined key_mpp_rkpart |
---|
279 | jpi = ( jpiglo-2*jpreci + (jpni-1) ) / jpni + 2*jpreci ! first dim. |
---|
280 | jpj = ( jpjglo-2*jprecj + (jpnj-1) ) / jpnj + 2*jprecj ! second dim. |
---|
281 | jpim1 = jpi-1 ! inner domain indices |
---|
282 | jpjm1 = jpj-1 ! " " |
---|
283 | jpij = jpi*jpj ! jpi x j |
---|
284 | #endif |
---|
285 | ENDIF |
---|
286 | |
---|
287 | IF(lwp) THEN ! open listing units |
---|
288 | ! |
---|
289 | CALL ctl_opn( numout, 'ocean.output', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., narea ) |
---|
290 | ! |
---|
291 | WRITE(numout,*) |
---|
292 | WRITE(numout,*) ' CNRS - NERC - Met OFFICE - MERCATOR-ocean' |
---|
293 | WRITE(numout,*) ' NEMO team' |
---|
294 | WRITE(numout,*) ' Ocean General Circulation Model' |
---|
295 | WRITE(numout,*) ' version 3.3 (2010) ' |
---|
296 | WRITE(numout,*) |
---|
297 | WRITE(numout,*) |
---|
298 | DO ji = 1, SIZE(cltxt,1) |
---|
299 | IF( TRIM(cltxt(ji)) /= '' ) WRITE(numout,*) cltxt(ji) ! control print of mynode |
---|
300 | END DO |
---|
301 | WRITE(numout,cform_aaa) ! Flag AAAAAAA |
---|
302 | ! |
---|
303 | ENDIF |
---|
304 | |
---|
305 | ! Now we know the dimensions of the grid and numout has been set we can |
---|
306 | ! allocate arrays |
---|
307 | CALL nemo_alloc() |
---|
308 | |
---|
309 | ! !-------------------------------! |
---|
310 | ! ! NEMO general initialization ! |
---|
311 | ! !-------------------------------! |
---|
312 | |
---|
313 | CALL nemo_ctl ! Control prints & Benchmark |
---|
314 | |
---|
315 | ! ! Domain decomposition |
---|
316 | #if defined key_mpp_rkpart |
---|
317 | CALL mpp_init3 ! Remainder of set-up for |
---|
318 | ! recursive partitioning |
---|
319 | #else |
---|
320 | IF( jpni*jpnj == jpnij ) THEN ; CALL mpp_init ! standard cutting out |
---|
321 | ELSE ; CALL mpp_init2 ! eliminate land processors |
---|
322 | ENDIF |
---|
323 | #endif |
---|
324 | ! |
---|
325 | ! ! General initialization |
---|
326 | ! CALL timing_init! Timing module |
---|
327 | CALL phy_cst ! Physical constants |
---|
328 | CALL eos_init ! Equation of state |
---|
329 | CALL dom_cfg ! Domain configuration |
---|
330 | CALL dom_init ! Domain |
---|
331 | |
---|
332 | IF( ln_ctl ) CALL prt_ctl_init ! Print control |
---|
333 | |
---|
334 | IF( lk_obc ) CALL obc_init ! Open boundaries |
---|
335 | IF( lk_bdy ) CALL bdy_init ! Unstructured open boundaries |
---|
336 | |
---|
337 | CALL istate_init ! ocean initial state (Dynamics and tracers) |
---|
338 | |
---|
339 | ! ! Ocean physics |
---|
340 | CALL sbc_init ! Forcings : surface module |
---|
341 | ! ! Vertical physics |
---|
342 | CALL zdf_init ! namelist read |
---|
343 | CALL zdf_bfr_init ! bottom friction |
---|
344 | IF( lk_zdfric ) CALL zdf_ric_init ! Richardson number dependent Kz |
---|
345 | IF( lk_zdftke ) CALL zdf_tke_init ! TKE closure scheme |
---|
346 | IF( lk_zdfgls ) CALL zdf_gls_init ! GLS closure scheme |
---|
347 | IF( lk_zdfkpp ) CALL zdf_kpp_init ! KPP closure scheme |
---|
348 | IF( lk_zdftmx ) CALL zdf_tmx_init ! tidal vertical mixing |
---|
349 | IF( lk_zdfddm .AND. .NOT. lk_zdfkpp ) & |
---|
350 | & CALL zdf_ddm_init ! double diffusive mixing |
---|
351 | ! ! Lateral physics |
---|
352 | CALL ldf_tra_init ! Lateral ocean tracer physics |
---|
353 | CALL ldf_dyn_init ! Lateral ocean momentum physics |
---|
354 | IF( lk_ldfslp ) CALL ldf_slp_init ! slope of lateral mixing |
---|
355 | |
---|
356 | ! ! Active tracers |
---|
357 | CALL tra_qsr_init ! penetrative solar radiation qsr |
---|
358 | CALL tra_bbc_init ! bottom heat flux |
---|
359 | IF( lk_trabbl ) CALL tra_bbl_init ! advective (and/or diffusive) bottom boundary layer scheme |
---|
360 | IF( lk_tradmp ) CALL tra_dmp_init ! internal damping trends |
---|
361 | CALL tra_adv_init ! horizontal & vertical advection |
---|
362 | CALL tra_ldf_init ! lateral mixing |
---|
363 | CALL tra_zdf_init ! vertical mixing and after tracer fields |
---|
364 | |
---|
365 | ! ! Dynamics |
---|
366 | CALL dyn_adv_init ! advection (vector or flux form) |
---|
367 | CALL dyn_vor_init ! vorticity term including Coriolis |
---|
368 | CALL dyn_ldf_init ! lateral mixing |
---|
369 | CALL dyn_hpg_init ! horizontal gradient of Hydrostatic pressure |
---|
370 | CALL dyn_zdf_init ! vertical diffusion |
---|
371 | CALL dyn_spg_init ! surface pressure gradient |
---|
372 | |
---|
373 | ! ! Misc. options |
---|
374 | IF( nn_cla == 1 ) CALL cla_init ! Cross Land Advection |
---|
375 | |
---|
376 | #if defined key_top |
---|
377 | ! ! Passive tracers |
---|
378 | CALL trc_init |
---|
379 | #endif |
---|
380 | ! ! Diagnostics |
---|
381 | CALL iom_init ! iom_put initialization |
---|
382 | IF( lk_floats ) CALL flo_init ! drifting Floats |
---|
383 | IF( lk_diaar5 ) CALL dia_ar5_init ! ar5 diag |
---|
384 | CALL dia_ptr_init ! Poleward TRansports initialization |
---|
385 | CALL dia_hsb_init ! heat content, salt content and volume budgets |
---|
386 | CALL trd_mod_init ! Mixed-layer/Vorticity/Integral constraints trends |
---|
387 | IF( lk_diaobs ) THEN ! Observation & model comparison |
---|
388 | CALL dia_obs_init ! Initialize observational data |
---|
389 | CALL dia_obs( nit000 - 1 ) ! Observation operator for restart |
---|
390 | ENDIF |
---|
391 | ! ! Assimilation increments |
---|
392 | IF( lk_asminc ) CALL asm_inc_init ! Initialize assimilation increments |
---|
393 | IF(lwp) WRITE(numout,*) 'Euler time step switch is ', neuler |
---|
394 | ! |
---|
395 | END SUBROUTINE nemo_init |
---|
396 | |
---|
397 | |
---|
398 | SUBROUTINE nemo_ctl |
---|
399 | !!---------------------------------------------------------------------- |
---|
400 | !! *** ROUTINE nemo_ctl *** |
---|
401 | !! |
---|
402 | !! ** Purpose : control print setting |
---|
403 | !! |
---|
404 | !! ** Method : - print namctl information and check some consistencies |
---|
405 | !!---------------------------------------------------------------------- |
---|
406 | ! |
---|
407 | IF(lwp) THEN ! control print |
---|
408 | WRITE(numout,*) |
---|
409 | WRITE(numout,*) 'nemo_ctl: Control prints & Benchmark' |
---|
410 | WRITE(numout,*) '~~~~~~~ ' |
---|
411 | WRITE(numout,*) ' Namelist namctl' |
---|
412 | WRITE(numout,*) ' run control (for debugging) ln_ctl = ', ln_ctl |
---|
413 | WRITE(numout,*) ' level of print nn_print = ', nn_print |
---|
414 | WRITE(numout,*) ' Start i indice for SUM control nn_ictls = ', nn_ictls |
---|
415 | WRITE(numout,*) ' End i indice for SUM control nn_ictle = ', nn_ictle |
---|
416 | WRITE(numout,*) ' Start j indice for SUM control nn_jctls = ', nn_jctls |
---|
417 | WRITE(numout,*) ' End j indice for SUM control nn_jctle = ', nn_jctle |
---|
418 | WRITE(numout,*) ' number of proc. following i nn_isplt = ', nn_isplt |
---|
419 | WRITE(numout,*) ' number of proc. following j nn_jsplt = ', nn_jsplt |
---|
420 | WRITE(numout,*) ' benchmark parameter (0/1) nn_bench = ', nn_bench |
---|
421 | ENDIF |
---|
422 | ! |
---|
423 | nprint = nn_print ! convert DOCTOR namelist names into OLD names |
---|
424 | nictls = nn_ictls |
---|
425 | nictle = nn_ictle |
---|
426 | njctls = nn_jctls |
---|
427 | njctle = nn_jctle |
---|
428 | isplt = nn_isplt |
---|
429 | jsplt = nn_jsplt |
---|
430 | nbench = nn_bench |
---|
431 | ! ! Parameter control |
---|
432 | ! |
---|
433 | IF( ln_ctl ) THEN ! sub-domain area indices for the control prints |
---|
434 | IF( lk_mpp ) THEN |
---|
435 | isplt = jpni ; jsplt = jpnj ; ijsplt = jpni*jpnj ! the domain is forced to the real split domain |
---|
436 | ELSE |
---|
437 | IF( isplt == 1 .AND. jsplt == 1 ) THEN |
---|
438 | CALL ctl_warn( ' - isplt & jsplt are equal to 1', & |
---|
439 | & ' - the print control will be done over the whole domain' ) |
---|
440 | ENDIF |
---|
441 | ijsplt = isplt * jsplt ! total number of processors ijsplt |
---|
442 | ENDIF |
---|
443 | IF(lwp) WRITE(numout,*)' - The total number of processors over which the' |
---|
444 | IF(lwp) WRITE(numout,*)' print control will be done is ijsplt : ', ijsplt |
---|
445 | ! |
---|
446 | ! ! indices used for the SUM control |
---|
447 | IF( nictls+nictle+njctls+njctle == 0 ) THEN ! print control done over the default area |
---|
448 | lsp_area = .FALSE. |
---|
449 | ELSE ! print control done over a specific area |
---|
450 | lsp_area = .TRUE. |
---|
451 | IF( nictls < 1 .OR. nictls > jpiglo ) THEN |
---|
452 | CALL ctl_warn( ' - nictls must be 1<=nictls>=jpiglo, it is forced to 1' ) |
---|
453 | nictls = 1 |
---|
454 | ENDIF |
---|
455 | IF( nictle < 1 .OR. nictle > jpiglo ) THEN |
---|
456 | CALL ctl_warn( ' - nictle must be 1<=nictle>=jpiglo, it is forced to jpiglo' ) |
---|
457 | nictle = jpiglo |
---|
458 | ENDIF |
---|
459 | IF( njctls < 1 .OR. njctls > jpjglo ) THEN |
---|
460 | CALL ctl_warn( ' - njctls must be 1<=njctls>=jpjglo, it is forced to 1' ) |
---|
461 | njctls = 1 |
---|
462 | ENDIF |
---|
463 | IF( njctle < 1 .OR. njctle > jpjglo ) THEN |
---|
464 | CALL ctl_warn( ' - njctle must be 1<=njctle>=jpjglo, it is forced to jpjglo' ) |
---|
465 | njctle = jpjglo |
---|
466 | ENDIF |
---|
467 | ENDIF |
---|
468 | ENDIF |
---|
469 | ! |
---|
470 | IF( nbench == 1 ) THEN ! Benchmark |
---|
471 | SELECT CASE ( cp_cfg ) |
---|
472 | CASE ( 'gyre' ) ; CALL ctl_warn( ' The Benchmark is activated ' ) |
---|
473 | CASE DEFAULT ; CALL ctl_stop( ' The Benchmark is based on the GYRE configuration:', & |
---|
474 | & ' key_gyre must be used or set nbench = 0' ) |
---|
475 | END SELECT |
---|
476 | ENDIF |
---|
477 | ! |
---|
478 | IF( lk_c1d .AND. .NOT.lk_iomput ) CALL ctl_stop( 'nemo_ctl: The 1D configuration must be used ', & |
---|
479 | & 'with the IOM Input/Output manager. ' , & |
---|
480 | & 'Compile with key_iomput enabled' ) |
---|
481 | ! |
---|
482 | END SUBROUTINE nemo_ctl |
---|
483 | |
---|
484 | |
---|
485 | SUBROUTINE nemo_closefile |
---|
486 | !!---------------------------------------------------------------------- |
---|
487 | !! *** ROUTINE nemo_closefile *** |
---|
488 | !! |
---|
489 | !! ** Purpose : Close the files |
---|
490 | !!---------------------------------------------------------------------- |
---|
491 | ! |
---|
492 | IF( lk_mpp ) CALL mppsync |
---|
493 | ! |
---|
494 | CALL iom_close ! close all input/output files managed by iom_* |
---|
495 | ! |
---|
496 | IF( numstp /= -1 ) CLOSE( numstp ) ! time-step file |
---|
497 | IF( numsol /= -1 ) CLOSE( numsol ) ! solver file |
---|
498 | IF( numnam /= -1 ) CLOSE( numnam ) ! oce namelist |
---|
499 | IF( numnam_ice /= -1 ) CLOSE( numnam_ice ) ! ice namelist |
---|
500 | IF( numevo_ice /= -1 ) CLOSE( numevo_ice ) ! ice variables (temp. evolution) |
---|
501 | IF( numout /= 6 ) CLOSE( numout ) ! standard model output file |
---|
502 | ! |
---|
503 | numout = 6 ! redefine numout in case it is used after this point... |
---|
504 | ! |
---|
505 | END SUBROUTINE nemo_closefile |
---|
506 | |
---|
507 | |
---|
508 | SUBROUTINE nemo_alloc |
---|
509 | !!---------------------------------------------------------------------- |
---|
510 | !! *** ROUTINE nemo_alloc *** |
---|
511 | !! |
---|
512 | !! ** Purpose : Allocate all the dynamic arrays of the OPA modules |
---|
513 | !! |
---|
514 | !! ** Method : |
---|
515 | !!---------------------------------------------------------------------- |
---|
516 | USE diawri , ONLY: dia_wri_alloc |
---|
517 | USE dom_oce , ONLY: dom_oce_alloc |
---|
518 | USE ldfdyn_oce, ONLY: ldfdyn_oce_alloc |
---|
519 | USE ldftra_oce, ONLY: ldftra_oce_alloc |
---|
520 | USE trc_oce , ONLY: trc_oce_alloc |
---|
521 | USE wrk_nemo , ONLY: wrk_alloc |
---|
522 | USE exchmod , ONLY: exchmod_alloc |
---|
523 | ! |
---|
524 | INTEGER :: ierr |
---|
525 | !!---------------------------------------------------------------------- |
---|
526 | ! |
---|
527 | ierr = oce_alloc () ! ocean |
---|
528 | ierr = ierr + dia_wri_alloc () |
---|
529 | ierr = ierr + dom_oce_alloc () ! ocean domain |
---|
530 | ierr = ierr + ldfdyn_oce_alloc() ! ocean lateral physics : dynamics |
---|
531 | ierr = ierr + ldftra_oce_alloc() ! ocean lateral physics : tracers |
---|
532 | ierr = ierr + zdf_oce_alloc () ! ocean vertical physics |
---|
533 | ! |
---|
534 | ierr = ierr + lib_mpp_alloc (numout) ! mpp exchanges |
---|
535 | ierr = ierr + trc_oce_alloc () ! shared TRC / TRA arrays |
---|
536 | ! |
---|
537 | ierr = ierr + wrk_alloc(numout, lwp) ! workspace |
---|
538 | ! |
---|
539 | ierr = ierr + exchmod_alloc() ! New mpp msg framework |
---|
540 | ! |
---|
541 | IF( lk_mpp ) CALL mpp_sum( ierr ) |
---|
542 | IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'nemo_alloc : unable to allocate standard ocean arrays' ) |
---|
543 | ! |
---|
544 | END SUBROUTINE nemo_alloc |
---|
545 | |
---|
546 | |
---|
547 | SUBROUTINE nemo_partition( num_pes ) |
---|
548 | USE mapcomm_mod, ONLY: trimmed |
---|
549 | !!---------------------------------------------------------------------- |
---|
550 | !! *** ROUTINE nemo_partition *** |
---|
551 | !! |
---|
552 | !! ** Purpose : Work out a sensible factorisation of the number of |
---|
553 | !! processors for the x and y dimensions. |
---|
554 | !! ** Method : |
---|
555 | !!---------------------------------------------------------------------- |
---|
556 | INTEGER, INTENT(in) :: num_pes ! The number of MPI processes we have |
---|
557 | ! |
---|
558 | INTEGER :: ifact1, ifact2 ! factors of num_pes, ifact1 <= ifact2 |
---|
559 | !!---------------------------------------------------------------------- |
---|
560 | |
---|
561 | ! Factorise the number of processors into ifact1*ifact2, such that |
---|
562 | ! ifact1 and ifact2 are as nearly equal as possible. |
---|
563 | |
---|
564 | CALL sqfact( num_pes, ifact1, ifact2 ) |
---|
565 | |
---|
566 | ! Make sure that the smaller dimension of the processor grid |
---|
567 | ! is given the smaller dimension of the global domain |
---|
568 | IF( jpiglo <= jpjglo) THEN |
---|
569 | jpni = ifact1 |
---|
570 | jpnj = ifact2 |
---|
571 | ELSE |
---|
572 | jpni = ifact2 |
---|
573 | jpnj = ifact1 |
---|
574 | ENDIF |
---|
575 | |
---|
576 | ! This should never happen |
---|
577 | IF( (jpni*jpnj) /= num_pes) THEN |
---|
578 | WRITE (numout, *) 'WARNING: internal error - factorisation of number of PEs failed' |
---|
579 | ENDIF |
---|
580 | |
---|
581 | ! This should only happen if num_pes is prime |
---|
582 | IF( ifact1 <= 1 ) THEN |
---|
583 | WRITE (numout, *) 'WARNING: factorisation of number of PEs failed' |
---|
584 | WRITE (numout, *) ' : using grid of ',jpni,' x ',jpnj |
---|
585 | ENDIF |
---|
586 | ! |
---|
587 | jpnij = jpni*jpnj |
---|
588 | ! |
---|
589 | |
---|
590 | ! Array that stores whether domain boundaries have been trimmed. Not used in |
---|
591 | ! this case (regular domain decomp.) so set all to false. |
---|
592 | ALLOCATE(trimmed(4,jpnij)) |
---|
593 | trimmed(:,:) = .FALSE. |
---|
594 | |
---|
595 | END SUBROUTINE nemo_partition |
---|
596 | |
---|
597 | |
---|
598 | SUBROUTINE nemo_recursive_partition( num_pes ) |
---|
599 | USE in_out_manager, ONLY: numnam |
---|
600 | USE dom_oce, ONLY: ln_zco |
---|
601 | USE dom_oce, ONLY: gdepw_0, gdept_0, e3w_0, e3t_0, & |
---|
602 | mig, mjg, mi0, mi1, mj0, mj1, mbathy, bathy |
---|
603 | USE domzgr, ONLY: zgr_z, zgr_bat, namzgr, zgr_zco, zgr_zps |
---|
604 | USE closea, ONLY: dom_clo |
---|
605 | USE domain, ONLY: dom_nam |
---|
606 | USE iom, ONLY: jpiglo, jpjglo, wp, jpdom_unknown, & |
---|
607 | iom_open, iom_get, iom_close |
---|
608 | USE mapcomm_mod, ONLY: ielb, ieub, pielb, pjelb, pieub, pjeub, & |
---|
609 | iesub, jesub, jeub, ilbext, iubext, jubext, & |
---|
610 | jlbext, pnactive, piesub, pjesub, jelb, pilbext, & |
---|
611 | piubext, pjlbext, pjubext, LAND, trimmed, & |
---|
612 | msgtrim_z, set_num_subdomains |
---|
613 | USE partition_mod, ONLY: partition_rk, partition_mca_rk, read_partition, & |
---|
614 | imask, ibotlevel, partition_mask_alloc, & |
---|
615 | smooth_global_bathy, global_bot_level |
---|
616 | USE par_oce, ONLY: do_exchanges |
---|
617 | #if defined key_mpp_mpi |
---|
618 | USE mpi |
---|
619 | #endif |
---|
620 | !!---------------------------------------------------------------------- |
---|
621 | !! *** ROUTINE nemo_recursive_partition *** |
---|
622 | !! |
---|
623 | !! ** Purpose : Work out a sensible factorisation of the number of |
---|
624 | !! processors for the x and y dimensions. |
---|
625 | !! ** Method : |
---|
626 | !!---------------------------------------------------------------------- |
---|
627 | IMPLICIT none |
---|
628 | INTEGER, INTENT(in) :: num_pes ! The number of MPI processes we have |
---|
629 | ! Local vars |
---|
630 | INTEGER :: ierr ! Error flag |
---|
631 | INTEGER :: ii,jj ! Loop index |
---|
632 | CHARACTER(LEN=8) :: lstr ! Local string for reading env. var. |
---|
633 | INTEGER :: lztrim ! Local int for " " " |
---|
634 | REAL(wp), ALLOCATABLE, DIMENSION(:,:) :: zdta ! temporary data workspace |
---|
635 | !!---------------------------------------------------------------------- |
---|
636 | |
---|
637 | ! Allocate masking arrays used in partitioning |
---|
638 | CALL partition_mask_alloc(jpiglo,jpjglo,ierr) |
---|
639 | IF(ierr /= 0)THEN |
---|
640 | CALL ctl_stop('nemo_recursive_partition: failed to allocate masking arrays') |
---|
641 | RETURN |
---|
642 | END IF |
---|
643 | |
---|
644 | ! Allocate local workspace array for this routine |
---|
645 | ALLOCATE(zdta(jpiglo,jpjglo), Stat=ierr) |
---|
646 | IF(ierr /= 0)THEN |
---|
647 | CALL ctl_stop('nemo_recursive_partition: failed to allocate workspace arrays') |
---|
648 | RETURN |
---|
649 | END IF |
---|
650 | |
---|
651 | ! Check whether user has specified halo trimming in z via environment |
---|
652 | ! variable. |
---|
653 | ! Halo trimming in z is on by default |
---|
654 | msgtrim_z = .TRUE. |
---|
655 | CALL GET_ENVIRONMENT_VARIABLE(NAME='NEMO_MSGTRIM_Z', VALUE=lstr, & |
---|
656 | STATUS=ierr) |
---|
657 | IF( ierr == 0)THEN |
---|
658 | READ(lstr,FMT="(I10)",IOSTAT=ierr) lztrim |
---|
659 | IF(ierr == 0)THEN |
---|
660 | IF (lztrim == 0) msgtrim_z = .FALSE. |
---|
661 | ELSE |
---|
662 | CALL ctl_warn('nemo_recursive_partition: failed to parse value of NEMO_MSGTRIM_Z environment variable: '//TRIM(lstr)) |
---|
663 | END IF |
---|
664 | END IF |
---|
665 | |
---|
666 | IF(lwp) WRITE(*,*) 'ARPDBG: msgtrim_z = ',msgtrim_z |
---|
667 | |
---|
668 | ! ============================ |
---|
669 | ! Generate a global mask from the model bathymetry |
---|
670 | ! ============================ |
---|
671 | |
---|
672 | ! Read the z-coordinate options from the namelist file |
---|
673 | REWIND(numnam) |
---|
674 | READ (numnam, namzgr) |
---|
675 | |
---|
676 | ! Read domain options from namelist file |
---|
677 | CALL dom_nam() |
---|
678 | |
---|
679 | ! Allocate these arrays so we can use domzgr::zgr_z routine; free them |
---|
680 | ! when we're done so as not to upset the 'official' allocation once |
---|
681 | ! the domain decomposition is done. |
---|
682 | ALLOCATE(gdepw_0(jpk), gdept_0(jpk), e3w_0(jpk), e3t_0(jpk), & |
---|
683 | mig(jpiglo), mjg(jpjglo), & |
---|
684 | mbathy(jpiglo,jpjglo), bathy(jpiglo,jpjglo), Stat=ierr) |
---|
685 | IF(ierr /= 0)THEN |
---|
686 | CALL ctl_stop('STOP', & |
---|
687 | 'nemo_recursive_partition: failed to allocate zgr_z() arrays') |
---|
688 | RETURN |
---|
689 | END IF |
---|
690 | |
---|
691 | ! Set-up reference depth coordinates |
---|
692 | CALL zgr_z() |
---|
693 | |
---|
694 | ! Set-up sub-domain limits as global domain for zgr_bat() |
---|
695 | nldi = 2 ; nlci = jpiglo - 1 |
---|
696 | nldj = 2 ; nlcj = jpjglo - 1 |
---|
697 | jpi = jpiglo |
---|
698 | jpj = jpjglo |
---|
699 | |
---|
700 | ! Set-up fake m{i,j}g arrays for zgr_bat() call |
---|
701 | DO ii = 1, jpiglo, 1 |
---|
702 | mig(ii) = ii |
---|
703 | mi0(ii) = ii |
---|
704 | mi1(ii) = ii |
---|
705 | END DO |
---|
706 | DO jj = 1, jpjglo, 1 |
---|
707 | mjg(jj) = jj |
---|
708 | mj0(jj) = jj |
---|
709 | mj1(jj) = jj |
---|
710 | END DO |
---|
711 | |
---|
712 | ! Initialise closed seas so loop over closed seas in zgr_bat works |
---|
713 | CALL dom_clo() |
---|
714 | |
---|
715 | ! Read-in bathy (if required) of global domain |
---|
716 | CALL zgr_bat(.TRUE.) |
---|
717 | |
---|
718 | ! land/sea mask (zero on land, 1 otherwise) over the global/zoom domain |
---|
719 | imask(:,:)=1 |
---|
720 | |
---|
721 | ! Copy bathymetry in case we need to smooth it |
---|
722 | zdta(:,:) = bathy(:,:) |
---|
723 | |
---|
724 | IF(ln_sco)THEN |
---|
725 | ! If ln_sco defined then the bathymetry gets smoothed before the |
---|
726 | ! simulation begins and that process can alter the coastlines (bug!) |
---|
727 | ! therefore we do it here too before calculating our mask. |
---|
728 | CALL smooth_global_bathy(zdta, mbathy) |
---|
729 | ELSE IF(ln_zps)THEN |
---|
730 | CALL zgr_zps(.TRUE.) |
---|
731 | ELSE IF(ln_zco)THEN |
---|
732 | ! Not certain this is required since mbathy computed in zgr_bat() |
---|
733 | ! in this case. |
---|
734 | !CALL zgr_zco() |
---|
735 | END IF |
---|
736 | |
---|
737 | ! Compute the deepest/last ocean level for every point on the grid |
---|
738 | ibotlevel(:,:) = mbathy(:,:) |
---|
739 | CALL global_bot_level(ibotlevel) |
---|
740 | |
---|
741 | ! Comment-out line below to achieve a regular partition |
---|
742 | WHERE ( zdta(:,:) <= 1.0E-20 ) imask = LAND |
---|
743 | |
---|
744 | ! Allocate partitioning arrays. |
---|
745 | |
---|
746 | IF ( .NOT. ALLOCATED(pielb) ) THEN |
---|
747 | ALLOCATE (pielb(num_pes), pieub(num_pes), piesub(num_pes), & |
---|
748 | pilbext(num_pes), piubext(num_pes), & |
---|
749 | pjelb(num_pes), pjeub(num_pes), pjesub(num_pes), & |
---|
750 | pjlbext(num_pes), pjubext(num_pes), pnactive(num_pes), & |
---|
751 | trimmed(4,num_pes), Stat = ierr) |
---|
752 | IF(ierr /= 0)THEN |
---|
753 | CALL ctl_stop('STOP', & |
---|
754 | 'nemo_recursive_partition: failed to allocate partitioning arrays') |
---|
755 | RETURN |
---|
756 | END IF |
---|
757 | ENDIF |
---|
758 | |
---|
759 | ! Set error flag so that we calculate domain decomp if not reading |
---|
760 | ! existing decomposition or if read fails. |
---|
761 | ierr = 1 |
---|
762 | |
---|
763 | IF( nn_readpart )THEN |
---|
764 | ! Read the partitioning to use from disk |
---|
765 | CALL read_partition(ierr) |
---|
766 | IF ( ierr /= 0 ) THEN |
---|
767 | CALL ctl_warn('Read of pre-calculated domain decomposition failed - will calculate one instead.') |
---|
768 | END IF |
---|
769 | END IF |
---|
770 | |
---|
771 | ! Set the number of sub-domains for which we are to partition |
---|
772 | ! (module var in mapcomm_mod) |
---|
773 | CALL set_num_subdomains(num_pes) |
---|
774 | |
---|
775 | IF(ierr /= 0)THEN |
---|
776 | ! Multi-core aware version of recursive k-section partitioning. |
---|
777 | ! Currently only accounts for whether a grid point is wet or dry. |
---|
778 | ! It has no knowledge of the number of wet levels at a point. |
---|
779 | CALL partition_mca_rk ( imask, 1, jpiglo, 1, jpjglo, ierr ) |
---|
780 | |
---|
781 | ! Now we can do recursive k-section partitioning |
---|
782 | ! ARPDBG - BUG if limits on array below are set to anything other than |
---|
783 | ! 1 and jp{i,j}glo then check for external boundaries in a few lines |
---|
784 | ! time WILL FAIL! |
---|
785 | ! CALL partition_rk ( imask, 1, jpiglo, 1, jpjglo, ierr ) |
---|
786 | END IF |
---|
787 | |
---|
788 | ! Check the error code from partitioning. |
---|
789 | IF ( ierr /= 0 ) THEN |
---|
790 | CALL ctl_stop('STOP','nemo_recursive_partition: Partitioning failed') |
---|
791 | RETURN |
---|
792 | ENDIF |
---|
793 | |
---|
794 | ! If we used generate_fake_land() above then we must set |
---|
795 | ! the mask correctly now we've partitioned. This is only |
---|
796 | ! necessary when testing. |
---|
797 | !WHERE ( zdta(:,:) <= 0. ) imask = 0 |
---|
798 | |
---|
799 | ! ARPDBG Quick and dirty dump to stdout in gnuplot form |
---|
800 | IF(narea == 1)THEN |
---|
801 | OPEN(UNIT=998, FILE="imask.dat", & |
---|
802 | STATUS='REPLACE', ACTION='WRITE', IOSTAT=jj) |
---|
803 | IF( jj == 0 )THEN |
---|
804 | WRITE (998,*) '# Depth map' |
---|
805 | WRITE (998,*) '# i j bathy imask ibotlevel mbathy' |
---|
806 | DO jj = 1, jpjglo, 1 |
---|
807 | DO ii = 1, jpiglo, 1 |
---|
808 | WRITE (998,"(I4,1x,I4,1x,E16.6,1x,I4,1x,I4,1x,I4)") & |
---|
809 | ii, jj, zdta(ii,jj), imask(ii,jj), ibotlevel(ii,jj), mbathy(ii,jj) |
---|
810 | END DO |
---|
811 | WRITE (998,*) |
---|
812 | END DO |
---|
813 | CLOSE(998) |
---|
814 | END IF |
---|
815 | END IF |
---|
816 | |
---|
817 | jpkm1 = jpk - 1 |
---|
818 | |
---|
819 | ! This chunk taken directly from original mpp_ini - not sure why nbondi |
---|
820 | ! is reset? However, if it isn't reset then bad things happen in dommsk |
---|
821 | ! so I'm doing what the original code does... |
---|
822 | nperio = 0 |
---|
823 | nbondi = 0 |
---|
824 | IF( jperio == 1 .OR. jperio == 4 .OR. jperio == 6 ) THEN |
---|
825 | IF( jpni == 1 )THEN |
---|
826 | nbondi = 2 |
---|
827 | nperio = 1 |
---|
828 | END IF |
---|
829 | END IF |
---|
830 | |
---|
831 | #if defined ARPDEBUG |
---|
832 | ! This output is REQUIRED by the check_nemo_comms.pl test script |
---|
833 | WRITE (*,FMT="(I4,' : ARPDBG: ielb, ieub, iesub = ',3I5)") narea-1,& |
---|
834 | ielb, ieub, iesub |
---|
835 | WRITE (*,FMT="(I4,' : ARPDBG: jelb, jeub, jesub = ',3I5)") narea-1,& |
---|
836 | jelb, jeub, jesub |
---|
837 | WRITE (*,FMT="(I4,' : ARPDBG: nldi, nlei, nlci = ',3I5)") narea-1, & |
---|
838 | nldi, nlei, nlci |
---|
839 | WRITE (*,FMT="(I4,' : ARPDBG: nldj, nlej, nlcj = ',3I5)") narea-1, & |
---|
840 | nldj, nlej, nlcj |
---|
841 | WRITE (*,FMT="(I4,' : ARPDBG: jpi, jpj = ',2I5)") narea-1, jpi, jpj |
---|
842 | WRITE (*,FMT="(I4,' : ARPDBG: nimpp, njmpp = ',2I5)") narea-1, & |
---|
843 | nimpp, njmpp |
---|
844 | #endif |
---|
845 | |
---|
846 | ! Debugging option - can turn off all halo exchanges by setting this to |
---|
847 | ! false. |
---|
848 | do_exchanges = .TRUE. |
---|
849 | |
---|
850 | ! Free the domzgr/_oce member arrays that we used earlier in zgr_z() and |
---|
851 | ! zgr_bat(). |
---|
852 | DEALLOCATE(gdepw_0, gdept_0, e3w_0, e3t_0, mig, mjg, & |
---|
853 | mbathy, bathy) |
---|
854 | |
---|
855 | END SUBROUTINE nemo_recursive_partition |
---|
856 | |
---|
857 | |
---|
SUBROUTINE sqfact ( kn, kna, knb )
   !!----------------------------------------------------------------------
   !!                     ***  ROUTINE sqfact  ***
   !!
   !! ** Purpose :   return factors (kna, knb) of kn, such that
   !!                  (1) kna*knb = kn
   !!                  (2) kna and knb are as near equal as possible
   !!                  (3) kna <= knb  (equality when kn is a perfect square)
   !! ** Method  :   search backwards from the truncated square root of kn
   !!                until an integer that cleanly divides kn is found; that
   !!                integer is the largest factor not exceeding SQRT(kn).
   !! ** Preconditions : kn must be positive
   !!----------------------------------------------------------------------
   INTEGER, INTENT(in   ) :: kn         ! number to factorize
   INTEGER, INTENT(  out) :: kna, knb   ! factors: kna <= knb and kna*knb == kn
   !!----------------------------------------------------------------------

   ! Search backwards from the square root of kn; the first divisor found
   ! gives the most nearly square factorization.
   fact_loop: DO kna = INT(SQRT(REAL(kn))), 1, -1
      IF ( MOD(kn, kna) == 0 ) THEN
         EXIT fact_loop
      ENDIF
   END DO fact_loop

   ! Safety net: if the loop ran to completion without exiting (can only
   ! happen if INT(SQRT(...)) < 1), fall back to the trivial factor 1.
   IF( kna < 1 ) kna = 1

   ! kna divides kn cleanly. Work out the other factor.
   knb = kn/kna

END SUBROUTINE sqfact
---|
887 | |
---|
888 | |
---|
SUBROUTINE generate_fake_land(imask)
   !!----------------------------------------------------------------------
   !!               ***  ROUTINE generate_fake_land  ***
   !!
   !! ** Purpose :   Overwrite part of the supplied land/sea mask with a
   !!                fake, diamond-shaped land mass centred on the middle
   !!                of the global domain. Used only to test the domain
   !!                decomposition code.
   !!
   !! ** Method  :   Starting from the central row, successive rows of land
   !!                points (imask == 0) are written moving away from the
   !!                centre, each row one point narrower at both ends than
   !!                the previous one, giving a diamond of half-width
   !!                jpiglo/8 and half-height jpjglo/8.
   !!----------------------------------------------------------------------
   USE par_oce, ONLY: jpiglo, jpjglo
   IMPLICIT none
   ! imask is zero on land points, unity on ocean points
   INTEGER, DIMENSION(jpiglo,jpjglo), INTENT(inout) :: imask
   ! Locals
   INTEGER :: jj                  ! Loop index over rows
   INTEGER :: icentre, jcentre    ! Indices of the domain centre point
   INTEGER :: iwidth, iheight     ! Half-width/half-height of the fake island
   INTEGER :: istart, istop       ! i-extent of the current row of land points

   iwidth  = jpiglo/8
   iheight = jpjglo/8

   icentre = jpiglo/2
   jcentre = jpjglo/2

   ! Lower half of the diamond (central row included), narrowing by one
   ! point at each end as we move downwards
   istart = icentre - iwidth
   istop  = icentre + iwidth
   DO jj = jcentre, jcentre - iheight, -1
      imask(istart:istop,jj) = 0
      istart = istart + 1
      istop  = istop - 1
   END DO

   ! Upper half of the diamond, narrowing as we move upwards
   istart = icentre - iwidth
   istop  = icentre + iwidth
   DO jj = jcentre+1, jcentre + iheight, 1
      imask(istart:istop,jj) = 0
      istart = istart + 1
      istop  = istop - 1
   END DO

END SUBROUTINE generate_fake_land
---|
936 | |
---|
937 | !!====================================================================== |
---|
938 | END MODULE nemogcm |
---|