1 | MODULE timestep_unstructured_mod |
---|
2 | USE ISO_C_BINDING |
---|
3 | USE OMP_LIB |
---|
4 | #ifdef CPP_USING_XIOS |
---|
5 | USE xios |
---|
6 | #endif |
---|
7 | USE caldyn_unstructured_mod |
---|
8 | USE transfer_unstructured_mod |
---|
9 | IMPLICIT NONE |
---|
10 | PRIVATE |
---|
11 | SAVE |
---|
12 | |
---|
13 | #ifdef CPP_USING_XIOS |
---|
14 | TYPE(xios_context) :: ctx_hdl |
---|
15 | #endif |
---|
16 | |
---|
17 | CONTAINS |
---|
18 | |
---|
19 | #include "unstructured.h90" |
---|
20 | |
---|
21 | #define HASNAN(field) (ANY(.NOT.ABS(field)<1e20)) |
---|
22 | |
---|
! Scalar Laplacian of a field defined at mass points (primal cells).
! s    : input field at mass points ; its halo is refreshed here
! laps : output Laplacian at mass points
! The intermediate gradient lives at edges in a local scratch buffer.
SUBROUTINE scalar_laplacian(s,laps) BINDC(scalar_laplacian)
  FIELD_MASS :: s,laps     ! IN, OUT
  FIELD_U    :: grad_buf   ! scratch : gradient at edges
  ! All work happens inside one parallel region spanning the halo
  ! exchange and the compute kernel.
  !$OMP PARALLEL NUM_THREADS(nb_threads)
  CALL update_halo(transfer_primal, s)
  CALL compute_scalar_laplacian(s, grad_buf, laps)
  !$OMP END PARALLEL
END SUBROUTINE scalar_laplacian
---|
31 | |
---|
! Curl-form Laplacian of a field defined at edges.
! u    : input field at edges ; its halo is refreshed here
! lapu : output Laplacian at edges
! The intermediate vorticity lives at dual (Z) points in a local buffer.
SUBROUTINE curl_laplacian(u,lapu) BINDC(curl_laplacian)
  FIELD_U :: u,lapu      ! IN, OUT
  FIELD_Z :: vort_buf    ! scratch : vorticity at dual points
  ! Halo exchange and compute kernel both run inside one parallel region.
  !$OMP PARALLEL NUM_THREADS(nb_threads)
  CALL update_halo(transfer_edge, u)
  CALL compute_curl_laplacian(u, vort_buf, lapu)
  !$OMP END PARALLEL
END SUBROUTINE curl_laplacian
---|
40 | |
---|
41 | ! |
---|
! Evaluate all dynamics tendencies for one flow state (no time update).
! Tendencies are split into fast (du_fast, dPhi_fast, dW_fast) and slow
! (du_slow, dPhi_slow, dW_slow) parts, for use by a split/ARK integrator.
! Dispatches on the module flag 'hydrostatic' : the hydrostatic branch runs
! inside an OpenMP parallel region ; the NH branch below does not.
! Wall-clock time spent here is accumulated into the module variable 'elapsed'.
SUBROUTINE caldyn_unstructured(tau, mass_col,rhodz,theta_rhodz,u,geopot,w, & ! IN : flow state
     theta,ps,pk,hflux,qv, & ! OUT : diags (except surface geopot : IN)
     dmass_col,drhodz,dtheta_rhodz,du_fast,du_slow, &
     dPhi_fast, dPhi_slow, dW_fast, dW_slow) BINDC(caldyn_unstructured) ! OUT : tendencies
  NUM, VALUE :: tau
  FIELD_MASS   :: rhodz, drhodz, pk, berni        ! IN, OUT, DIAG
  FIELD_THETA  :: theta_rhodz, dtheta_rhodz, theta ! IN, OUT, DIAG
  FIELD_GEOPOT :: wflux, w, geopot, &             ! DIAG, INOUT
       dPhi_fast, dPhi_slow, dW_fast, dW_slow     ! OUT
  FIELD_U      :: u,du_fast,du_slow,hflux,qu      ! INOUT,OUT,OUT,DIAG
  FIELD_Z      :: qv                              ! DIAG
  FIELD_PS     :: ps,dmass_col,mass_col           ! OUT,OUT,IN (if eta_mass) or OUT,UNUSED,UNUSED (if eta_lag)
  ! buffers for fields that need to be shared among OpenMP threads
  ! vertical size is allocated as llm+1 even if only llm is needed
  FIELD_GEOPOT :: bufm1, bufm2, bufm3
  FIELD_UL     :: bufu1, bufu2, bufu3,bufu4

  TIME :: time1,time2
  INTEGER :: ij

  ! CALL CPU_TIME(time1)
  time1=OMP_GET_WTIME()

  IF(hydrostatic) THEN

     !$OMP PARALLEL NUM_THREADS(nb_threads)
     ! Zero the edge tendencies before the compute_* kernels accumulate into them.
     !$OMP DO SCHEDULE(STATIC)
     DO ij=1,edge_num
        du_fast(:,ij)=0.
        du_slow(:,ij)=0.
     END DO
     !$OMP END DO
     CALL compute_theta(mass_col,rhodz,theta_rhodz, theta)
     CALL compute_geopot(rhodz,theta, ps,pk,geopot)

     CALL compute_caldyn_fast(tau, pk,berni,theta,geopot, du_fast,u)

     CALL compute_pvort_only(rhodz,u,qv,qu)
     CALL compute_caldyn_slow_hydro(rhodz,theta,u, berni,hflux,du_slow)
     CALL compute_coriolis(hflux,theta,qu, bufu1, drhodz,dtheta_rhodz,du_slow)
     ! Vertical transport only in the mass-based-coordinate case.
     IF(caldyn_eta == eta_mass) THEN
        CALL caldyn_vert(drhodz,rhodz,theta,u, dmass_col,wflux,dtheta_rhodz,du_slow,bufu1)
     END IF
     !$OMP END PARALLEL

  ELSE ! NH
     ! NOTE(review): unlike the hydrostatic branch, this branch has no
     ! !$OMP PARALLEL region and no worksharing on the init loops —
     ! confirm whether serial execution here is intentional.
     DO ij=1,edge_num
        du_fast(:,ij)=0.
        du_slow(:,ij)=0.
     END DO
     ! Zero vertical mass flux through bottom (level 1) and top (level llm+1).
     DO ij=1,primal_num
        wflux(1,ij)=0.
        wflux(llm+1,ij)=0.
     END DO
     CALL compute_theta(mass_col,rhodz,theta_rhodz, theta)
     CALL compute_caldyn_solver(tau,rhodz,theta, bufm1,bufm2,bufm3, pk,geopot,W,dPhi_fast,dW_fast,du_fast)
     CALL compute_caldyn_fast(tau, pk,berni,theta,geopot, du_fast,u)
     CALL compute_pvort_only(rhodz,u,qv,qu)
     CALL compute_caldyn_slow_NH(u,rhodz,geopot,W, bufm1,bufm2,bufm3,bufu1,bufu2,bufu3,bufu4, hflux,du_slow,dPhi_slow,dW_slow)
     CALL compute_coriolis(hflux,theta,qu, bufu1, drhodz,dtheta_rhodz,du_slow)
     IF(caldyn_eta == eta_mass) THEN
        CALL caldyn_vert(drhodz,rhodz,theta,u, dmass_col,wflux,dtheta_rhodz,du_slow,bufu1)
        CALL compute_caldyn_vert_NH(rhodz,geopot,W,wflux, bufm1,bufm2,bufm3, du_slow,dPhi_slow,dW_slow)
     END IF
  END IF

  time2=OMP_GET_WTIME()
  ! CALL CPU_TIME(time2)
  IF(time2>time1) elapsed = elapsed + time2-time1

  CALL print_trace()
END SUBROUTINE caldyn_unstructured
---|
114 | ! |
---|
115 | !----------------------------- Time stepping ------------------------------- |
---|
116 | |
---|
117 | ! |
---|
! Advance the flow state by nstep time steps of an additive Runge-Kutta
! (ARK) scheme with nb_stage stages. At each stage, fast and slow
! tendencies are computed and stored with a trailing stage index, then
! combined into the state by update() using the modified Butcher tableaus
! cslj (slow) and cflj (fast). A single OpenMP parallel region spans the
! whole stepping loop ; the !$OMP DO constructs inside update() and the
! compute_* kernels bind to it. Wall-clock time is accumulated into 'elapsed'.
SUBROUTINE ARK_step(nstep, mass_col,rhodz,theta_rhodz,u,geopot,w, & ! INOUT : flow state
     theta,ps,pk,hflux,qv, & ! OUT : diags (except surface geopot : IN)
     dmass_col,drhodz,dtheta_rhodz,du_fast,du_slow, &
     dPhi_fast, dPhi_slow, dW_fast, dW_slow) BINDC(ARK_step) ! OUT : tendencies
  INTEGER, VALUE :: nstep ! advance by nstep time steps
  FIELD_PS     :: mass_col, ps       ! OUT,IN (if eta_mass) or OUT,UNUSED (if eta_lag)
  FIELD_MASS   :: rhodz, pk, berni   ! IN, DIAG
  FIELD_THETA  :: theta_rhodz, theta ! IN, DIAG
  FIELD_U      :: u,hflux,qu         ! INOUT,DIAG*2
  FIELD_GEOPOT :: geopot, w, wflux   ! IN, INOUT, DIAG
  FIELD_Z      :: qv                 ! DIAG
  ! Per-stage tendency storage : last dimension indexes the ARK stage.
  NUM2( primal_num, max_nb_stage) :: dmass_col ! OUT
  NUM3(llm, primal_num, max_nb_stage) :: drhodz ! OUT
  NUM4(llm, primal_num, nqdyn, max_nb_stage) :: dtheta_rhodz ! OUT
  NUM3(llm, edge_num, max_nb_stage) :: du_fast,du_slow ! OUT
  NUM3(llm+1, primal_num, max_nb_stage) :: &
       dPhi_fast, dPhi_slow, dW_fast, dW_slow ! OUT
  ! buffers for fields that need to be shared among OpenMP threads
  ! vertical size is allocated as llm+1 even if only llm is needed
  FIELD_GEOPOT :: bufm1, bufm2, bufm3
  FIELD_UL     :: bufu1, bufu2, bufu3,bufu4
  TIME :: time1,time2
  INTEGER :: step, stage, iq, ij

  !CALL CPU_TIME(time1)
  time1=OMP_GET_WTIME()

  !$OMP PARALLEL NUM_THREADS(nb_threads)

  ! Zero vertical mass flux through bottom (level 1) and top (level llm+1) ;
  ! done once, outside the time loop.
  !$OMP DO SCHEDULE(STATIC)
  DO ij=1,primal_num
     wflux(1,ij)=0.
     wflux(llm+1,ij)=0.
  END DO
  !$OMP END DO

  DO step=1, nstep
     DO stage=1, nb_stage

        ! Refresh primal-cell halos of the prognostic mass fields
        ! before computing tendencies for this stage.
        CALL update_halo(transfer_primal, rhodz)
        DO iq=1, nqdyn
           CALL update_halo(transfer_primal, theta_rhodz(:,:,iq))
        END DO

        IF(hydrostatic) THEN

           !$OMP DO SCHEDULE(STATIC)
           DO ij=1,edge_num
              du_fast(:,ij,stage)=0.
              du_slow(:,ij,stage)=0.
           END DO
           !$OMP END DO

           CALL compute_theta(mass_col,rhodz,theta_rhodz, theta)
           CALL compute_geopot(rhodz,theta, ps,pk,geopot)

           ! tauj(stage) : stage-dependent implicitness coefficient.
           CALL compute_caldyn_fast(tauj(stage), pk,berni,theta,geopot, du_fast(:,:,stage), u)
           CALL update_halo(transfer_edge, u)
           CALL compute_pvort_only(rhodz,u,qv,qu)
           CALL update_halo(transfer_edge, qu)

           CALL compute_caldyn_slow_hydro(rhodz,theta,u, berni,hflux,du_slow(:,:,stage))
           CALL compute_coriolis(hflux,theta,qu, bufu1, drhodz(:,:,stage), &
                dtheta_rhodz(:,:,:,stage),du_slow(:,:,stage))
           ! Vertical transport only in the mass-based-coordinate case.
           IF(caldyn_eta == eta_mass) THEN
              CALL caldyn_vert(drhodz(:,:,stage),rhodz,theta,u, &
                   dmass_col(:,stage),wflux,dtheta_rhodz(:,:,:,stage),du_slow(:,:,stage),bufu1)
           END IF

        ELSE ! NH
           ! NH also needs geopotential and vertical momentum halos.
           CALL update_halo(transfer_primal, geopot)
           CALL update_halo(transfer_primal, W)

           CALL compute_theta(mass_col,rhodz,theta_rhodz, theta)
           CALL compute_caldyn_solver(tauj(stage),rhodz,theta, bufm1,bufm2,bufm3, pk,geopot,W, &
                dPhi_fast(:,:,stage), dW_fast(:,:,stage), du_fast(:,:,stage))

           CALL compute_caldyn_fast(tauj(stage), pk,berni,theta,geopot, du_fast(:,:,stage),u)
           CALL update_halo(transfer_edge, u)
           CALL compute_pvort_only(rhodz,u,qv,qu)
           CALL update_halo(transfer_edge, qu)

           CALL compute_caldyn_slow_NH(u,rhodz,geopot,W, bufm1,bufm2,bufm3, &
                bufu1,bufu2,bufu3,bufu4, hflux, &
                du_slow(:,:,stage), dPhi_slow(:,:,stage), dW_slow(:,:,stage))
           CALL compute_coriolis(hflux,theta,qu, bufu1, &
                drhodz(:,:,stage),dtheta_rhodz(:,:,:,stage), du_slow(:,:,stage))
           IF(caldyn_eta == eta_mass) THEN
              CALL caldyn_vert(drhodz(:,:,stage),rhodz,theta,u, &
                   dmass_col(:,stage),wflux,dtheta_rhodz(:,:,:,stage), du_slow(:,:,stage), bufu1)
              CALL compute_caldyn_vert_NH(rhodz,geopot,W,wflux, bufm1,bufm2,bufm3, &
                   du_slow(:,:,stage), dPhi_slow(:,:,stage), dW_slow(:,:,stage) )
           END IF
        END IF ! NH

        ! FIXME : mass_col is computed from rhodz
        ! so that the DOFs are the same whatever caldyn_eta
        ! in DYNAMICO mass_col is prognosed rather than rhodz

        ! Combine stage tendencies into the state :
        ! field += SUM_l clj(l,stage)*dfield(:,l)  (see update() below).
#define UPDATE(clj,field,dfield) update(stage,SIZE(field),clj,field,dfield)

        CALL UPDATE(cslj, rhodz, drhodz)
        CALL UPDATE(cslj, theta_rhodz, dtheta_rhodz)
        CALL UPDATE(cslj, u, du_slow)
        CALL UPDATE(cflj, u, du_fast)

        ! Geopotential and vertical momentum are prognostic only in NH.
        IF(.NOT.hydrostatic) THEN
           CALL UPDATE(cslj, geopot, dPhi_slow)
           CALL UPDATE(cflj, geopot, dPhi_fast)
           CALL UPDATE(cslj, W, dW_slow)
           CALL UPDATE(cflj, W, dW_fast)
        END IF

     END DO
  END DO
  !$OMP END PARALLEL

  time2=OMP_GET_WTIME()
  ! CALL CPU_TIME(time2)
  IF(time2>time1) elapsed = elapsed + time2-time1

  CALL print_trace()

END SUBROUTINE ARK_step
---|
242 | |
---|
243 | |
---|
! Accumulate the first j stage tendencies into a field (flattened to 1D) :
!   field(ij) += SUM_{l=1..j} clj(l,j)*dfield(ij,l)
! The sum is manually unrolled per stage count (j = 1..5, presumably for
! performance — the SELECT CASE has no DEFAULT, so j outside 1..5 leaves
! 'field' unchanged). The !$OMP DO constructs are orphaned : this routine
! is called from inside the parallel region opened in ARK_step.
SUBROUTINE update(j,sz,clj,field,dfield)
  INTEGER :: j, sz ! stage in ARK scheme, field size
  NUM2(max_nb_stage,max_nb_stage) :: clj ! modified Butcher tableau
  NUM1(sz) :: field          ! INOUT : state, flattened to size sz
  NUM2(sz, max_nb_stage) :: dfield ! IN : per-stage tendencies, flattened
  !
  INTEGER :: l, ij ! NOTE(review): 'l' is declared but unused here
  ! Trace the data volume moved ; 8*sz*(j+1) presumably counts 8-byte words
  ! for field plus j tendency columns — TODO confirm against enter_trace.
  CALL enter_trace(id_update, 8*sz*(j+1) )
  !  PRINT *, clj(1:j,j)
  SELECT CASE(j)
  CASE(1)
     !$OMP DO SCHEDULE(static)
     DO ij=1,sz
        field(ij) = field(ij) &
             + clj(1,j)*dfield(ij,1)
     END DO
  CASE(2)
     !$OMP DO SCHEDULE(static)
     DO ij=1,sz
        field(ij) = field(ij) &
             + clj(1,j)*dfield(ij,1) &
             + clj(2,j)*dfield(ij,2)
     END DO
  CASE(3)
     !$OMP DO SCHEDULE(static)
     DO ij=1,sz
        field(ij) = field(ij) &
             + clj(1,j)*dfield(ij,1) &
             + clj(2,j)*dfield(ij,2) &
             + clj(3,j)*dfield(ij,3)

     END DO
  CASE(4)
     !$OMP DO SCHEDULE(static)
     DO ij=1,sz
        field(ij) = field(ij) &
             + clj(1,j)*dfield(ij,1) &
             + clj(2,j)*dfield(ij,2) &
             + clj(3,j)*dfield(ij,3) &
             + clj(4,j)*dfield(ij,4)
     END DO
  CASE(5)
     !$OMP DO SCHEDULE(static)
     DO ij=1,sz
        field(ij) = field(ij) &
             + clj(1,j)*dfield(ij,1) &
             + clj(2,j)*dfield(ij,2) &
             + clj(3,j)*dfield(ij,3) &
             + clj(4,j)*dfield(ij,4) &
             + clj(5,j)*dfield(ij,5)
     END DO
  END SELECT
  CALL exit_trace()
END SUBROUTINE update
---|
298 | |
---|
299 | !---------------------------------------------- XIOS ---------------------------------------- |
---|
300 | |
---|
301 | #ifdef CPP_USING_XIOS |
---|
302 | |
---|
! Initialize XIOS and set up this model's XIOS context ("icosagcm").
! Stores the context handle in the module variable ctx_hdl and the
! returned communicator in comm_icosa.
SUBROUTINE setup_xios() BINDC(setup_xios)
  ! MPI_INIT / MPI_finalize are assumed to be called BEFORE/AFTER this routine
  ! This is the case when calling from Python after importing mpi4py
  INTEGER :: ierr, mpi_threading_mode ! NOTE(review): declared but unused here

  PRINT *, 'Initialize XIOS and obtain our communicator'
  CALL xios_initialize("icosagcm",return_comm=comm_icosa)

  PRINT *, 'Initialize our XIOS context'

  CALL xios_context_initialize("icosagcm",comm_icosa)
  CALL xios_get_handle("icosagcm",ctx_hdl)
  CALL xios_set_current_context(ctx_hdl)
END SUBROUTINE setup_xios
---|
317 | |
---|
! Set the XIOS calendar time step, exposed to C as 'xios_set_timestep'.
! dt : time step in seconds (passed by value).
SUBROUTINE call_xios_set_timestep(dt) BINDC(xios_set_timestep)
  DBL, VALUE :: dt
  TYPE(xios_duration) :: dtime ! duration object required by the XIOS API
  dtime%second=dt
  CALL xios_set_timestep(dtime)
END SUBROUTINE call_xios_set_timestep
---|
324 | |
---|
! Advance the XIOS calendar to the given step, exposed to C as
! 'xios_update_calendar'. step : time step index (passed by value).
SUBROUTINE call_xios_update_calendar(step) BINDC(xios_update_calendar)
  INDEX, value :: step
  CALL xios_update_calendar(step)
END SUBROUTINE call_xios_update_calendar
---|
329 | |
---|
330 | #endif |
---|
331 | |
---|
332 | END MODULE timestep_unstructured_mod |
---|