Changeset 7508 for branches/2016/dev_r6519_HPC_4
- Timestamp:
- 2016-12-19T13:15:59+01:00 (8 years ago)
- Location:
- branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO
- Files:
-
- 91 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limdyn_2.F90
r7037 r7508 78 78 ! 79 79 ! Mean ice and snow thicknesses. 80 !$OMP PARALLEL WORKSHARE 81 hsnm(:,:) = ( 1.0 - frld(:,:) ) * hsnif(:,:) 82 hicm(:,:) = ( 1.0 - frld(:,:) ) * hicif(:,:) 83 !$OMP END PARALLEL WORKSHARE 80 !$OMP PARALLEL DO schedule(static) private(jj, ji) 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 hsnm(ji,jj) = ( 1.0 - frld(ji,jj) ) * hsnif(ji,jj) 84 hicm(ji,jj) = ( 1.0 - frld(ji,jj) ) * hicif(ji,jj) 85 END DO 86 END DO 84 87 ! 85 88 ! ! Rheology (ice dynamics) … … 172 175 SELECT CASE( cp_ice_msh ) ! ice-ocean relative velocity at u- & v-pts 173 176 CASE( 'C' ) ! EVP : C-grid ice dynamics 174 !$OMP PARALLEL WORKSHARE 175 zu_io(:,:) = u_ice(:,:) - ssu_m(:,:) ! ice-ocean & ice velocity at ocean velocity points 176 zv_io(:,:) = v_ice(:,:) - ssv_m(:,:) 177 !$OMP END PARALLEL WORKSHARE 177 !$OMP PARALLEL DO schedule(static) private(jj, ji) 178 DO jj = 1, jpj 179 DO ji = 1, jpi 180 zu_io(ji,jj) = u_ice(ji,jj) - ssu_m(ji,jj) ! ice-ocean & ice velocity at ocean velocity points 181 zv_io(ji,jj) = v_ice(ji,jj) - ssv_m(ji,jj) 182 END DO 183 END DO 178 184 CASE( 'I' ) ! VP : B-grid ice dynamics (I-point) 179 185 !$OMP PARALLEL DO schedule(static) private(jj, ji) … … 232 238 !!------------------------------------------------------------------- 233 239 INTEGER :: ios ! Local integer output status for namelist read 240 INTEGER :: ji, jj ! dummy loop indices 234 241 NAMELIST/namicedyn/ epsd, alpha, & 235 242 & dm, nbiter, nbitdr, om, resl, cw, angvg, pstar, & … … 285 292 pstarh = pstar / 2.0 286 293 ! 287 !$OMP PARALLEL WORKSHARE 288 ahiu(:,:) = ahi0 * umask(:,:,1) ! Ice eddy Diffusivity coefficients. 289 ahiv(:,:) = ahi0 * vmask(:,:,1) 290 !$OMP END PARALLEL WORKSHARE 294 !$OMP PARALLEL DO schedule(static) private(jj, ji) 295 DO jj = 1, jpj 296 DO ji = 1, jpi 297 ahiu(ji,jj) = ahi0 * umask(ji,jj,1) ! Ice eddy Diffusivity coefficients. 298 ahiv(ji,jj) = ahi0 * vmask(ji,jj,1) 299 END DO 300 END DO 291 301 ! 292 302 END SUBROUTINE lim_dyn_init_2 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limhdf_2.F90
r7037 r7508 84 84 zeps = 2._wp * epsi04 85 85 ! 86 !$OMP PARALLEL WORKSHARE 87 ztab0(:, : ) = ptab(:,:) ! Arrays initialization 88 !$OMP END PARALLEL WORKSHARE 86 !$OMP PARALLEL DO schedule(static) private(jj, ji) 87 DO jj = 1, jpj 88 DO ji = 1, jpi 89 ztab0(ji, jj ) = ptab(ji,jj) ! Arrays initialization 90 END DO 91 END DO 89 92 zdiv0(:, 1 ) = 0._wp 90 93 zdiv0(:,jpj) = 0._wp … … 151 154 IF( lk_mpp ) CALL mpp_max( zconv ) ! max over the global domain 152 155 153 !$OMP PARALLEL WORKSHARE 154 ptab(:,:) = zrlx(:,:) 155 !$OMP END PARALLEL WORKSHARE 156 !$OMP PARALLEL DO schedule(static) private(jj, ji) 157 DO jj = 1, jpj 158 DO ji = 1, jpi 159 ptab(ji,jj) = zrlx(ji,jj) 160 END DO 161 END DO 156 162 ! 157 163 END DO ! end of sub-time step loop 158 164 159 165 IF(ln_ctl) THEN 160 !$OMP PARALLEL WORKSHARE 161 zrlx(:,:) = ptab(:,:) - ztab0(:,:) 162 !$OMP END PARALLEL WORKSHARE 166 !$OMP PARALLEL DO schedule(static) private(jj, ji) 167 DO jj = 1, jpj 168 DO ji = 1, jpi 169 zrlx(ji,jj) = ptab(ji,jj) - ztab0(ji,jj) 170 END DO 171 END DO 163 172 WRITE(charout,FMT="(' lim_hdf : zconv =',D23.16, ' iter =',I4,2X)") zconv, iter 164 173 CALL prt_ctl( tab2d_1=zrlx, clinfo1=charout ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limistate_2.F90
r7037 r7508 71 71 CALL eos_fzp( tsn(:,:,1,jp_sal), tfu(:,:) ) ! freezing/melting point of sea water [Celcius] 72 72 !$OMP PARALLEL 73 !$OMP WORKSHARE 74 tfu(:,:) = tfu(:,:) * tmask(:,:,1) 75 !$OMP END WORKSHARE 73 !$OMP DO schedule(static) private(jj, ji) 74 DO jj = 1, jpj 75 DO ji = 1, jpi 76 tfu(ji,jj) = tfu(ji,jj) * tmask(ji,jj,1) 77 END DO 78 END DO 76 79 77 80 !$OMP DO schedule(static) private(jj, ji) … … 95 98 END DO 96 99 97 !$OMP WORKSHARE 98 tfu(:,:) = tfu(:,:) + rt0 ! ftu converted from Celsius to Kelvin (rt0 over land) 100 !$OMP DO schedule(static) private(jj, ji) 101 DO jj = 1, jpj 102 DO ji = 1, jpi 103 tfu(ji,jj) = tfu(ji,jj) + rt0 ! ftu converted from Celsius to Kelvin (rt0 over land) 99 104 100 sist (:,:) = tfu(:,:) 101 tbif (:,:,1) = tfu(:,:) 102 tbif (:,:,2) = tfu(:,:) 103 tbif (:,:,3) = tfu(:,:) 104 !$OMP END WORKSHARE 105 sist (ji,jj) = tfu(ji,jj) 106 tbif (ji,jj,1) = tfu(ji,jj) 107 tbif (ji,jj,2) = tfu(ji,jj) 108 tbif (ji,jj,3) = tfu(ji,jj) 109 END DO 110 END DO 105 111 106 112 !$OMP END PARALLEL 107 113 ENDIF 108 114 109 !$OMP PARALLEL WORKSHARE 110 fsbbq (:,:) = 0.e0 111 qstoif(:,:) = 0.e0 112 u_ice (:,:) = 0.e0 113 v_ice (:,:) = 0.e0 114 115 !--- Moments for advection. 
116 117 sxice (:,:) = 0.e0 ; sxsn (:,:) = 0.e0 ; sxa (:,:) = 0.e0 118 syice (:,:) = 0.e0 ; sysn (:,:) = 0.e0 ; sya (:,:) = 0.e0 119 sxxice(:,:) = 0.e0 ; sxxsn(:,:) = 0.e0 ; sxxa (:,:) = 0.e0 120 syyice(:,:) = 0.e0 ; syysn(:,:) = 0.e0 ; syya (:,:) = 0.e0 121 sxyice(:,:) = 0.e0 ; sxysn(:,:) = 0.e0 ; sxya (:,:) = 0.e0 122 123 sxc0 (:,:) = 0.e0 ; sxc1 (:,:) = 0.e0 ; sxc2 (:,:) = 0.e0 124 syc0 (:,:) = 0.e0 ; syc1 (:,:) = 0.e0 ; syc2 (:,:) = 0.e0 125 sxxc0 (:,:) = 0.e0 ; sxxc1(:,:) = 0.e0 ; sxxc2(:,:) = 0.e0 126 syyc0 (:,:) = 0.e0 ; syyc1(:,:) = 0.e0 ; syyc2(:,:) = 0.e0 127 sxyc0 (:,:) = 0.e0 ; sxyc1(:,:) = 0.e0 ; sxyc2(:,:) = 0.e0 128 129 sxst (:,:) = 0.e0 130 syst (:,:) = 0.e0 131 sxxst (:,:) = 0.e0 132 syyst (:,:) = 0.e0 133 sxyst (:,:) = 0.e0 115 !$OMP PARALLEL DO schedule(static) private(jj, ji) 116 DO jj = 1, jpj 117 DO ji = 1, jpi 118 fsbbq (ji,jj) = 0.e0 119 qstoif(ji,jj) = 0.e0 120 u_ice (ji,jj) = 0.e0 121 v_ice (ji,jj) = 0.e0 122 123 !--- Moments for advection. 124 125 sxice (ji,jj) = 0.e0 ; sxsn (ji,jj) = 0.e0 ; sxa (ji,jj) = 0.e0 126 syice (ji,jj) = 0.e0 ; sysn (ji,jj) = 0.e0 ; sya (ji,jj) = 0.e0 127 sxxice(ji,jj) = 0.e0 ; sxxsn(ji,jj) = 0.e0 ; sxxa (ji,jj) = 0.e0 128 syyice(ji,jj) = 0.e0 ; syysn(ji,jj) = 0.e0 ; syya (ji,jj) = 0.e0 129 sxyice(ji,jj) = 0.e0 ; sxysn(ji,jj) = 0.e0 ; sxya (ji,jj) = 0.e0 130 131 sxc0 (ji,jj) = 0.e0 ; sxc1 (ji,jj) = 0.e0 ; sxc2 (ji,jj) = 0.e0 132 syc0 (ji,jj) = 0.e0 ; syc1 (ji,jj) = 0.e0 ; syc2 (ji,jj) = 0.e0 133 sxxc0 (ji,jj) = 0.e0 ; sxxc1(ji,jj) = 0.e0 ; sxxc2(ji,jj) = 0.e0 134 syyc0 (ji,jj) = 0.e0 ; syyc1(ji,jj) = 0.e0 ; syyc2(ji,jj) = 0.e0 135 sxyc0 (ji,jj) = 0.e0 ; sxyc1(ji,jj) = 0.e0 ; sxyc2(ji,jj) = 0.e0 136 137 sxst (ji,jj) = 0.e0 138 syst (ji,jj) = 0.e0 139 sxxst (ji,jj) = 0.e0 140 syyst (ji,jj) = 0.e0 141 sxyst (ji,jj) = 0.e0 134 142 #if ! defined key_lim2_vp 135 stress1_i (:,:) = 0._wp ! EVP rheology136 stress2_i (:,:) = 0._wp137 stress12_i(:,:) = 0._wp143 stress1_i (ji,jj) = 0._wp ! 
EVP rheology 144 stress2_i (ji,jj) = 0._wp 145 stress12_i(ji,jj) = 0._wp 138 146 #endif 139 !$OMP END PARALLEL WORKSHARE 147 END DO 148 END DO 140 149 141 150 !-- lateral boundary conditions … … 145 154 ! C A U T I O N frld = 1 over land and lbc_lnk put zero along 146 155 ! ************* closed boundaries herefore we force to one over land 147 !$OMP PARALLEL WORKSHARE 148 frld(:,:) = tms(:,:) * frld(:,:) + ( 1. - tms(:,:) ) 149 !$OMP END PARALLEL WORKSHARE 156 !$OMP PARALLEL DO schedule(static) private(jj, ji) 157 DO jj = 1, jpj 158 DO ji = 1, jpi 159 frld(ji,jj) = tms(ji,jj) * frld(ji,jj) + ( 1. - tms(ji,jj) ) 160 END DO 161 END DO 150 162 151 163 CALL lbc_lnk( hsnif, 'T', 1. ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limmsh_2.F90
r7037 r7508 152 152 153 153 !$OMP PARALLEL 154 !$OMP WORKSHARE 155 akappa(:,:,1,1) = 1.0 / ( 2.0 * e1t(:,:) ) 156 akappa(:,:,1,2) = zd1d2(:,:) / ( 4.0 * e1t(:,:) * e2t(:,:) ) 157 akappa(:,:,2,1) = zd2d1(:,:) / ( 4.0 * e1t(:,:) * e2t(:,:) ) 158 akappa(:,:,2,2) = 1.0 / ( 2.0 * e2t(:,:) ) 159 !$OMP END WORKSHARE NOWAIT 154 !$OMP DO schedule(static) private(jj, ji) 155 DO jj = 1, jpj 156 DO ji = 1, jpi 157 akappa(ji,jj,1,1) = 1.0 / ( 2.0 * e1t(ji,jj) ) 158 akappa(ji,jj,1,2) = zd1d2(ji,jj) / ( 4.0 * e1t(ji,jj) * e2t(ji,jj) ) 159 akappa(ji,jj,2,1) = zd2d1(ji,jj) / ( 4.0 * e1t(ji,jj) * e2t(ji,jj) ) 160 akappa(ji,jj,2,2) = 1.0 / ( 2.0 * e2t(ji,jj) ) 161 END DO 162 END DO 163 !$OMP END DO NOWAIT 160 164 161 165 ! ! weights (wght) … … 273 277 !---------------------------- 274 278 275 !$OMP PARALLEL WORKSHARE 276 tms(:,:) = tmask(:,:,1) ! ice T-point : use surface tmask 277 278 !$OMP END PARALLEL WORKSHARE 279 !$OMP PARALLEL DO schedule(static) private(jj, ji) 280 DO jj = 1, jpj 281 DO ji = 1, jpi 282 tms(ji,jj) = tmask(ji,jj,1) ! ice T-point : use surface tmask 283 284 END DO 285 END DO 279 286 #if defined key_lim2_vp 280 287 ! VP rheology : ice velocity point is I-point … … 292 299 ! EVP rheology : ice velocity point are U- & V-points ; ice vorticity 293 300 ! point is F-point 294 !$OMP PARALLEL WORKSHARE 295 tmu(:,:) = umask(:,:,1) 296 tmv(:,:) = vmask(:,:,1) 297 tmf(:,:) = 0.e0 ! used of fmask except its special value along the coast (rn_shlat) 298 WHERE( fmask(:,:,1) == 1.e0 ) tmf(:,:) = 1.e0 299 !$OMP END PARALLEL WORKSHARE 301 !$OMP PARALLEL DO schedule(static) private(jj, ji) 302 DO jj = 1, jpj 303 DO ji = 1, jpi 304 tmu(ji,jj) = umask(ji,jj,1) 305 tmv(ji,jj) = vmask(ji,jj,1) 306 tmf(ji,jj) = 0.e0 ! used of fmask except its special value along the coast (rn_shlat) 307 IF( fmask(ji,jj,1) == 1.e0 ) tmf(ji,jj) = 1.e0 308 END DO 309 END DO 300 310 #endif 301 311 ! 302 312 ! 
unmasked and masked area of T-grid cell 303 !$OMP PARALLEL WORKSHARE 304 area(:,:) = e1t(:,:) * e2t(:,:) 305 !$OMP END PARALLEL WORKSHARE 313 !$OMP PARALLEL DO schedule(static) private(jj, ji) 314 DO jj = 1, jpj 315 DO ji = 1, jpi 316 area(ji,jj) = e1t(ji,jj) * e2t(ji,jj) 317 END DO 318 END DO 306 319 ! 307 320 #if defined key_lim2_vp -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limsbc_2.F90
r7037 r7508 129 129 130 130 !$OMP PARALLEL 131 !$OMP WORKSHARE 132 zqnsoce(:,:) = qns(:,:) 133 !$OMP END WORKSHARE NOWAIT 131 !$OMP DO schedule(static) private(jj, ji) 132 DO jj = 1, jpj 133 DO ji = 1, jpi 134 zqnsoce(ji,jj) = qns(ji,jj) 135 END DO 136 END DO 137 !$OMP END DO NOWAIT 134 138 !$OMP DO schedule(static) private(jj,ji,zinda,ifvt,i1mfr,idfr,iflt,ial,iadv,ifral,ifrdv,zqsr,zqns,zqhc,zemp,zemp_snw,zfmm,zfsalt,zcd) 135 139 DO jj = 1, jpj … … 239 243 ! !------------------------------------------! 240 244 IF( nn_ice_embd /= 0 ) THEN ! embedded sea-ice (mass required) 241 !$OMP PARALLEL WORKSHARE 242 snwice_mass_b(:,:) = snwice_mass(:,:) ! save mass from the previous ice time step 245 !$OMP PARALLEL DO schedule(static) private(jj, ji) 246 DO jj = 1, jpj 247 DO ji = 1, jpi 248 snwice_mass_b(ji,jj) = snwice_mass(ji,jj) ! save mass from the previous ice time step 243 249 ! ! new mass per unit area 244 snwice_mass ( :,:) = tms(:,:) * ( rhosn * hsnif(:,:) + rhoic * hicif(:,:) ) * ( 1.0 - frld(:,:) )250 snwice_mass (ji,jj) = tms(ji,jj) * ( rhosn * hsnif(ji,jj) + rhoic * hicif(ji,jj) ) * ( 1.0 - frld(ji,jj) ) 245 251 ! ! time evolution of snow+ice mass 246 snwice_fmass (:,:) = ( snwice_mass(:,:) - snwice_mass_b(:,:) ) / rdt_ice 247 !$OMP END PARALLEL WORKSHARE 252 snwice_fmass (ji,jj) = ( snwice_mass(ji,jj) - snwice_mass_b(ji,jj) ) / rdt_ice 253 END DO 254 END DO 248 255 ENDIF 249 256 … … 261 268 262 269 IF( ln_cpl) THEN 263 !$OMP PARALLEL WORKSHARE 264 tn_ice(:,:,1) = sist(:,:) ! sea-ice surface temperature 265 ht_i(:,:,1) = hicif(:,:) 266 ht_s(:,:,1) = hsnif(:,:) 267 a_i(:,:,1) = fr_i(:,:) 268 !$OMP END PARALLEL WORKSHARE 270 !$OMP PARALLEL DO schedule(static) private(jj, ji) 271 DO jj = 1, jpj 272 DO ji = 1, jpi 273 tn_ice(ji,jj,1) = sist(ji,jj) ! sea-ice surface temperature 274 ht_i(ji,jj,1) = hicif(ji,jj) 275 ht_s(ji,jj,1) = hsnif(ji,jj) 276 a_i(ji,jj,1) = fr_i(ji,jj) 277 END DO 278 END DO 269 279 ! ! 
Computation of snow/ice and ocean albedo 270 280 CALL albedo_ice( tn_ice, ht_i, ht_s, zalbp, zalb ) 271 !$OMP PARALLEL WORKSHARE 272 alb_ice(:,:,1) = 0.5 * ( zalbp(:,:,1) + zalb (:,:,1) ) ! Ice albedo (mean clear and overcast skys) 273 !$OMP END PARALLEL WORKSHARE 281 !$OMP PARALLEL DO schedule(static) private(jj, ji) 282 DO jj = 1, jpj 283 DO ji = 1, jpi 284 alb_ice(ji,jj,1) = 0.5 * ( zalbp(ji,jj,1) + zalb (ji,jj,1) ) ! Ice albedo (mean clear and overcast skys) 285 END DO 286 END DO 274 287 IF( iom_use('icealb_cea' ) ) CALL iom_put( 'icealb_cea', alb_ice(:,:,1) * fr_i(:,:) ) ! ice albedo 275 288 ENDIF … … 355 368 CALL lbc_lnk( taum, 'T', 1. ) 356 369 ! 357 !$OMP PARALLEL WORKSHARE 358 utau_oce(:,:) = utau(:,:) !* save the air-ocean stresses at ice time-step 359 vtau_oce(:,:) = vtau(:,:) 360 !$OMP END PARALLEL WORKSHARE 370 !$OMP PARALLEL DO schedule(static) private(jj, ji) 371 DO jj = 1, jpj 372 DO ji = 1, jpi 373 utau_oce(ji,jj) = utau(ji,jj) !* save the air-ocean stresses at ice time-step 374 vtau_oce(ji,jj) = vtau(ji,jj) 375 END DO 376 END DO 361 377 ! 362 378 ENDIF … … 418 434 CALL lbc_lnk( taum, 'T', 1. ) ; CALL lbc_lnk( tmod_io, 'T', 1. ) 419 435 ! 420 !$OMP PARALLEL WORKSHARE 421 utau_oce(:,:) = utau(:,:) !* save the air-ocean stresses at ice time-step 422 vtau_oce(:,:) = vtau(:,:) 423 !$OMP END PARALLEL WORKSHARE 436 !$OMP PARALLEL DO schedule(static) private(jj, ji) 437 DO jj = 1, jpj 438 DO ji = 1, jpi 439 utau_oce(ji,jj) = utau(ji,jj) !* save the air-ocean stresses at ice time-step 440 vtau_oce(ji,jj) = vtau(ji,jj) 441 END DO 442 END DO 424 443 ! 425 444 ENDIF … … 475 494 r1_rdtice = 1._wp / rdt_ice 476 495 ! 477 !$OMP PARALLEL WORKSHARE 478 soce_0(:,:) = soce ! constant SSS and ice salinity used in levitating sea-ice case 479 sice_0(:,:) = sice 480 !$OMP END PARALLEL WORKSHARE 496 !$OMP PARALLEL DO schedule(static) private(jj, ji) 497 DO jj = 1, jpj 498 DO ji = 1, jpi 499 soce_0(ji,jj) = soce ! 
constant SSS and ice salinity used in levitating sea-ice case 500 sice_0(ji,jj) = sice 501 END DO 502 END DO 481 503 ! 482 504 IF( cp_cfg == "orca" ) THEN ! decrease ocean & ice reference salinities in the Baltic sea … … 489 511 ! ! embedded sea ice 490 512 IF( nn_ice_embd /= 0 ) THEN ! mass exchanges between ice and ocean (case 1 or 2) set the snow+ice mass 491 !$OMP PARALLEL WORKSHARE 492 snwice_mass (:,:) = tms(:,:) * ( rhosn * hsnif(:,:) + rhoic * hicif(:,:) ) * ( 1.0 - frld(:,:) ) 493 snwice_mass_b(:,:) = snwice_mass(:,:) 494 !$OMP END PARALLEL WORKSHARE 513 !$OMP PARALLEL DO schedule(static) private(jj, ji) 514 DO jj = 1, jpj 515 DO ji = 1, jpi 516 snwice_mass (ji,jj) = tms(ji,jj) * ( rhosn * hsnif(ji,jj) + rhoic * hicif(ji,jj) ) * ( 1.0 - frld(ji,jj) ) 517 snwice_mass_b(ji,jj) = snwice_mass(ji,jj) 518 END DO 519 END DO 495 520 ELSE 496 !$OMP PARALLEL WORKSHARE 497 snwice_mass (:,:) = 0.e0 ! no mass exchanges 498 snwice_mass_b(:,:) = 0.e0 ! no mass exchanges 499 snwice_fmass (:,:) = 0.e0 ! no mass exchanges 500 !$OMP END PARALLEL WORKSHARE 521 !$OMP PARALLEL DO schedule(static) private(jj, ji) 522 DO jj = 1, jpj 523 DO ji = 1, jpi 524 snwice_mass (ji,jj) = 0.e0 ! no mass exchanges 525 snwice_mass_b(ji,jj) = 0.e0 ! no mass exchanges 526 snwice_fmass (ji,jj) = 0.e0 ! no mass exchanges 527 END DO 528 END DO 501 529 ENDIF 502 530 IF( nn_ice_embd == 2 .AND. & ! full embedment (case 2) & no restart : 503 531 & .NOT.ln_rstart ) THEN ! deplete the initial ssh below sea-ice area 504 !$OMP PARALLEL WORKSHARE 505 sshn(:,:) = sshn(:,:) - snwice_mass(:,:) * r1_rau0 506 sshb(:,:) = sshb(:,:) - snwice_mass(:,:) * r1_rau0 507 !$OMP END PARALLEL WORKSHARE 532 !$OMP PARALLEL DO schedule(static) private(jj, ji) 533 DO jj = 1, jpj 534 DO ji = 1, jpi 535 sshn(ji,jj) = sshn(ji,jj) - snwice_mass(ji,jj) * r1_rau0 536 sshb(ji,jj) = sshb(ji,jj) - snwice_mass(ji,jj) * r1_rau0 537 END DO 538 END DO 508 539 !!gm I really don't like this staff here... 
Find a way to put that elsewhere or differently 509 540 !!gm … … 516 547 e3t_b(:,:,jk) = e3t_0(:,:,jk)*( 1._wp + sshb(:,:)*tmask(:,:,1)/(ht_0(:,:) + 1.0 - tmask(:,:,1)) ) 517 548 end do 518 !$OMP WORKSHARE 519 e3t_a(:,:,:) = e3t_b(:,:,:) 520 !$OMP END WORKSHARE NOWAIT 549 !$OMP DO schedule(static) private(jk, jj, ji) 550 DO jk = 1, jpk 551 DO jj = 1, jpj 552 DO ji = 1, jpi 553 e3t_a(ji,jj,jk) = e3t_b(ji,jj,jk) 554 END DO 555 END DO 556 END DO 521 557 !$OMP END PARALLEL 522 558 ! Reconstruction of all vertical scale factors at now and before time steps … … 535 571 ! ! t- and w- points depth 536 572 !$OMP PARALLEL 537 !$OMP WORKSHARE 538 gdept_n(:,:,1) = 0.5_wp * e3w_n(:,:,1) 539 gdepw_n(:,:,1) = 0.0_wp 540 gde3w_n(:,:,1) = gdept_n(:,:,1) - sshn(:,:) 541 !$OMP END WORKSHARE 573 !$OMP DO schedule(static) private(jj, ji) 574 DO jj = 1, jpj 575 DO ji = 1, jpi 576 gdept_n(ji,jj,1) = 0.5_wp * e3w_n(ji,jj,1) 577 gdepw_n(ji,jj,1) = 0.0_wp 578 gde3w_n(ji,jj,1) = gdept_n(ji,jj,1) - sshn(ji,jj) 579 END DO 580 END DO 542 581 DO jk = 2, jpk 543 582 !$OMP DO schedule(static) private(jj,ji) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limthd_2.F90
r7037 r7508 76 76 INTEGER, INTENT(in) :: kt ! number of iteration 77 77 ! 78 INTEGER :: ji, jj 78 INTEGER :: ji, jj, jk ! dummy loop indices 79 79 INTEGER :: nbpb ! nb of icy pts for thermo. cal. 80 80 INTEGER :: nbpac ! nb of pts for lateral accretion … … 121 121 122 122 !!gm needed? yes at least for some of these arrays 123 ztr_fram = 0.e0 ! sea-ice transport through Fram strait 123 124 !$OMP PARALLEL 124 !$OMP WORKSHARE 125 zdvosif(:,:) = 0.e0 ! variation of ice volume at surface 126 zdvobif(:,:) = 0.e0 ! variation of ice volume at bottom 127 zdvolif(:,:) = 0.e0 ! total variation of ice volume 128 zdvonif(:,:) = 0.e0 ! transformation of snow to sea-ice volume 129 zlicegr(:,:) = 0.e0 ! lateral variation of ice volume 130 zdvomif(:,:) = 0.e0 ! variation of ice volume at bottom due to melting only 131 ztr_fram = 0.e0 ! sea-ice transport through Fram strait 132 fstric (:,:) = 0.e0 ! part of solar radiation absorbing inside the ice 133 fscmbq (:,:) = 0.e0 ! linked with fstric 134 ffltbif(:,:) = 0.e0 ! linked with fstric 135 qfvbq (:,:) = 0.e0 ! linked with fstric 136 rdm_snw(:,:) = 0.e0 ! variation of snow mass over 1 time step 137 rdq_snw(:,:) = 0.e0 ! heat content associated with rdm_snw 138 rdm_ice(:,:) = 0.e0 ! variation of ice mass over 1 time step 139 rdq_ice(:,:) = 0.e0 ! heat content associated with rdm_ice 140 zmsk (:,:,:) = 0.e0 141 !$OMP END WORKSHARE NOWAIT 125 !$OMP DO schedule(static) private(jj,ji) 126 DO jj = 1, jpj 127 DO ji = 1, jpi 128 zdvosif(ji,jj) = 0.e0 ! variation of ice volume at surface 129 zdvobif(ji,jj) = 0.e0 ! variation of ice volume at bottom 130 zdvolif(ji,jj) = 0.e0 ! total variation of ice volume 131 zdvonif(ji,jj) = 0.e0 ! transformation of snow to sea-ice volume 132 zlicegr(ji,jj) = 0.e0 ! lateral variation of ice volume 133 zdvomif(ji,jj) = 0.e0 ! variation of ice volume at bottom due to melting only 134 fstric (ji,jj) = 0.e0 ! part of solar radiation absorbing inside the ice 135 fscmbq (ji,jj) = 0.e0 ! 
linked with fstric 136 ffltbif(ji,jj) = 0.e0 ! linked with fstric 137 qfvbq (ji,jj) = 0.e0 ! linked with fstric 138 rdm_snw(ji,jj) = 0.e0 ! variation of snow mass over 1 time step 139 rdq_snw(ji,jj) = 0.e0 ! heat content associated with rdm_snw 140 rdm_ice(ji,jj) = 0.e0 ! variation of ice mass over 1 time step 141 rdq_ice(ji,jj) = 0.e0 ! heat content associated with rdm_ice 142 END DO 143 END DO 144 !$OMP END DO NOWAIT 145 !$OMP DO schedule(static) private(jk,jj,ji) 146 DO jk = 1, jpk 147 DO jj = 1, jpj 148 DO ji = 1, jpi 149 zmsk (ji,jj,jk) = 0.e0 150 END DO 151 END DO 152 END DO 153 !$OMP END DO NOWAIT 142 154 143 155 ! set to zero snow thickness smaller than epsi04 … … 434 446 fr_i (ji,jj) = 1.0 - frld(ji,jj) 435 447 hicifp(ji,jj) = hicif(ji,jj) * fr_i(ji,jj) - hicifp(ji,jj) 448 ztmp(ji,jj) = 1. - pfrld(ji,jj) ! fraction of ice after the dynamic, before the thermodynamic 436 449 END DO 437 450 END DO … … 439 452 ! Outputs 440 453 !-------------------------------------------------------------------------------- 441 ztmp(:,:) = 1. - pfrld(:,:) ! fraction of ice after the dynamic, before the thermodynamic442 454 IF( iom_use('ist_cea' ) ) CALL iom_put( 'ist_cea', (sist(:,:) - rt0) * ztmp(:,:) ) ! Ice surface temperature [Celius] 443 455 IF( iom_use('qsr_ai_cea' ) ) CALL iom_put( 'qsr_ai_cea', qsr_ice(:,:,1) * ztmp(:,:) ) ! Solar flux over the ice [W/m2] -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limtrp_2.F90
r7037 r7508 89 89 # endif 90 90 91 zsm(:,:) = area(:,:) 91 !$OMP PARALLEL DO schedule(static) private(jj,ji) 92 DO jj = 1, jpj 93 DO ji = 1, jpi 94 zsm(ji,jj) = area(ji,jj) 95 END DO 96 END DO 92 97 93 98 IF( ln_limdyn ) THEN … … 110 115 ! 111 116 ELSE ! EVP rheology : C-grid sea-ice dynamics (u- & v-points ice velocity) 112 !$OMP PARALLEL WORKSHARE 113 zui_u(:,:) = u_ice(:,:) ! EVP rheology: ice (u,v) at u- and v-points 114 zvi_v(:,:) = v_ice(:,:) 115 !$OMP END PARALLEL WORKSHARE 117 !$OMP PARALLEL DO schedule(static) private(jj,ji) 118 DO jj = 1, jpj 119 DO ji = 1, jpi 120 zui_u(ji,jj) = u_ice(ji,jj) ! EVP rheology: ice (u,v) at u- and v-points 121 zvi_v(ji,jj) = v_ice(ji,jj) 122 END DO 123 END DO 116 124 ENDIF 117 125 … … 128 136 ! content of properties 129 137 ! --------------------- 130 !$OMP PARALLEL WORKSHARE 131 zs0sn (:,:) = hsnm(:,:) * area (:,:) ! Snow volume. 132 zs0ice(:,:) = hicm(:,:) * area (:,:) ! Ice volume. 133 zs0a (:,:) = ( 1.0 - frld(:,:) ) * area (:,:) ! Surface covered by ice. 134 zs0c0 (:,:) = tbif(:,:,1) / rt0_snow * zs0sn (:,:) ! Heat content of the snow layer. 135 zs0c1 (:,:) = tbif(:,:,2) / rt0_ice * zs0ice(:,:) ! Heat content of the first ice layer. 136 zs0c2 (:,:) = tbif(:,:,3) / rt0_ice * zs0ice(:,:) ! Heat content of the second ice layer. 137 zs0st (:,:) = qstoif(:,:) / xlic * zs0a (:,:) ! Heat reservoir for brine pockets. 138 !$OMP END PARALLEL WORKSHARE 138 !$OMP PARALLEL DO schedule(static) private(jj,ji) 139 DO jj = 1, jpj 140 DO ji = 1, jpi 141 zs0sn (ji,jj) = hsnm(ji,jj) * area (ji,jj) ! Snow volume. 142 zs0ice(ji,jj) = hicm(ji,jj) * area (ji,jj) ! Ice volume. 143 zs0a (ji,jj) = ( 1.0 - frld(ji,jj) ) * area (ji,jj) ! Surface covered by ice. 144 zs0c0 (ji,jj) = tbif(ji,jj,1) / rt0_snow * zs0sn (ji,jj) ! Heat content of the snow layer. 145 zs0c1 (ji,jj) = tbif(ji,jj,2) / rt0_ice * zs0ice(ji,jj) ! Heat content of the first ice layer. 146 zs0c2 (ji,jj) = tbif(ji,jj,3) / rt0_ice * zs0ice(ji,jj) ! 
Heat content of the second ice layer. 147 zs0st (ji,jj) = qstoif(ji,jj) / xlic * zs0a (ji,jj) ! Heat reservoir for brine pockets. 148 END DO 149 END DO 139 150 140 151 … … 184 195 !!gm Define in limmsh one for all area = 1 /area (CPU time saved !) 185 196 !$OMP PARALLEL 186 !$OMP WORKSHARE 187 zs0ice(:,:) = zs0ice(:,:) / area(:,:) 188 zs0sn (:,:) = zs0sn (:,:) / area(:,:) 189 zs0a (:,:) = zs0a (:,:) / area(:,:) 190 zs0c0 (:,:) = zs0c0 (:,:) / area(:,:) 191 zs0c1 (:,:) = zs0c1 (:,:) / area(:,:) 192 zs0c2 (:,:) = zs0c2 (:,:) / area(:,:) 193 zs0st (:,:) = zs0st (:,:) / area(:,:) 194 195 !$OMP END WORKSHARE NOWAIT 197 !$OMP DO schedule(static) private(jj,ji) 198 DO jj = 1, jpj 199 DO ji = 1, jpi 200 zs0ice(ji,jj) = zs0ice(ji,jj) / area(ji,jj) 201 zs0sn (ji,jj) = zs0sn (ji,jj) / area(ji,jj) 202 zs0a (ji,jj) = zs0a (ji,jj) / area(ji,jj) 203 zs0c0 (ji,jj) = zs0c0 (ji,jj) / area(ji,jj) 204 zs0c1 (ji,jj) = zs0c1 (ji,jj) / area(ji,jj) 205 zs0c2 (ji,jj) = zs0c2 (ji,jj) / area(ji,jj) 206 zs0st (ji,jj) = zs0st (ji,jj) / area(ji,jj) 207 END DO 208 END DO 196 209 197 210 !-------------------------------------! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limwri_2.F90
r7037 r7508 143 143 144 144 !$OMP PARALLEL 145 !$OMP WORKSHARE 146 zcmo(:,:, 1:jpnoumax ) = 0.e0 147 !$OMP END WORKSHARE 145 !$OMP DO schedule(static) private(jj, ji) 146 DO jj = 1, jpj 147 DO ji = 1, jpi 148 zcmo(ji,jj, 1:jpnoumax ) = 0.e0 149 END DO 150 END DO 148 151 !$OMP DO schedule(static) private(jj,ji,zindh,zinda,zindb,ztmu) 149 152 DO jj = 2 , jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_3/limrhg.F90
r7037 r7508 186 186 187 187 !$OMP PARALLEL 188 !$OMP WORKSHARE 189 delta_i(:,:) = 0._wp ; 190 zpresh (:,:) = 0._wp ; 191 zpreshc(:,:) = 0._wp 192 u_ice2 (:,:) = 0._wp ; v_ice1(:,:) = 0._wp 193 divu_i (:,:) = 0._wp ; zdt (:,:) = 0._wp ; zds(:,:) = 0._wp 194 shear_i(:,:) = 0._wp 195 !$OMP END WORKSHARE 188 !$OMP DO schedule(static) private(jj, ji) 189 DO jj = 1, jpj 190 DO ji = 1, jpi 191 delta_i(ji,jj) = 0._wp ; 192 zpresh (ji,jj) = 0._wp ; 193 zpreshc(ji,jj) = 0._wp 194 u_ice2 (ji,jj) = 0._wp ; v_ice1(ji,jj) = 0._wp 195 divu_i (ji,jj) = 0._wp ; zdt (ji,jj) = 0._wp ; zds(ji,jj) = 0._wp 196 shear_i(ji,jj) = 0._wp 197 END DO 198 END DO 196 199 197 200 !$OMP DO schedule(static) private(jj,ji) … … 250 253 zintb = REAL( nn_fsbc + 1 ) / REAL( nn_fsbc ) * 0.5_wp 251 254 ! 252 !$OMP PARALLEL WORKSHARE 253 zpice(:,:) = ssh_m(:,:) + ( zintn * snwice_mass(:,:) + zintb * snwice_mass_b(:,:) ) * r1_rau0 254 !$OMP END PARALLEL WORKSHARE 255 !$OMP PARALLEL DO schedule(static) private(jj, ji) 256 DO jj = 1, jpj 257 DO ji = 1, jpi 258 zpice(ji,jj) = ssh_m(ji,jj) + ( zintn * snwice_mass(ji,jj) + zintb * snwice_mass_b(ji,jj) ) * r1_rau0 259 END DO 260 END DO 255 261 ! 256 262 ELSE !== non-embedded sea ice: use ocean surface for slope calculation ==! 257 !$OMP PARALLEL WORKSHARE 258 zpice(:,:) = ssh_m(:,:) 259 !$OMP END PARALLEL WORKSHARE 263 !$OMP PARALLEL DO schedule(static) private(jj, ji) 264 DO jj = 1, jpj 265 DO ji = 1, jpi 266 zpice(ji,jj) = ssh_m(ji,jj) 267 END DO 268 END DO 260 269 ENDIF 261 270 … … 332 341 333 342 !-Initialise stress tensor 334 !$OMP PARALLEL WORKSHARE 335 zs1 (:,:) = stress1_i (:,:) 336 zs2 (:,:) = stress2_i (:,:) 337 zs12(:,:) = stress12_i(:,:) 338 !$OMP END PARALLEL WORKSHARE 343 !$OMP PARALLEL DO schedule(static) private(jj, ji) 344 DO jj = 1, jpj 345 DO ji = 1, jpi 346 zs1 (ji,jj) = stress1_i (ji,jj) 347 zs2 (ji,jj) = stress2_i (ji,jj) 348 zs12(ji,jj) = stress12_i(ji,jj) 349 END DO 350 END DO 339 351 340 352 ! !----------------------! 
… … 677 689 !$OMP END DO NOWAIT 678 690 ! * Store the stress tensor for the next time step 679 !$OMP WORKSHARE 680 stress1_i (:,:) = zs1 (:,:) 681 stress2_i (:,:) = zs2 (:,:) 682 stress12_i(:,:) = zs12(:,:) 683 !$OMP END WORKSHARE NOWAIT 691 !$OMP DO schedule(static) private(jj, ji) 692 DO jj = 1, jpj 693 DO ji = 1, jpi 694 stress1_i (ji,jj) = zs1 (ji,jj) 695 stress2_i (ji,jj) = zs2 (ji,jj) 696 stress12_i(ji,jj) = zs12(ji,jj) 697 END DO 698 END DO 699 !$OMP END DO NOWAIT 684 700 !$OMP END PARALLEL 685 701 ! Lateral boundary condition -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/dia25h.F90
r7037 r7508 62 62 INTEGER :: ios ! Local integer output status for namelist read 63 63 INTEGER :: ierror ! Local integer for memory allocation 64 INTEGER :: ji, jj, jk 64 65 ! 65 66 NAMELIST/nam_dia25h/ ln_dia25h … … 134 135 ! ------------------------- ! 135 136 cnt_25h = 1 ! sets the first value of sum at timestep 1 (note - should strictly be at timestep zero so before values used where possible) 136 !$OMP PARALLEL WORKSHARE 137 tn_25h(:,:,:) = tsb(:,:,:,jp_tem) 138 sn_25h(:,:,:) = tsb(:,:,:,jp_sal) 139 sshn_25h(:,:) = sshb(:,:) 140 un_25h(:,:,:) = ub(:,:,:) 141 vn_25h(:,:,:) = vb(:,:,:) 142 wn_25h(:,:,:) = wn(:,:,:) 143 avt_25h(:,:,:) = avt(:,:,:) 144 avm_25h(:,:,:) = avm(:,:,:) 137 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 138 DO jk = 1, jpk 139 DO jj = 1, jpj 140 DO ji = 1, jpi 141 tn_25h(ji,jj,jk) = tsb(ji,jj,jk,jp_tem) 142 sn_25h(ji,jj,jk) = tsb(ji,jj,jk,jp_sal) 143 sshn_25h(ji,jj) = sshb(ji,jj) 144 un_25h(ji,jj,jk) = ub(ji,jj,jk) 145 vn_25h(ji,jj,jk) = vb(ji,jj,jk) 146 wn_25h(ji,jj,jk) = wn(ji,jj,jk) 147 avt_25h(ji,jj,jk) = avt(ji,jj,jk) 148 avm_25h(ji,jj,jk) = avm(ji,jj,jk) 145 149 # if defined key_zdfgls || defined key_zdftke 146 en_25h(:,:,:) = en(:,:,:)150 en_25h(ji,jj,jk) = en(ji,jj,jk) 147 151 #endif 148 152 # if defined key_zdfgls 149 rmxln_25h(:,:,:) = mxln(:,:,:) 150 #endif 151 !$OMP END PARALLEL WORKSHARE 153 rmxln_25h(ji,jj,jk) = mxln(ji,jj,jk) 154 #endif 155 END DO 156 END DO 157 END DO 152 158 #if defined key_lim3 || defined key_lim2 153 159 CALL ctl_stop('STOP', 'dia_25h not setup yet to do tidemean ice') … … 225 231 ENDIF 226 232 227 !$OMP PARALLEL WORKSHARE 228 tn_25h(:,:,:) = tn_25h(:,:,:) + tsn(:,:,:,jp_tem) 229 sn_25h(:,:,:) = sn_25h(:,:,:) + tsn(:,:,:,jp_sal) 230 sshn_25h(:,:) = sshn_25h(:,:) + sshn (:,:) 231 un_25h(:,:,:) = un_25h(:,:,:) + un(:,:,:) 232 vn_25h(:,:,:) = vn_25h(:,:,:) + vn(:,:,:) 233 wn_25h(:,:,:) = wn_25h(:,:,:) + wn(:,:,:) 234 avt_25h(:,:,:) = avt_25h(:,:,:) + avt(:,:,:) 235 avm_25h(:,:,:) = avm_25h(:,:,:) + 
avm(:,:,:) 233 !$OMP PARALLEL 234 !$OMP DO schedule(static) private(jj, ji) 235 DO jj = 1, jpj 236 DO ji = 1, jpi 237 sshn_25h(ji,jj) = sshn_25h(ji,jj) + sshn (ji,jj) 238 END DO 239 END DO 240 !$OMP END DO NOWAIT 241 !$OMP DO schedule(static) private(jk, jj, ji) 242 DO jk = 1, jpk 243 DO jj = 1, jpj 244 DO ji = 1, jpi 245 tn_25h(ji,jj,jk) = tn_25h(ji,jj,jk) + tsn(ji,jj,jk,jp_tem) 246 sn_25h(ji,jj,jk) = sn_25h(ji,jj,jk) + tsn(ji,jj,jk,jp_sal) 247 un_25h(ji,jj,jk) = un_25h(ji,jj,jk) + un(ji,jj,jk) 248 vn_25h(ji,jj,jk) = vn_25h(ji,jj,jk) + vn(ji,jj,jk) 249 wn_25h(ji,jj,jk) = wn_25h(ji,jj,jk) + wn(ji,jj,jk) 250 avt_25h(ji,jj,jk) = avt_25h(ji,jj,jk) + avt(ji,jj,jk) 251 avm_25h(ji,jj,jk) = avm_25h(ji,jj,jk) + avm(ji,jj,jk) 236 252 # if defined key_zdfgls || defined key_zdftke 237 en_25h(:,:,:) = en_25h(:,:,:) + en(:,:,:)253 en_25h(ji,jj,jk) = en_25h(ji,jj,jk) + en(ji,jj,jk) 238 254 #endif 239 255 # if defined key_zdfgls 240 rmxln_25h(:,:,:) = rmxln_25h(:,:,:) + mxln(:,:,:) 241 #endif 242 !$OMP END PARALLEL WORKSHARE 256 rmxln_25h(ji,jj,jk) = rmxln_25h(ji,jj,jk) + mxln(ji,jj,jk) 257 #endif 258 END DO 259 END DO 260 END DO 261 !$OMP END PARALLEL 243 262 cnt_25h = cnt_25h + 1 244 263 … … 257 276 ENDIF 258 277 259 !$OMP PARALLEL WORKSHARE 260 tn_25h(:,:,:) = tn_25h(:,:,:) / 25.0_wp 261 sn_25h(:,:,:) = sn_25h(:,:,:) / 25.0_wp 262 sshn_25h(:,:) = sshn_25h(:,:) / 25.0_wp 263 un_25h(:,:,:) = un_25h(:,:,:) / 25.0_wp 264 vn_25h(:,:,:) = vn_25h(:,:,:) / 25.0_wp 265 wn_25h(:,:,:) = wn_25h(:,:,:) / 25.0_wp 266 avt_25h(:,:,:) = avt_25h(:,:,:) / 25.0_wp 267 avm_25h(:,:,:) = avm_25h(:,:,:) / 25.0_wp 278 !$OMP PARALLEL 279 !$OMP DO schedule(static) private(jj, ji) 280 DO jj = 1, jpj 281 DO ji = 1, jpi 282 sshn_25h(ji,jj) = sshn_25h(ji,jj) / 25.0_wp 283 END DO 284 END DO 285 !$OMP END DO NOWAIT 286 !$OMP DO schedule(static) private(jk, jj, ji) 287 DO jk = 1, jpk 288 DO jj = 1, jpj 289 DO ji = 1, jpi 290 tn_25h(ji,jj,jk) = tn_25h(ji,jj,jk) / 25.0_wp 291 sn_25h(ji,jj,jk) = sn_25h(ji,jj,jk) 
/ 25.0_wp 292 un_25h(ji,jj,jk) = un_25h(ji,jj,jk) / 25.0_wp 293 vn_25h(ji,jj,jk) = vn_25h(ji,jj,jk) / 25.0_wp 294 wn_25h(ji,jj,jk) = wn_25h(ji,jj,jk) / 25.0_wp 295 avt_25h(ji,jj,jk) = avt_25h(ji,jj,jk) / 25.0_wp 296 avm_25h(ji,jj,jk) = avm_25h(ji,jj,jk) / 25.0_wp 268 297 # if defined key_zdfgls || defined key_zdftke 269 en_25h(:,:,:) = en_25h(:,:,:) / 25.0_wp298 en_25h(ji,jj,jk) = en_25h(ji,jj,jk) / 25.0_wp 270 299 #endif 271 300 # if defined key_zdfgls 272 rmxln_25h(:,:,:) = rmxln_25h(:,:,:) / 25.0_wp 273 #endif 274 !$OMP END PARALLEL WORKSHARE 301 rmxln_25h(ji,jj,jk) = rmxln_25h(ji,jj,jk) / 25.0_wp 302 #endif 303 END DO 304 END DO 305 END DO 306 !$OMP END PARALLEL 275 307 276 308 IF (lwp) WRITE(numout,*) 'dia_wri_tide : Mean calculated by dividing 25 hour sums and writing output' 277 309 zmdi=1.e+20 !missing data indicator for masking 278 310 ! write tracers (instantaneous) 279 !$OMP PARALLEL WORKSHARE 280 zw3d(:,:,:) = tn_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 281 !$OMP END PARALLEL WORKSHARE 311 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 312 DO jk = 1, jpk 313 DO jj = 1, jpj 314 DO ji = 1, jpi 315 zw3d(ji,jj,jk) = tn_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 316 END DO 317 END DO 318 END DO 282 319 CALL iom_put("temper25h", zw3d) ! potential temperature 283 !$OMP PARALLEL WORKSHARE 284 zw3d(:,:,:) = sn_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 285 !$OMP END PARALLEL WORKSHARE 320 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 321 DO jk = 1, jpk 322 DO jj = 1, jpj 323 DO ji = 1, jpi 324 zw3d(ji,jj,jk) = sn_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 325 END DO 326 END DO 327 END DO 286 328 CALL iom_put( "salin25h", zw3d ) ! 
salinity 287 !$OMP PARALLEL WORKSHARE 288 zw2d(:,:) = sshn_25h(:,:)*tmask(:,:,1) + zmdi*(1.0-tmask(:,:,1)) 289 !$OMP END PARALLEL WORKSHARE 329 !$OMP PARALLEL DO schedule(static) private(jj, ji) 330 DO jj = 1, jpj 331 DO ji = 1, jpi 332 zw2d(ji,jj) = sshn_25h(ji,jj)*tmask(ji,jj,1) + zmdi*(1.0-tmask(ji,jj,1)) 333 END DO 334 END DO 290 335 CALL iom_put( "ssh25h", zw2d ) ! sea surface 291 336 292 337 293 338 ! Write velocities (instantaneous) 294 !$OMP PARALLEL WORKSHARE 295 zw3d(:,:,:) = un_25h(:,:,:)*umask(:,:,:) + zmdi*(1.0-umask(:,:,:)) 296 !$OMP END PARALLEL WORKSHARE 339 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 340 DO jk = 1, jpk 341 DO jj = 1, jpj 342 DO ji = 1, jpi 343 zw3d(ji,jj,jk) = un_25h(ji,jj,jk)*umask(ji,jj,jk) + zmdi*(1.0-umask(ji,jj,jk)) 344 END DO 345 END DO 346 END DO 297 347 CALL iom_put("vozocrtx25h", zw3d) ! i-current 298 !$OMP PARALLEL WORKSHARE 299 zw3d(:,:,:) = vn_25h(:,:,:)*vmask(:,:,:) + zmdi*(1.0-vmask(:,:,:)) 300 !$OMP END PARALLEL WORKSHARE 348 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 349 DO jk = 1, jpk 350 DO jj = 1, jpj 351 DO ji = 1, jpi 352 zw3d(ji,jj,jk) = vn_25h(ji,jj,jk)*vmask(ji,jj,jk) + zmdi*(1.0-vmask(ji,jj,jk)) 353 END DO 354 END DO 355 END DO 301 356 CALL iom_put("vomecrty25h", zw3d ) ! j-current 302 303 !$OMP PARALLEL WORKSHARE 304 zw3d(:,:,:) = wn_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 305 !$OMP END PARALLEL WORKSHARE 357 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 358 DO jk = 1, jpk 359 DO jj = 1, jpj 360 DO ji = 1, jpi 361 zw3d(ji,jj,jk) = wn_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 362 END DO 363 END DO 364 END DO 306 365 CALL iom_put("vomecrtz25h", zw3d ) ! 
k-current 307 !$OMP PARALLEL WORKSHARE 308 zw3d(:,:,:) = avt_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 309 !$OMP END PARALLEL WORKSHARE 366 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 367 DO jk = 1, jpk 368 DO jj = 1, jpj 369 DO ji = 1, jpi 370 zw3d(ji,jj,jk) = avt_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 371 END DO 372 END DO 373 END DO 310 374 CALL iom_put("avt25h", zw3d ) ! diffusivity 311 !$OMP PARALLEL WORKSHARE 312 zw3d(:,:,:) = avm_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 313 !$OMP END PARALLEL WORKSHARE 375 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 376 DO jk = 1, jpk 377 DO jj = 1, jpj 378 DO ji = 1, jpi 379 zw3d(ji,jj,jk) = avm_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 380 END DO 381 END DO 382 END DO 314 383 CALL iom_put("avm25h", zw3d) ! viscosity 315 384 #if defined key_zdftke || defined key_zdfgls 316 !$OMP PARALLEL WORKSHARE 317 zw3d(:,:,:) = en_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 318 !$OMP END PARALLEL WORKSHARE 385 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 386 DO jk = 1, jpk 387 DO jj = 1, jpj 388 DO ji = 1, jpi 389 zw3d(ji,jj,jk) = en_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 390 END DO 391 END DO 392 END DO 319 393 CALL iom_put("tke25h", zw3d) ! tke 320 394 #endif 321 395 #if defined key_zdfgls 322 !$OMP PARALLEL WORKSHARE 323 zw3d(:,:,:) = rmxln_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 324 !$OMP END PARALLEL WORKSHARE 396 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 397 DO jk = 1, jpk 398 DO jj = 1, jpj 399 DO ji = 1, jpi 400 zw3d(ji,jj,jk) = rmxln_25h(ji,jj,jk)*tmask(ji,jj,jk) + zmdi*(1.0-tmask(ji,jj,jk)) 401 END DO 402 END DO 403 END DO 325 404 CALL iom_put( "mxln25h",zw3d) 326 405 #endif 327 406 328 407 ! 
After the write reset the values to cnt=1 and sum values equal current value 329 !$OMP PARALLEL WORKSHARE 330 tn_25h(:,:,:) = tsn(:,:,:,jp_tem) 331 sn_25h(:,:,:) = tsn(:,:,:,jp_sal) 332 sshn_25h(:,:) = sshn (:,:) 333 un_25h(:,:,:) = un(:,:,:) 334 vn_25h(:,:,:) = vn(:,:,:) 335 wn_25h(:,:,:) = wn(:,:,:) 336 avt_25h(:,:,:) = avt(:,:,:) 337 avm_25h(:,:,:) = avm(:,:,:) 408 !$OMP PARALLEL 409 !$OMP DO schedule(static) private(jj, ji) 410 DO jj = 1, jpj 411 DO ji = 1, jpi 412 sshn_25h(ji,jj) = sshn (ji,jj) 413 END DO 414 END DO 415 !$OMP END DO NOWAIT 416 !$OMP DO schedule(static) private(jk, jj, ji) 417 DO jk = 1, jpk 418 DO jj = 1, jpj 419 DO ji = 1, jpi 420 tn_25h(ji,jj,jk) = tsn(ji,jj,jk,jp_tem) 421 sn_25h(ji,jj,jk) = tsn(ji,jj,jk,jp_sal) 422 un_25h(ji,jj,jk) = un(ji,jj,jk) 423 vn_25h(ji,jj,jk) = vn(ji,jj,jk) 424 wn_25h(ji,jj,jk) = wn(ji,jj,jk) 425 avt_25h(ji,jj,jk) = avt(ji,jj,jk) 426 avm_25h(ji,jj,jk) = avm(ji,jj,jk) 338 427 # if defined key_zdfgls || defined key_zdftke 339 en_25h(:,:,:) = en(:,:,:)428 en_25h(ji,jj,jk) = en(ji,jj,jk) 340 429 #endif 341 430 # if defined key_zdfgls 342 rmxln_25h(:,:,:) = mxln(:,:,:) 343 #endif 344 !$OMP END PARALLEL WORKSHARE 431 rmxln_25h(ji,jj,jk) = mxln(ji,jj,jk) 432 #endif 433 END DO 434 END DO 435 END DO 436 !$OMP END PARALLEL 345 437 cnt_25h = 1 346 438 IF (lwp) WRITE(numout,*) 'dia_wri_tide : After 25hr mean write, reset sum to current value and cnt_25h to one for overlapping average',cnt_25h -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diacfl.F90
r7037 r7508 173 173 !!---------------------------------------------------------------------- 174 174 175 INTEGER :: ji, jj, jk ! dummy loop indices 175 176 176 177 IF( nn_diacfl == 1 ) THEN … … 182 183 183 184 ALLOCATE( zcu_cfl(jpi, jpj, jpk), zcv_cfl(jpi, jpj, jpk), zcw_cfl(jpi, jpj, jpk) ) 184 !$OMP PARALLEL WORKSHARE 185 zcu_cfl(:,:,:)=0.0 186 zcv_cfl(:,:,:)=0.0 187 zcw_cfl(:,:,:)=0.0 188 !$OMP END PARALLEL WORKSHARE 185 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 186 DO jk = 1, jpk 187 DO jj = 1, jpj 188 DO ji = 1, jpi 189 zcu_cfl(ji,jj,jk)=0.0 190 zcv_cfl(ji,jj,jk)=0.0 191 zcw_cfl(ji,jj,jk)=0.0 192 END DO 193 END DO 194 END DO 189 195 IF( lwp ) THEN 190 196 WRITE(numout,*) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diawri.F90
r7037 r7508 182 182 IF ( iom_use("taubot") ) THEN ! bottom stress 183 183 !$OMP PARALLEL 184 !$OMP WORKSHARE 185 z2d(:,:) = 0._wp 186 !$OMP END WORKSHARE 184 !$OMP DO schedule(static) private(jj, ji) 185 DO jj = 1, jpj 186 DO ji = 1, jpi 187 z2d(ji,jj) = 0._wp 188 END DO 189 END DO 187 190 !$OMP DO schedule(static) private(jj, ji, zztmpx,zztmpy) 188 191 DO jj = 2, jpjm1 … … 232 235 ! Caution: in the VVL case, it only correponds to the baroclinic mass transport. 233 236 !$OMP PARALLEL 234 !$OMP WORKSHARE 235 z2d(:,:) = rau0 * e1e2t(:,:) 236 !$OMP END WORKSHARE 237 !$OMP DO schedule(static) private(jj, ji) 238 DO jj = 1, jpj 239 DO ji = 1, jpi 240 z2d(ji,jj) = rau0 * e1e2t(ji,jj) 241 END DO 242 END DO 237 243 !$OMP DO schedule(static) private(jk) 238 244 DO jk = 1, jpk … … 277 283 IF( iom_use("heatc") ) THEN 278 284 !$OMP PARALLEL 279 !$OMP WORKSHARE 280 z2d(:,:) = 0._wp 281 !$OMP END WORKSHARE 282 !$OMP DO schedule(static) private(jk, jj, ji) 285 !$OMP DO schedule(static) private(jj, ji) 286 DO jj = 1, jpj 287 DO ji = 1, jpi 288 z2d(ji,jj) = 0._wp 289 END DO 290 END DO 283 291 DO jk = 1, jpkm1 292 !$OMP DO schedule(static) private(jj, ji) 284 293 DO jj = 1, jpj 285 294 DO ji = 1, jpi … … 287 296 END DO 288 297 END DO 289 END DO290 298 !$OMP END DO NOWAIT 299 END DO 291 300 !$OMP END PARALLEL 292 301 CALL iom_put( "heatc", (rau0 * rcp) * z2d ) ! vertically integrated heat content (J/m2) … … 295 304 IF( iom_use("saltc") ) THEN 296 305 !$OMP PARALLEL 297 !$OMP WORKSHARE 298 z2d(:,:) = 0._wp 299 !$OMP END WORKSHARE 300 !$OMP DO schedule(static) private(jk, jj, ji) 306 !$OMP DO schedule(static) private(jj, ji) 307 DO jj = 1, jpj 308 DO ji = 1, jpi 309 z2d(ji,jj) = 0._wp 310 END DO 311 END DO 301 312 DO jk = 1, jpkm1 313 !$OMP DO schedule(static) private(jj, ji) 302 314 DO jj = 1, jpj 303 315 DO ji = 1, jpi … … 305 317 END DO 306 318 END DO 307 END DO308 319 !$OMP END DO NOWAIT 320 END DO 309 321 !$OMP END PARALLEL 310 322 CALL iom_put( "saltc", rau0 * z2d ) ! 
vertically integrated salt content (PSU*kg/m2) … … 313 325 IF ( iom_use("eken") ) THEN 314 326 !$OMP PARALLEL 315 !$OMP WORKSHARE 316 rke(:,:,jk) = 0._wp ! kinetic energy 317 !$OMP END WORKSHARE 327 !$OMP DO schedule(static) private(jj, ji) 328 DO jj = 1, jpj 329 DO ji = 1, jpi 330 rke(ji,jj,jk) = 0._wp ! kinetic energy 331 END DO 332 END DO 318 333 !$OMP DO schedule(static) private(jk, jj, ji, zztmp, zztmpx, zztmpy) 319 334 DO jk = 1, jpkm1 … … 344 359 IF( iom_use("u_masstr") .OR. iom_use("u_heattr") .OR. iom_use("u_salttr") ) THEN 345 360 !$OMP PARALLEL 346 !$OMP WORKSHARE 347 z3d(:,:,jpk) = 0.e0 348 !$OMP END WORKSHARE 361 !$OMP DO schedule(static) private(jj, ji) 362 DO jj = 1, jpj 363 DO ji = 1, jpi 364 z3d(ji,jj,jpk) = 0.e0 365 END DO 366 END DO 349 367 !$OMP DO schedule(static) private(jk) 350 368 DO jk = 1, jpkm1 … … 358 376 IF( iom_use("u_heattr") ) THEN 359 377 !$OMP PARALLEL 360 !$OMP WORKSHARE 361 z2d(:,:) = 0.e0 362 !$OMP END WORKSHARE 363 !$OMP DO schedule(static) private(jk, jj, ji) 378 !$OMP DO schedule(static) private(jj, ji) 379 DO jj = 1, jpj 380 DO ji = 1, jpi 381 z2d(ji,jj) = 0.e0 382 END DO 383 END DO 364 384 DO jk = 1, jpkm1 385 !$OMP DO schedule(static) private(jj, ji) 365 386 DO jj = 2, jpjm1 366 387 DO ji = fs_2, fs_jpim1 ! vector opt. … … 368 389 END DO 369 390 END DO 370 END DO371 391 !$OMP END DO NOWAIT 392 END DO 372 393 !$OMP END PARALLEL 373 394 CALL lbc_lnk( z2d, 'U', -1. ) … … 377 398 IF( iom_use("u_salttr") ) THEN 378 399 !$OMP PARALLEL 379 !$OMP WORKSHARE 380 z2d(:,:) = 0.e0 381 !$OMP END WORKSHARE 382 !$OMP DO schedule(static) private(jk, jj, ji) 400 !$OMP DO schedule(static) private(jj, ji) 401 DO jj = 1, jpj 402 DO ji = 1, jpi 403 z2d(ji,jj) = 0.e0 404 END DO 405 END DO 383 406 DO jk = 1, jpkm1 407 !$OMP DO schedule(static) private(jj, ji) 384 408 DO jj = 2, jpjm1 385 409 DO ji = fs_2, fs_jpim1 ! vector opt. 
… … 387 411 END DO 388 412 END DO 389 END DO390 413 !$OMP END DO NOWAIT 414 END DO 391 415 !$OMP END PARALLEL 392 416 CALL lbc_lnk( z2d, 'U', -1. ) … … 397 421 IF( iom_use("v_masstr") .OR. iom_use("v_heattr") .OR. iom_use("v_salttr") ) THEN 398 422 !$OMP PARALLEL 399 !$OMP WORKSHARE 400 z3d(:,:,jpk) = 0.e0 401 !$OMP END WORKSHARE 423 !$OMP DO schedule(static) private(jj, ji) 424 DO jj = 1, jpj 425 DO ji = 1, jpi 426 z3d(ji,jj,jpk) = 0.e0 427 END DO 428 END DO 402 429 !$OMP DO schedule(static) private(jk) 403 430 DO jk = 1, jpkm1 … … 411 438 IF( iom_use("v_heattr") ) THEN 412 439 !$OMP PARALLEL 413 !$OMP WORKSHARE 414 z2d(:,:) = 0.e0 415 !$OMP END WORKSHARE 416 !$OMP DO schedule(static) private(jk, jj, ji) 440 !$OMP DO schedule(static) private(jj, ji) 441 DO jj = 1, jpj 442 DO ji = 1, jpi 443 z2d(ji,jj) = 0.e0 444 END DO 445 END DO 417 446 DO jk = 1, jpkm1 447 !$OMP DO schedule(static) private(jj, ji) 418 448 DO jj = 2, jpjm1 419 449 DO ji = fs_2, fs_jpim1 ! vector opt. … … 421 451 END DO 422 452 END DO 423 END DO424 453 !$OMP END DO NOWAIT 454 END DO 425 455 !$OMP END PARALLEL 426 456 CALL lbc_lnk( z2d, 'V', -1. ) … … 430 460 IF( iom_use("v_salttr") ) THEN 431 461 !$OMP PARALLEL 432 !$OMP WORKSHARE 433 z2d(:,:) = 0.e0 434 !$OMP END WORKSHARE 435 !$OMP DO schedule(static) private(jk, jj, ji) 462 !$OMP DO schedule(static) private(jj, ji) 463 DO jj = 1, jpj 464 DO ji = 1, jpi 465 z2d(ji,jj) = 0.e0 466 END DO 467 END DO 436 468 DO jk = 1, jpkm1 469 !$OMP DO schedule(static) private(jj, ji) 437 470 DO jj = 2, jpjm1 438 471 DO ji = fs_2, fs_jpim1 ! vector opt. … … 440 473 END DO 441 474 END DO 442 END DO443 475 !$OMP END DO NOWAIT 476 END DO 444 477 !$OMP END PARALLEL 445 478 CALL lbc_lnk( z2d, 'V', -1. 
) … … 818 851 ENDIF 819 852 IF( .NOT.ln_linssh ) THEN 820 !$OMP PARALLEL WORKSHARE 821 zw3d(:,:,:) = ( ( e3t_n(:,:,:) - e3t_0(:,:,:) ) / e3t_0(:,:,:) * 100 * tmask(:,:,:) ) ** 2 822 !$OMP END PARALLEL WORKSHARE 853 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 854 DO jk = 1, jpk 855 DO jj = 1, jpj 856 DO ji = 1, jpi 857 zw3d(ji,jj,jk) = ( ( e3t_n(ji,jj,jk) - e3t_0(ji,jj,jk) ) / e3t_0(ji,jj,jk) * 100 * tmask(ji,jj,jk) ) ** 2 858 END DO 859 END DO 860 END DO 823 861 CALL histwrite( nid_T, "vovvle3t", it, e3t_n (:,:,:) , ndim_T , ndex_T ) ! level thickness 824 862 CALL histwrite( nid_T, "vovvldep", it, gdept_n(:,:,:) , ndim_T , ndex_T ) ! t-point depth … … 832 870 ! in linear free surface case) 833 871 IF( ln_linssh ) THEN 834 !$OMP PARALLEL WORKSHARE 835 zw2d(:,:) = emp (:,:) * tsn(:,:,1,jp_tem) 836 !$OMP END PARALLEL WORKSHARE 872 !$OMP PARALLEL DO schedule(static) private(jj, ji) 873 DO jj = 1, jpj 874 DO ji = 1, jpi 875 zw2d(ji,jj) = emp (ji,jj) * tsn(ji,jj,1,jp_tem) 876 END DO 877 END DO 837 878 CALL histwrite( nid_T, "sosst_cd", it, zw2d, ndim_hT, ndex_hT ) ! c/d term on sst 838 !$OMP PARALLEL WORKSHARE 839 zw2d(:,:) = emp (:,:) * tsn(:,:,1,jp_sal) 840 !$OMP END PARALLEL WORKSHARE 879 !$OMP PARALLEL DO schedule(static) private(jj, ji) 880 DO jj = 1, jpj 881 DO ji = 1, jpi 882 zw2d(ji,jj) = emp (ji,jj) * tsn(ji,jj,1,jp_sal) 883 END DO 884 END DO 841 885 CALL histwrite( nid_T, "sosss_cd", it, zw2d, ndim_hT, ndex_hT ) ! c/d term on sss 842 886 ENDIF … … 875 919 CALL histwrite( nid_T, "sowafldp", it, erp , ndim_hT, ndex_hT ) ! 
freshwater flux damping 876 920 IF( ln_ssr ) THEN 877 !$OMP PARALLEL WORKSHARE 878 zw2d(:,:) = erp(:,:) * tsn(:,:,1,jp_sal) * tmask(:,:,1) 879 !$OMP END PARALLEL WORKSHARE 921 !$OMP PARALLEL DO schedule(static) private(jj, ji) 922 DO jj = 1, jpj 923 DO ji = 1, jpi 924 zw2d(ji,jj) = erp(ji,jj) * tsn(ji,jj,1,jp_sal) * tmask(ji,jj,1) 925 END DO 926 END DO 880 927 END IF 881 928 CALL histwrite( nid_T, "sosafldp", it, zw2d , ndim_hT, ndex_hT ) ! salt flux damping … … 885 932 CALL histwrite( nid_T, "sowafldp", it, erp , ndim_hT, ndex_hT ) ! freshwater flux damping 886 933 IF( ln_ssr ) THEN 887 !$OMP PARALLEL WORKSHARE 888 zw2d(:,:) = erp(:,:) * tsn(:,:,1,jp_sal) * tmask(:,:,1) 889 !$OMP END PARALLEL WORKSHARE 934 !$OMP PARALLEL DO schedule(static) private(jj, ji) 935 DO jj = 1, jpj 936 DO ji = 1, jpi 937 zw2d(ji,jj) = erp(ji,jj) * tsn(ji,jj,1,jp_sal) * tmask(ji,jj,1) 938 END DO 939 END DO 890 940 END IF 891 941 CALL histwrite( nid_T, "sosafldp", it, zw2d , ndim_hT, ndex_hT ) ! salt flux damping -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domain.F90
r7037 r7508 93 93 ! 94 94 !$OMP PARALLEL 95 !$OMP WORKSHARE 96 ht_0(:,:) = e3t_0(:,:,1) * tmask(:,:,1) ! Reference ocean thickness 97 hu_0(:,:) = e3u_0(:,:,1) * umask(:,:,1) 98 hv_0(:,:) = e3v_0(:,:,1) * vmask(:,:,1) 99 !$OMP END WORKSHARE 95 !$OMP DO schedule(static) private(jj,ji) 96 DO jj =1, jpj 97 DO ji=1, jpi 98 ht_0(ji,jj) = e3t_0(ji,jj,1) * tmask(ji,jj,1) ! Reference ocean thickness 99 hu_0(ji,jj) = e3u_0(ji,jj,1) * umask(ji,jj,1) 100 hv_0(ji,jj) = e3v_0(ji,jj,1) * vmask(ji,jj,1) 101 END DO 102 END DO 100 103 DO jk = 2, jpk 101 104 !$OMP DO schedule(static) private(jj,ji) … … 128 131 CALL wrk_alloc( jpi,jpj, z1_hu_0, z1_hv_0 ) 129 132 ! 130 !$OMP PARALLEL WORKSHARE 131 z1_hu_0(:,:) = ssumask(:,:) / ( hu_0(:,:) + 1._wp - ssumask(:,:) ) ! _i mask due to ISF 132 z1_hv_0(:,:) = ssvmask(:,:) / ( hv_0(:,:) + 1._wp - ssvmask(:,:) ) 133 !$OMP END PARALLEL WORKSHARE 133 !$OMP PARALLEL DO schedule(static) private(jj,ji) 134 DO jj =1, jpj 135 DO ji=1, jpi 136 z1_hu_0(ji,jj) = ssumask(ji,jj) / ( hu_0(ji,jj) + 1._wp - ssumask(ji,jj) ) ! _i mask due to ISF 137 z1_hv_0(ji,jj) = ssvmask(ji,jj) / ( hv_0(ji,jj) + 1._wp - ssvmask(ji,jj) ) 138 END DO 139 END DO 134 140 ! 135 141 ! before ! now ! after ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domhgr.F90
r7037 r7508 140 140 IF( ie1e2u_v == 0 ) THEN ! e1e2u and e1e2v have not been read: compute them 141 141 ! ! e2u and e1v does not include a reduction in some strait: apply reduction 142 !$OMP PARALLEL WORKSHARE 143 e1e2u (:,:) = e1u(:,:) * e2u(:,:) 144 e1e2v (:,:) = e1v(:,:) * e2v(:,:) 145 !$OMP END PARALLEL WORKSHARE 142 !$OMP PARALLEL DO schedule(static) private(jj, ji) 143 DO jj = 1, jpj 144 DO ji = 1, jpi 145 e1e2u (ji,jj) = e1u(ji,jj) * e2u(ji,jj) 146 e1e2v (ji,jj) = e1v(ji,jj) * e2v(ji,jj) 147 END DO 148 END DO 146 149 ENDIF 147 150 ! … … 222 225 ! Horizontal scale factors (in meters) 223 226 ! ====== 224 !$OMP WORKSHARE 225 e1t(:,:) = ppe1_m ; e2t(:,:) = ppe2_m 226 e1u(:,:) = ppe1_m ; e2u(:,:) = ppe2_m 227 e1v(:,:) = ppe1_m ; e2v(:,:) = ppe2_m 228 e1f(:,:) = ppe1_m ; e2f(:,:) = ppe2_m 229 !$OMP END WORKSHARE NOWAIT 227 !$OMP DO schedule(static) private(jj, ji) 228 DO jj = 1, jpj 229 DO ji = 1, jpi 230 e1t(ji,jj) = ppe1_m ; e2t(ji,jj) = ppe2_m 231 e1u(ji,jj) = ppe1_m ; e2u(ji,jj) = ppe2_m 232 e1v(ji,jj) = ppe1_m ; e2v(ji,jj) = ppe2_m 233 e1f(ji,jj) = ppe1_m ; e2f(ji,jj) = ppe2_m 234 END DO 235 END DO 236 !$OMP END DO NOWAIT 230 237 !$OMP END PARALLEL 231 238 ! … … 330 337 ! Horizontal scale factors (in meters) 331 338 ! ====== 332 !$OMP WORKSHARE 333 e1t(:,:) = ze1 ; e2t(:,:) = ze1 334 e1u(:,:) = ze1 ; e2u(:,:) = ze1 335 e1v(:,:) = ze1 ; e2v(:,:) = ze1 336 e1f(:,:) = ze1 ; e2f(:,:) = ze1 337 !$OMP END WORKSHARE 339 !$OMP DO schedule(static) private(jj, ji) 340 DO jj = 1, jpj 341 DO ji = 1, jpi 342 e1t(ji,jj) = ze1 ; e2t(ji,jj) = ze1 343 e1u(ji,jj) = ze1 ; e2u(ji,jj) = ze1 344 e1v(ji,jj) = ze1 ; e2v(ji,jj) = ze1 345 e1f(ji,jj) = ze1 ; e2f(ji,jj) = ze1 346 END DO 347 END DO 338 348 !$OMP END PARALLEL 339 349 ! … … 347 357 ! ----------------------------- 348 358 ! 
349 !$OMP PARALLEL WORKSHARE 350 r1_e1t(:,:) = 1._wp / e1t(:,:) ; r1_e2t (:,:) = 1._wp / e2t(:,:) 351 r1_e1u(:,:) = 1._wp / e1u(:,:) ; r1_e2u (:,:) = 1._wp / e2u(:,:) 352 r1_e1v(:,:) = 1._wp / e1v(:,:) ; r1_e2v (:,:) = 1._wp / e2v(:,:) 353 r1_e1f(:,:) = 1._wp / e1f(:,:) ; r1_e2f (:,:) = 1._wp / e2f(:,:) 354 ! 355 e1e2t (:,:) = e1t(:,:) * e2t(:,:) ; r1_e1e2t(:,:) = 1._wp / e1e2t(:,:) 356 e1e2f (:,:) = e1f(:,:) * e2f(:,:) ; r1_e1e2f(:,:) = 1._wp / e1e2f(:,:) 357 !$OMP END PARALLEL WORKSHARE 359 !$OMP PARALLEL DO schedule(static) private(jj, ji) 360 DO jj = 1, jpj 361 DO ji = 1, jpi 362 r1_e1t(ji,jj) = 1._wp / e1t(ji,jj) ; r1_e2t (ji,jj) = 1._wp / e2t(ji,jj) 363 r1_e1u(ji,jj) = 1._wp / e1u(ji,jj) ; r1_e2u (ji,jj) = 1._wp / e2u(ji,jj) 364 r1_e1v(ji,jj) = 1._wp / e1v(ji,jj) ; r1_e2v (ji,jj) = 1._wp / e2v(ji,jj) 365 r1_e1f(ji,jj) = 1._wp / e1f(ji,jj) ; r1_e2f (ji,jj) = 1._wp / e2f(ji,jj) 366 ! 367 e1e2t (ji,jj) = e1t(ji,jj) * e2t(ji,jj) ; r1_e1e2t(ji,jj) = 1._wp / e1e2t(ji,jj) 368 e1e2f (ji,jj) = e1f(ji,jj) * e2f(ji,jj) ; r1_e1e2f(ji,jj) = 1._wp / e1e2f(ji,jj) 369 END DO 370 END DO 371 358 372 IF( jphgr_msh /= 0 ) THEN ! e1e2u and e1e2v have not been set: compute them 359 !$OMP PARALLEL WORKSHARE 360 e1e2u (:,:) = e1u(:,:) * e2u(:,:) 361 e1e2v (:,:) = e1v(:,:) * e2v(:,:) 362 !$OMP END PARALLEL WORKSHARE 373 !$OMP PARALLEL DO schedule(static) private(jj, ji) 374 DO jj = 1, jpj 375 DO ji = 1, jpi 376 e1e2u (ji,jj) = e1u(ji,jj) * e2u(ji,jj) 377 e1e2v (ji,jj) = e1v(ji,jj) * e2v(ji,jj) 378 END DO 379 END DO 363 380 ENDIF 364 !$OMP PARALLEL WORKSHARE 365 r1_e1e2u(:,:) = 1._wp / e1e2u(:,:) ! compute their invert in both cases 366 r1_e1e2v(:,:) = 1._wp / e1e2v(:,:) 367 ! 368 e2_e1u(:,:) = e2u(:,:) / e1u(:,:) 369 e1_e2v(:,:) = e1v(:,:) / e2v(:,:) 370 !$OMP END PARALLEL WORKSHARE 381 !$OMP PARALLEL DO schedule(static) private(jj, ji) 382 DO jj = 1, jpj 383 DO ji = 1, jpi 384 r1_e1e2u(ji,jj) = 1._wp / e1e2u(ji,jj) ! 
compute their invert in both cases 385 r1_e1e2v(ji,jj) = 1._wp / e1e2v(ji,jj) 386 ! 387 e2_e1u(ji,jj) = e2u(ji,jj) / e1u(ji,jj) 388 e1_e2v(ji,jj) = e1v(ji,jj) / e2v(ji,jj) 389 END DO 390 END DO 371 391 372 392 IF( lwp .AND. nn_print >=1 .AND. .NOT.ln_rstart ) THEN ! Control print : Grid informations (if not restart) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/dommsk.F90
r7037 r7508 158 158 159 159 ! (ISF) define barotropic mask and mask the ice shelf point 160 !$OMP WORKSHARE 161 ssmask(:,:)=tmask(:,:,1) ! at this stage ice shelf is not masked 162 !$OMP END WORKSHARE 160 !$OMP DO schedule(static) private(jj, ji) 161 DO jj = 1, jpj 162 DO ji = 1, jpi 163 ssmask(ji,jj)=tmask(ji,jj,1) ! at this stage ice shelf is not masked 164 END DO 165 END DO 163 166 !$OMP DO schedule(static) private(jk, jj, ji) 164 167 DO jk = 1, jpk … … 174 177 ! Interior domain mask (used for global sum) 175 178 ! -------------------- 176 !$OMP WORKSHARE 177 tmask_i(:,:) = ssmask(:,:) ! (ISH) tmask_i = 1 even on the ice shelf 178 179 tmask_h(:,:) = 1._wp ! 0 on the halo and 1 elsewhere 180 !$OMP END WORKSHARE NOWAIT 179 !$OMP DO schedule(static) private(jj, ji) 180 DO jj = 1, jpj 181 DO ji = 1, jpi 182 tmask_i(ji,jj) = ssmask(ji,jj) ! (ISH) tmask_i = 1 even on the ice shelf 183 tmask_h(ji,jj) = 1._wp ! 0 on the halo and 1 elsewhere 184 END DO 185 END DO 181 186 !$OMP END PARALLEL 182 187 iif = jpreci ! ??? … … 204 209 ENDIF 205 210 206 tmask_i(:,:) = tmask_i(:,:) * tmask_h(:,:) 211 !$OMP PARALLEL DO schedule(static) private(jj, ji) 212 DO jj = 1, jpj 213 DO ji = 1, jpi 214 tmask_i(ji,jj) = tmask_i(ji,jj) * tmask_h(ji,jj) 215 END DO 216 END DO 207 217 208 218 IF( jperio == 5 .OR. jperio == 6 ) THEN ! F-point pivot … … 251 261 !---------------------------------------------- 252 262 !$OMP PARALLEL 253 !$OMP WORKSHARE 254 wmask (:,:,1) = tmask(:,:,1) ! surface 255 wumask(:,:,1) = umask(:,:,1) 256 wvmask(:,:,1) = vmask(:,:,1) 257 !$OMP END WORKSHARE 263 !$OMP DO schedule(static) private(jj, ji) 264 DO jj = 1, jpj 265 DO ji = 1, jpi 266 wmask (ji,jj,1) = tmask(ji,jj,1) ! surface 267 wumask(ji,jj,1) = umask(ji,jj,1) 268 wvmask(ji,jj,1) = vmask(ji,jj,1) 269 END DO 270 END DO 258 271 !$OMP DO schedule(static) private(jk) 259 272 DO jk = 2, jpk ! interior values -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domngb.F90
r6748 r7508 42 42 CHARACTER(len=1), INTENT(in ) :: cdgrid ! grid name 'T', 'U', 'V', 'W' 43 43 ! 44 INTEGER :: ik ! working level44 INTEGER :: ik, jj, ji ! working level 45 45 INTEGER , DIMENSION(2) :: iloc 46 46 REAL(wp) :: zlon, zmini … … 52 52 CALL wrk_alloc( jpi,jpj, zglam, zgphi, zmask, zdist ) 53 53 ! 54 zmask(:,:) = 0._wp 54 !$OMP PARALLEL DO schedule(static) private(jj, ji) 55 DO jj = 1, jpj 56 DO ji = 1, jpi 57 zmask(ji,jj) = 0._wp 58 END DO 59 END DO 55 60 ik = 1 56 61 IF ( PRESENT(kkk) ) ik=kkk 57 62 SELECT CASE( cdgrid ) 58 63 CASE( 'U' ) 59 !$OMP PARALLEL WORKSHARE 60 zglam(:,:) = glamu(:,:) ; zgphi(:,:) = gphiu(:,:) ; zmask(nldi:nlei,nldj:nlej) = umask(nldi:nlei,nldj:nlej,ik) 61 !$OMP END PARALLEL WORKSHARE 64 !$OMP PARALLEL DO schedule(static) private(jj, ji) 65 DO jj = 1, jpj 66 DO ji = 1, jpi 67 zglam(ji,jj) = glamu(ji,jj) ; zgphi(ji,jj) = gphiu(ji,jj) 68 END DO 69 END DO 70 zmask(nldi:nlei,nldj:nlej) = umask(nldi:nlei,nldj:nlej,ik) 62 71 CASE( 'V' ) 63 !$OMP PARALLEL WORKSHARE 64 zglam(:,:) = glamv(:,:) ; zgphi(:,:) = gphiv(:,:) ; zmask(nldi:nlei,nldj:nlej) = vmask(nldi:nlei,nldj:nlej,ik) 65 !$OMP END PARALLEL WORKSHARE 72 !$OMP PARALLEL DO schedule(static) private(jj, ji) 73 DO jj = 1, jpj 74 DO ji = 1, jpi 75 zglam(ji,jj) = glamv(ji,jj) ; zgphi(ji,jj) = gphiv(ji,jj) 76 END DO 77 END DO 78 zmask(nldi:nlei,nldj:nlej) = vmask(nldi:nlei,nldj:nlej,ik) 66 79 CASE( 'F' ) 67 !$OMP PARALLEL WORKSHARE 68 zglam(:,:) = glamf(:,:) ; zgphi(:,:) = gphif(:,:) ; zmask(nldi:nlei,nldj:nlej) = fmask(nldi:nlei,nldj:nlej,ik) 69 !$OMP END PARALLEL WORKSHARE 80 !$OMP PARALLEL DO schedule(static) private(jj, ji) 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 zglam(ji,jj) = glamf(ji,jj) ; zgphi(ji,jj) = gphif(ji,jj) 84 END DO 85 END DO 86 zmask(nldi:nlei,nldj:nlej) = fmask(nldi:nlei,nldj:nlej,ik) 70 87 CASE DEFAULT 71 !$OMP PARALLEL WORKSHARE 72 zglam(:,:) = glamt(:,:) ; zgphi(:,:) = gphit(:,:) ; zmask(nldi:nlei,nldj:nlej) = tmask(nldi:nlei,nldj:nlej,ik) 73 !$OMP END PARALLEL 
WORKSHARE 88 !$OMP PARALLEL DO schedule(static) private(jj, ji) 89 DO jj = 1, jpj 90 DO ji = 1, jpi 91 zglam(ji,jj) = glamt(ji,jj) ; zgphi(ji,jj) = gphit(ji,jj) 92 END DO 93 END DO 94 zmask(nldi:nlei,nldj:nlej) = tmask(nldi:nlei,nldj:nlej,ik) 74 95 END SELECT 75 96 76 97 IF (jphgr_msh /= 2 .AND. jphgr_msh /= 3) THEN 77 98 zlon = MOD( plon + 720., 360. ) ! plon between 0 and 360 78 zglam(:,:) = MOD( zglam(:,:) + 720., 360. ) ! glam between 0 and 360 79 IF( zlon > 270. ) zlon = zlon - 360. ! zlon between -90 and 270 80 IF( zlon < 90. ) WHERE( zglam(:,:) > 180. ) zglam(:,:) = zglam(:,:) - 360. ! glam between -180 and 180 81 zglam(:,:) = zglam(:,:) - zlon 99 !$OMP PARALLEL DO schedule(static) private(jj, ji, zglam, zlon) 100 DO jj = 1, jpj 101 DO ji = 1, jpi 102 zglam(ji,jj) = MOD( zglam(ji,jj) + 720., 360. ) ! glam between 0 and 360 103 IF( zlon > 270. ) zlon = zlon - 360. ! zlon between -90 and 270 104 IF( zlon < 90. ) THEN 105 IF( zglam(ji,jj) > 180. ) zglam(ji,jj) = zglam(ji,jj) - 360. ! glam between -180 and 180 106 END IF 107 zglam(ji,jj) = zglam(ji,jj) - zlon 108 END DO 109 END DO 82 110 ELSE 83 zglam(:,:) = zglam(:,:) - plon 111 !$OMP PARALLEL DO schedule(static) private(jj, ji) 112 DO jj = 1, jpj 113 DO ji = 1, jpi 114 zglam(ji,jj) = zglam(ji,jj) - plon 115 END DO 116 END DO 84 117 END IF 85 !$OMP PARALLEL WORKSHARE 86 zgphi(:,:) = zgphi(:,:) - plat 87 zdist(:,:) = zglam(:,:) * zglam(:,:) + zgphi(:,:) * zgphi(:,:) 88 !$OMP END PARALLEL WORKSHARE 118 !$OMP PARALLEL DO schedule(static) private(jj, ji) 119 DO jj = 1, jpj 120 DO ji = 1, jpi 121 zgphi(ji,jj) = zgphi(ji,jj) - plat 122 zdist(ji,jj) = zglam(ji,jj) * zglam(ji,jj) + zgphi(ji,jj) * zgphi(ji,jj) 123 END DO 124 END DO 89 125 IF( lk_mpp ) THEN 90 126 CALL mpp_minloc( zdist(:,:), zmask, zmini, kii, kjj) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domzgr.F90
r7037 r7508 103 103 INTEGER :: ioptio, ibat ! local integer 104 104 INTEGER :: ios 105 INTEGER :: jj, ji ! dummy loop indices 105 106 ! 106 107 NAMELIST/namzgr/ ln_zco, ln_zps, ln_sco, ln_isfcav, ln_linssh … … 161 162 IF( lk_c1d ) THEN ! 1D config.: same mbathy value over the 3x3 domain 162 163 ibat = mbathy(2,2) 163 mbathy(:,:) = ibat 164 !$OMP PARALLEL DO schedule(static) private(jj, ji) 165 DO jj = 1, jpj 166 DO ji = 1, jpi 167 mbathy(ji,jj) = ibat 168 END DO 169 END DO 164 170 END IF 165 171 ! … … 419 425 IF( rn_bathy > 0.01 ) THEN 420 426 IF(lwp) WRITE(numout,*) ' Depth = rn_bathy read in namelist' 421 !$OMP PARALLEL WORKSHARE 422 zdta(:,:) = rn_bathy 423 !$OMP END PARALLEL WORKSHARE 427 !$OMP PARALLEL DO schedule(static) private(jj, ji) 428 DO jj = 1, jpjdta 429 DO ji = 1, jpidta 430 zdta(ji,jj) = rn_bathy 431 END DO 432 END DO 424 433 IF( ln_sco ) THEN ! s-coordinate (zsc ): idta()=jpk 425 !$OMP PARALLEL WORKSHARE 426 idta(:,:) = jpkm1 427 !$OMP END PARALLEL WORKSHARE 434 !$OMP PARALLEL DO schedule(static) private(jj, ji) 435 DO jj = 1, jpjdta 436 DO ji = 1, jpidta 437 idta(ji,jj) = jpkm1 438 END DO 439 END DO 428 440 ELSE ! z-coordinate (zco or zps): step-like topography 429 441 !$OMP PARALLEL 430 !$OMP WORKSHARE 431 idta(:,:) = jpkm1 432 !$OMP END WORKSHARE 442 !$OMP DO schedule(static) private(jj, ji) 443 DO jj = 1, jpjdta 444 DO ji = 1, jpidta 445 idta(ji,jj) = jpkm1 446 END DO 447 END DO 433 448 DO jk = 1, jpkm1 434 !$OMP WORKSHARE 435 WHERE( gdept_1d(jk) < zdta(:,:) .AND. zdta(:,:) <= gdept_1d(jk+1) ) idta(:,:) = jk 436 !$OMP END WORKSHARE 449 !$OMP DO schedule(static) private(jj, ji) 450 DO jj = 1, jpjdta 451 DO ji = 1, jpidta 452 IF ( gdept_1d(jk) < zdta(ji,jj) .AND. zdta(ji,jj) <= gdept_1d(jk+1) ) idta(ji,jj) = jk 453 END DO 454 END DO 437 455 END DO 438 456 !$OMP END PARALLEL … … 440 458 ELSE 441 459 IF(lwp) WRITE(numout,*) ' Depth = depthw(jpkm1)' 442 !$OMP PARALLEL WORKSHARE 443 idta(:,:) = jpkm1 ! 
before last level 444 zdta(:,:) = gdepw_1d(jpk) ! last w-point depth 445 !$OMP END PARALLEL WORKSHARE 460 !$OMP PARALLEL DO schedule(static) private(jj, ji) 461 DO jj = 1, jpjdta 462 DO ji = 1, jpidta 463 idta(ji,jj) = jpkm1 ! before last level 464 zdta(ji,jj) = gdepw_1d(jpk) ! last w-point depth 465 END DO 466 END DO 446 467 h_oce = gdepw_1d(jpk) 447 468 ENDIF … … 470 491 ! ! idta : 471 492 IF( ln_sco ) THEN ! s-coordinate (zsc ): idta()=jpk 472 !$OMP PARALLEL WORKSHARE 473 idta(:,:) = jpkm1 474 !$OMP END PARALLEL WORKSHARE 493 !$OMP PARALLEL DO schedule(static) private(jj, ji) 494 DO jj = 1, jpjdta 495 DO ji = 1, jpidta 496 idta(ji,jj) = jpkm1 497 END DO 498 END DO 475 499 ELSE ! z-coordinate (zco or zps): step-like topography 476 !$OMP PARALLEL WORKSHARE 477 idta(:,:) = jpkm1 478 !$OMP END PARALLEL WORKSHARE 500 !$OMP PARALLEL DO schedule(static) private(jj, ji) 501 DO jj = 1, jpjdta 502 DO ji = 1, jpidta 503 idta(ji,jj) = jpkm1 504 END DO 505 END DO 479 506 DO jk = 1, jpkm1 480 WHERE( gdept_1d(jk) < zdta(:,:) .AND. zdta(:,:) <= gdept_1d(jk+1) ) idta(:,:) = jk 507 !$OMP PARALLEL DO schedule(static) private(jj, ji) 508 DO jj = 1, jpjdta 509 DO ji = 1, jpidta 510 IF( gdept_1d(jk) < zdta(ji,jj) .AND. zdta(ji,jj) <= gdept_1d(jk+1) ) idta(ji,jj) = jk 511 END DO 512 END DO 481 513 END DO 482 514 ENDIF … … 485 517 ! ! Caution : idta on the global domain: use of jperio, not nperio 486 518 IF( jperio == 1 .OR. jperio == 4 .OR. 
jperio == 6 ) THEN 487 !$OMP PARALLEL WORKSHARE488 519 idta( : , 1 ) = -1 ; zdta( : , 1 ) = -1._wp 489 520 idta( : ,jpjdta) = 0 ; zdta( : ,jpjdta) = 0._wp 490 !$OMP END PARALLEL WORKSHARE491 521 ELSEIF( jperio == 2 ) THEN 492 !$OMP PARALLEL WORKSHARE493 522 idta( : , 1 ) = idta( : , 3 ) ; zdta( : , 1 ) = zdta( : , 3 ) 494 523 idta( : ,jpjdta) = 0 ; zdta( : ,jpjdta) = 0._wp 495 524 idta( 1 , : ) = 0 ; zdta( 1 , : ) = 0._wp 496 525 idta(jpidta, : ) = 0 ; zdta(jpidta, : ) = 0._wp 497 !$OMP END PARALLEL WORKSHARE498 526 ELSE 499 527 ih = 0 ; zh = 0._wp 500 528 IF( ln_sco ) ih = jpkm1 ; IF( ln_sco ) zh = h_oce 501 !$OMP PARALLEL WORKSHARE502 529 idta( : , 1 ) = ih ; zdta( : , 1 ) = zh 503 530 idta( : ,jpjdta) = ih ; zdta( : ,jpjdta) = zh 504 531 idta( 1 , : ) = ih ; zdta( 1 , : ) = zh 505 532 idta(jpidta, : ) = ih ; zdta(jpidta, : ) = zh 506 !$OMP END PARALLEL WORKSHARE507 533 ENDIF 508 534 509 535 ! ! local domain level and meter bathymetries (mbathy,bathy) 510 536 !$OMP PARALLEL 511 !$OMP WORKSHARE 512 mbathy(:,:) = 0 ! set to zero extra halo points 513 bathy (:,:) = 0._wp ! (require for mpp case) 514 !$OMP END WORKSHARE 537 !$OMP DO schedule(static) private(jj, ji) 538 DO jj = 1, jpj 539 DO ji = 1, jpi 540 mbathy(ji,jj) = 0 ! set to zero extra halo points 541 bathy (ji,jj) = 0._wp ! (require for mpp case) 542 END DO 543 END DO 515 544 !$OMP DO schedule(static) private(jj, ji) 516 545 DO jj = 1, nlcj ! interior values … … 521 550 END DO 522 551 !$OMP END DO NOWAIT 523 !$OMP WORKSHARE 524 risfdep(:,:)=0.e0 525 misfdep(:,:)=1 526 !$OMP END WORKSHARE NOWAIT 552 !$OMP DO schedule(static) private(jj, ji) 553 DO jj = 1, jpj 554 DO ji = 1, jpi 555 risfdep(ji,jj)=0.e0 556 misfdep(ji,jj)=1 557 END DO 558 END DO 527 559 !$OMP END PARALLEL 528 560 ! … … 537 569 CALL iom_get ( inum, jpdom_data, 'Bathy_level', bathy ) 538 570 CALL iom_close( inum ) 539 mbathy(:,:) = INT( bathy(:,:) ) 540 ! 
initialisation isf variables 541 !$OMP PARALLEL WORKSHARE 542 risfdep(:,:)=0._wp ; misfdep(:,:)=1 543 !$OMP END PARALLEL WORKSHARE 571 !$OMP PARALLEL DO schedule(static) private(jj, ji) 572 DO jj = 1, jpj 573 DO ji = 1, jpi 574 mbathy(ji,jj) = INT( bathy(ji,jj) ) ! initialisation isf variables 575 risfdep(ji,jj)=0._wp ; misfdep(ji,jj)=1 576 END DO 577 END DO 544 578 ! ! ===================== 545 579 IF( cp_cfg == "orca" .AND. jp_cfg == 2 ) THEN ! ORCA R2 configuration … … 579 613 ! 580 614 ! initialisation isf variables 581 !$OMP PARALLEL WORKSHARE 582 risfdep(:,:)=0._wp ; misfdep(:,:)=1 583 !$OMP END PARALLEL WORKSHARE 615 !$OMP PARALLEL DO schedule(static) private(jj, ji) 616 DO jj = 1, jpj 617 DO ji = 1, jpi 618 risfdep(ji,jj)=0._wp ; misfdep(ji,jj)=1 619 END DO 620 END DO 584 621 ! 585 622 IF ( ln_isfcav ) THEN … … 587 624 CALL iom_get ( inum, jpdom_data, 'isf_draft', risfdep ) 588 625 CALL iom_close( inum ) 589 WHERE( bathy(:,:) <= 0._wp ) risfdep(:,:) = 0._wp 590 591 ! set grounded point to 0 592 ! (a treshold could be set here if needed, or set it offline based on the grounded fraction) 593 WHERE ( bathy(:,:) <= risfdep(:,:) + rn_isfhmin ) 594 misfdep(:,:) = 0 ; risfdep(:,:) = 0._wp 595 mbathy (:,:) = 0 ; bathy (:,:) = 0._wp 596 END WHERE 626 !$OMP PARALLEL DO schedule(static) private(jj, ji) 627 DO jj = 1, jpj 628 DO ji = 1, jpi 629 IF( bathy(ji,jj) <= 0._wp ) risfdep(ji,jj) = 0._wp 630 ! set grounded point to 0 631 ! (a treshold could be set here if needed, or set it offline based on the grounded fraction) 632 IF ( bathy(ji,jj) <= risfdep(ji,jj) + rn_isfhmin ) THEN 633 misfdep(ji,jj) = 0 ; risfdep(ji,jj) = 0._wp 634 mbathy (ji,jj) = 0 ; bathy (ji,jj) = 0._wp 635 END IF 636 END DO 637 END DO 597 638 END IF 598 639 ! … … 601 642 ii0 = 140 ; ii1 = 140 ! Gibraltar Strait open 602 643 ij0 = 102 ; ij1 = 102 ! 
(Thomson, Ocean Modelling, 1995) 603 DO ji = mi0(ii0), mi1(ii1) 604 DO jj = mj0(ij0), mj1(ij1) 644 !$OMP PARALLEL DO schedule(static) private(jj, ji) 645 DO jj = mj0(ij0), mj1(ij1) 646 DO ji = mi0(ii0), mi1(ii1) 605 647 bathy(ji,jj) = 284._wp 606 648 END DO … … 611 653 ii0 = 160 ; ii1 = 160 ! Bab el mandeb Strait open 612 654 ij0 = 88 ; ij1 = 88 ! (Thomson, Ocean Modelling, 1995) 613 DO ji = mi0(ii0), mi1(ii1) 614 DO jj = mj0(ij0), mj1(ij1) 655 !$OMP PARALLEL DO schedule(static) private(jj, ji) 656 DO jj = mj0(ij0), mj1(ij1) 657 DO ji = mi0(ii0), mi1(ii1) 615 658 bathy(ji,jj) = 137._wp 616 659 END DO 617 660 END DO 618 661 IF(lwp) WRITE(numout,*) 619 662 IF(lwp) WRITE(numout,*) ' orca_r2: Bab el Mandeb strait open at i=',ii0,' j=',ij0 620 663 ! 621 664 ENDIF … … 636 679 ENDIF 637 680 zhmin = gdepw_1d(ik+1) ! minimum depth = ik+1 w-levels 638 WHERE( bathy(:,:) <= 0._wp ) ; bathy(:,:) = 0._wp ! min=0 over the lands 639 ELSE WHERE ; bathy(:,:) = MAX( zhmin , bathy(:,:) ) ! min=zhmin over the oceans 640 END WHERE 681 !$OMP PARALLEL DO schedule(static) private(jj, ji) 682 DO jj = 1, jpj 683 DO ji = 1, jpi 684 IF( bathy(ji,jj) <= 0._wp ) THEN 685 bathy(ji,jj) = 0._wp ! min=0 over the lands 686 ELSE 687 bathy(ji,jj) = MAX( zhmin , bathy(ji,jj) ) ! min=zhmin over the oceans 688 END IF 689 END DO 690 END DO 641 691 IF(lwp) write(numout,*) 'Minimum ocean depth: ', zhmin, ' minimum number of ocean levels : ', ik 642 692 ENDIF … … 763 813 ENDIF 764 814 IF( lk_mpp ) THEN 765 zbathy(:,:) = FLOAT( mbathy(:,:) ) 815 !$OMP PARALLEL DO schedule(static) private(jj, ji) 816 DO jj = 1, jpj 817 DO ji = 1, jpi 818 zbathy(ji,jj) = FLOAT( mbathy(ji,jj) ) 819 END DO 820 END DO 766 821 CALL lbc_lnk( zbathy, 'T', 1._wp ) 767 mbathy(:,:) = INT( zbathy(:,:) ) 822 !$OMP PARALLEL DO schedule(static) private(jj, ji) 823 DO jj = 1, jpj 824 DO ji = 1, jpi 825 mbathy(ji,jj) = INT( zbathy(ji,jj) ) 826 END DO 827 END DO 768 828 ENDIF 769 829 ! ! 
East-west cyclic boundary conditions … … 801 861 !!gm !!bug ??? think about it ! 802 862 ! ... mono- or macro-tasking: T-point, >0, 2D array, no slab 803 zbathy(:,:) = FLOAT( mbathy(:,:) ) 863 !$OMP PARALLEL DO schedule(static) private(jj, ji) 864 DO jj = 1, jpj 865 DO ji = 1, jpi 866 zbathy(ji,jj) = FLOAT( mbathy(ji,jj) ) 867 END DO 868 END DO 804 869 CALL lbc_lnk( zbathy, 'T', 1._wp ) 805 mbathy(:,:) = INT( zbathy(:,:) ) 870 !$OMP PARALLEL DO schedule(static) private(jj, ji) 871 DO jj = 1, jpj 872 DO ji = 1, jpi 873 mbathy(ji,jj) = INT( zbathy(ji,jj) ) 874 END DO 875 END DO 806 876 ENDIF 807 877 ! Number of ocean level inferior or equal to jpkm1 808 878 ikmax = 0 879 !$OMP PARALLEL DO schedule(static) private(jj, ji) reduction(MAX:ikmax) 809 880 DO jj = 1, jpj 810 881 DO ji = 1, jpi … … 852 923 IF(lwp) WRITE(numout,*) ' ~~~~~~~~~~~~~' 853 924 ! 854 mbkt(:,:) = MAX( mbathy(:,:) , 1 ) ! bottom k-index of T-level (=1 over land) 855 925 !$OMP PARALLEL 926 !$OMP DO schedule(static) private(jj, ji) 927 DO jj = 1, jpj 928 DO ji = 1, jpi 929 mbkt(ji,jj) = MAX( mbathy(ji,jj) , 1 ) ! bottom k-index of T-level (=1 over land) 930 END DO 931 END DO 856 932 ! ! bottom k-index of W-level = mbkt+1 933 !$OMP DO schedule(static) private(jj, ji) 857 934 DO jj = 1, jpjm1 ! bottom k-index of u- (v-) level 858 935 DO ji = 1, jpim1 … … 862 939 END DO 863 940 ! converte into REAL to use lbc_lnk ; impose a min value of 1 as a zero can be set in lbclnk 864 zmbk(:,:) = REAL( mbku(:,:), wp ) ; CALL lbc_lnk(zmbk,'U',1.) ; mbku (:,:) = MAX( INT( zmbk(:,:) ), 1 ) 865 zmbk(:,:) = REAL( mbkv(:,:), wp ) ; CALL lbc_lnk(zmbk,'V',1.) ; mbkv (:,:) = MAX( INT( zmbk(:,:) ), 1 ) 941 !$OMP DO schedule(static) private(jj, ji) 942 DO jj = 1, jpj 943 DO ji = 1, jpi 944 zmbk(ji,jj) = REAL( mbku(ji,jj), wp ) 945 END DO 946 END DO 947 !$OMP END PARALLEL 948 CALL lbc_lnk(zmbk,'U',1.) 
949 !$OMP PARALLEL DO schedule(static) private(jj, ji) 950 DO jj = 1, jpj 951 DO ji = 1, jpi 952 mbku (ji,jj) = MAX( INT( zmbk(ji,jj) ), 1 ) 953 zmbk(ji,jj) = REAL( mbkv(ji,jj), wp ) 954 END DO 955 END DO 956 CALL lbc_lnk(zmbk,'V',1.) 957 !$OMP PARALLEL DO schedule(static) private(jj, ji) 958 DO jj = 1, jpj 959 DO ji = 1, jpi 960 mbkv (ji,jj) = MAX( INT( zmbk(ji,jj) ), 1 ) 961 END DO 962 END DO 866 963 ! 867 964 CALL wrk_dealloc( jpi, jpj, zmbk ) … … 896 993 IF(lwp) WRITE(numout,*) ' ~~~~~~~~~~~~~' 897 994 ! 898 mikt(:,:) = MAX( misfdep(:,:) , 1 ) ! top k-index of T-level (=1) 995 !$OMP PARALLEL 996 !$OMP DO schedule(static) private(jj, ji) 997 DO jj = 1, jpj 998 DO ji = 1, jpi 999 mikt(ji,jj) = MAX( misfdep(ji,jj) , 1 ) ! top k-index of T-level (=1) 1000 END DO 1001 END DO 899 1002 ! ! top k-index of W-level (=mikt) 900 !$OMP PARALLELDO schedule(static) private(jj, ji)1003 !$OMP DO schedule(static) private(jj, ji) 901 1004 DO jj = 1, jpjm1 ! top k-index of U- (U-) level 902 1005 DO ji = 1, jpim1 … … 906 1009 END DO 907 1010 END DO 908 909 1011 ! converte into REAL to use lbc_lnk ; impose a min value of 1 as a zero can be set in lbclnk 910 zmik(:,:) = REAL( miku(:,:), wp ) ; CALL lbc_lnk(zmik,'U',1.) ; miku (:,:) = MAX( INT( zmik(:,:) ), 1 ) 911 zmik(:,:) = REAL( mikv(:,:), wp ) ; CALL lbc_lnk(zmik,'V',1.) ; mikv (:,:) = MAX( INT( zmik(:,:) ), 1 ) 912 zmik(:,:) = REAL( mikf(:,:), wp ) ; CALL lbc_lnk(zmik,'F',1.) ; mikf (:,:) = MAX( INT( zmik(:,:) ), 1 ) 1012 !$OMP DO schedule(static) private(jj, ji) 1013 DO jj = 1, jpj 1014 DO ji = 1, jpi 1015 zmik(ji,jj) = REAL( miku(ji,jj), wp ) 1016 END DO 1017 END DO 1018 !$OMP END PARALLEL 1019 CALL lbc_lnk(zmik,'U',1.) 1020 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1021 DO jj = 1, jpj 1022 DO ji = 1, jpi 1023 miku (ji,jj) = MAX( INT( zmik(ji,jj) ), 1 ) 1024 zmik(ji,jj) = REAL( mikv(ji,jj), wp ) 1025 END DO 1026 END DO 1027 CALL lbc_lnk(zmik,'V',1.) 
1028 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1029 DO jj = 1, jpj 1030 DO ji = 1, jpi 1031 mikv (ji,jj) = MAX( INT( zmik(ji,jj) ), 1 ) 1032 zmik(ji,jj) = REAL( mikf(ji,jj), wp ) 1033 END DO 1034 END DO 1035 CALL lbc_lnk(zmik,'F',1.) 1036 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1037 DO jj = 1, jpj 1038 DO ji = 1, jpi 1039 mikf (ji,jj) = MAX( INT( zmik(ji,jj) ), 1 ) 1040 END DO 1041 END DO 913 1042 ! 914 1043 CALL wrk_dealloc( jpi, jpj, zmik ) … … 1017 1146 ! =================== 1018 1147 zmax = gdepw_1d(jpk) + e3t_1d(jpk) ! maximum depth (i.e. the last ocean level thickness <= 2*e3t_1d(jpkm1) ) 1019 bathy(:,:) = MIN( zmax , bathy(:,:) ) ! bounded value of bathy (min already set at the end of zgr_bat) 1020 WHERE( bathy(:,:) == 0._wp ) ; mbathy(:,:) = 0 ! land : set mbathy to 0 1021 ELSE WHERE ; mbathy(:,:) = jpkm1 ! ocean : initialize mbathy to the max ocean level 1022 END WHERE 1148 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1149 DO jj = 1, jpj 1150 DO ji = 1, jpi 1151 bathy(ji,jj) = MIN( zmax , bathy(ji,jj) ) ! bounded value of bathy (min already set at the end of zgr_bat) 1152 IF ( bathy(ji,jj) == 0._wp ) THEN 1153 mbathy(ji,jj) = 0 ! land : set mbathy to 0 1154 ELSE 1155 mbathy(ji,jj) = jpkm1 ! ocean : initialize mbathy to the max ocean level 1156 END IF 1157 END DO 1158 END DO 1023 1159 1024 1160 ! Compute mbathy for ocean points (i.e. the number of ocean levels) … … 1045 1181 ! Scale factors and depth at T- and W-points 1046 1182 IF ( .NOT. ln_isfcav ) THEN 1183 !$OMP PARALLEL DO schedule(static) private(jj,ji,ik,zdepwp,ze3tp,ze3wp) 1047 1184 DO jj = 1, jpj 1048 1185 DO ji = 1, jpi … … 1106 1243 ! 1107 1244 ! Scale factors and depth at U-, V-, UW and VW-points 1108 !$OMP PARALLEL DO schedule(static) private(jk) 1245 !$OMP PARALLEL 1246 !$OMP DO schedule(static) private(jk) 1109 1247 DO jk = 1, jpk ! 
initialisation to z-scale factors 1110 1248 e3u_0 (:,:,jk) = e3t_1d(jk) … … 1114 1252 END DO 1115 1253 1116 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)1254 !$OMP DO schedule(static) private(jk, jj, ji) 1117 1255 DO jk = 1,jpk ! Computed as the minimum of neighbooring scale factors 1118 1256 DO jj = 1, jpjm1 … … 1125 1263 END DO 1126 1264 END DO 1265 !$OMP END PARALLEL 1127 1266 IF ( ln_isfcav ) THEN 1128 1267 ! (ISF) define e3uw (adapted for 2 cells in the water column) 1268 !$OMP PARALLEL DO schedule(static) private(jj, ji, ikb, ikt) 1129 1269 DO jj = 2, jpjm1 1130 1270 DO ji = 2, fs_jpim1 ! vector opt. … … 1145 1285 ! 1146 1286 1287 !$OMP PARALLEL 1288 !$OMP DO schedule(static) private(jk) 1147 1289 DO jk = 1, jpk ! set to z-scale factor if zero (i.e. along closed boundaries) 1148 1290 WHERE( e3u_0 (:,:,jk) == 0._wp ) e3u_0 (:,:,jk) = e3t_1d(jk) … … 1153 1295 1154 1296 ! Scale factor at F-point 1155 !$OMP PARALLELDO schedule(static) private(jk)1297 !$OMP DO schedule(static) private(jk) 1156 1298 DO jk = 1, jpk ! initialisation to z-scale factors 1157 1299 e3f_0(:,:,jk) = e3t_1d(jk) 1158 1300 END DO 1159 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)1301 !$OMP DO schedule(static) private(jk, jj, ji) 1160 1302 DO jk = 1, jpk ! Computed as the minimum of neighbooring V-scale factors 1161 1303 DO jj = 1, jpjm1 … … 1165 1307 END DO 1166 1308 END DO 1309 !$OMP END PARALLEL 1167 1310 CALL lbc_lnk( e3f_0, 'F', 1._wp ) ! Lateral boundary conditions 1168 1311 ! … … 1172 1315 !!gm bug ? : must be a do loop with mj0,mj1 1173 1316 ! 1174 !$OMP PARALLEL WORKSHARE1175 1317 e3t_0(:,mj0(1),:) = e3t_0(:,mj0(2),:) ! we duplicate factor scales for jj = 1 and jj = 2 1176 1318 e3w_0(:,mj0(1),:) = e3w_0(:,mj0(2),:) … … 1178 1320 e3v_0(:,mj0(1),:) = e3v_0(:,mj0(2),:) 1179 1321 e3f_0(:,mj0(1),:) = e3f_0(:,mj0(2),:) 1180 !$OMP END PARALLEL WORKSHARE1181 1322 ! 
Control of the sign 1182 1323 IF( MINVAL( e3t_0 (:,:,:) ) <= 0._wp ) CALL ctl_stop( ' zgr_zps : e r r o r e3t_0 <= 0' ) 1183 1324 IF( MINVAL( e3w_0 (:,:,:) ) <= 0._wp ) CALL ctl_stop( ' zgr_zps : e r r o r e3w_0 <= 0' ) 1325 1326 1184 1327 IF( MINVAL( gdept_0(:,:,:) ) < 0._wp ) CALL ctl_stop( ' zgr_zps : e r r o r gdept_0 < 0' ) 1185 1328 IF( MINVAL( gdepw_0(:,:,:) ) < 0._wp ) CALL ctl_stop( ' zgr_zps : e r r o r gdepw_0 < 0' ) … … 1202 1345 ELSE ! no cavity 1203 1346 !$OMP PARALLEL 1204 !$OMP WORKSHARE 1205 gde3w_0(:,:,1) = 0.5_wp * e3w_0(:,:,1) 1206 !$OMP END WORKSHARE 1347 !$OMP DO schedule(static) private(jj, ji) 1348 DO jj = 1, jpj 1349 DO ji = 1, jpi 1350 gde3w_0(ji,jj,1) = 0.5_wp * e3w_0(ji,jj,1) 1351 END DO 1352 END DO 1207 1353 DO jk = 2, jpk 1208 1354 !$OMP DO schedule(static) private(jj, ji) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/dtatsd.F90
r7037 r7508 180 180 ENDIF 181 181 ! 182 !$OMP PARALLEL WORKSHARE 183 ptsd(:,:,:,jp_tem) = sf_tsd(jp_tem)%fnow(:,:,:) ! NO mask 184 ptsd(:,:,:,jp_sal) = sf_tsd(jp_sal)%fnow(:,:,:) 185 !$OMP END PARALLEL WORKSHARE 182 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 183 DO jk = 1, jpk 184 DO jj = 1, jpj 185 DO ji = 1, jpi 186 ptsd(ji,jj,jk,jp_tem) = sf_tsd(jp_tem)%fnow(ji,jj,jk) ! NO mask 187 ptsd(ji,jj,jk,jp_sal) = sf_tsd(jp_sal)%fnow(ji,jj,jk) 188 END DO 189 END DO 190 END DO 186 191 ! 187 192 IF( ln_sco ) THEN !== s- or mixed s-zps-coordinate ==! … … 228 233 ELSE !== z- or zps- coordinate ==! 229 234 ! 230 !$OMP PARALLEL WORKSHARE 231 ptsd(:,:,:,jp_tem) = ptsd(:,:,:,jp_tem) * tmask(:,:,:) ! Mask 232 ptsd(:,:,:,jp_sal) = ptsd(:,:,:,jp_sal) * tmask(:,:,:) 233 !$OMP END PARALLEL WORKSHARE 235 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 236 DO jk = 1, jpk 237 DO jj = 1, jpj 238 DO ji = 1, jpi 239 ptsd(ji,jj,jk,jp_tem) = ptsd(ji,jj,jk,jp_tem) * tmask(ji,jj,jk) ! Mask 240 ptsd(ji,jj,jk,jp_sal) = ptsd(ji,jj,jk,jp_sal) * tmask(ji,jj,jk) 241 END DO 242 END DO 243 END DO 234 244 ! 235 245 IF( ln_zps ) THEN ! zps-coordinate (partial steps) interpolation at the last ocean level -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/istate.F90
r7037 r7508 64 64 !! ** Purpose : Initialization of the dynamics and tracer fields. 65 65 !!---------------------------------------------------------------------- 66 INTEGER :: ji, jj, jk ! dummy loop indices66 INTEGER :: ji, jj, jk, jp ! dummy loop indices 67 67 REAL(wp), POINTER, DIMENSION(:,:,:,:) :: zuvd ! U & V data workspace 68 68 !!---------------------------------------------------------------------- … … 78 78 IF( lk_c1d ) CALL dta_uvd_init ! Initialization of U & V input data 79 79 80 !$OMP PARALLEL WORKSHARE 81 rhd (:,:,: ) = 0._wp ; rhop (:,:,: ) = 0._wp ! set one for all to 0 at level jpk 82 rn2b (:,:,: ) = 0._wp ; rn2 (:,:,: ) = 0._wp ! set one for all to 0 at levels 1 and jpk 83 tsa (:,:,:,:) = 0._wp ! set one for all to 0 at level jpk 84 rab_b(:,:,:,:) = 0._wp ; rab_n(:,:,:,:) = 0._wp ! set one for all to 0 at level jpk 85 !$OMP END PARALLEL WORKSHARE 80 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 81 DO jk = 1, jpk 82 DO jj = 1, jpj 83 DO ji = 1, jpi 84 rhd (ji,jj,jk ) = 0._wp ; rhop (ji,jj,jk ) = 0._wp ! set one for all to 0 at level jpk 85 rn2b (ji,jj,jk ) = 0._wp ; rn2 (ji,jj,jk ) = 0._wp ! set one for all to 0 at levels 1 and jpk 86 tsa (ji,jj,jk,:) = 0._wp ! set one for all to 0 at level jpk 87 rab_b(ji,jj,jk,:) = 0._wp ; rab_n(ji,jj,jk,:) = 0._wp ! set one for all to 0 at level jpk 88 END DO 89 END DO 90 END DO 86 91 87 92 IF( ln_rstart ) THEN ! Restart from a file … … 98 103 ! ! Initialization of ocean to zero 99 104 ! before fields ! 
now fields 100 !$OMP PARALLEL WORKSHARE 101 sshb (:,:) = 0._wp ; sshn (:,:) = 0._wp 102 ub (:,:,:) = 0._wp ; un (:,:,:) = 0._wp 103 vb (:,:,:) = 0._wp ; vn (:,:,:) = 0._wp 104 hdivn(:,:,:) = 0._wp 105 !$OMP END PARALLEL WORKSHARE 105 !$OMP PARALLEL 106 !$OMP DO schedule(static) private(jj, ji) 107 DO jj = 1, jpj 108 DO ji = 1, jpi 109 sshb (ji,jj) = 0._wp ; sshn (ji,jj) = 0._wp 110 END DO 111 END DO 112 !$OMP END DO NOWAIT 113 !$OMP DO schedule(static) private(jk, jj, ji) 114 DO jk = 1, jpk 115 DO jj = 1, jpj 116 DO ji = 1, jpi 117 ub (ji,jj,jk) = 0._wp ; un (ji,jj,jk) = 0._wp 118 vb (ji,jj,jk) = 0._wp ; vn (ji,jj,jk) = 0._wp 119 hdivn(ji,jj,jk) = 0._wp 120 END DO 121 END DO 122 END DO 123 !$OMP END PARALLEL 106 124 ! 107 125 IF( cp_cfg == 'eel' ) THEN … … 112 130 IF ( ln_tsd_init ) THEN ! read 3D T and S data at nit000 113 131 CALL dta_tsd( nit000, tsb ) 114 !$OMP PARALLEL WORKSHARE 115 tsn(:,:,:,:) = tsb(:,:,:,:) 116 !$OMP END PARALLEL WORKSHARE 132 !$OMP PARALLEL DO schedule(static) private(jp, jk, jj, ji) 133 DO jp = 1, jpts 134 DO jk = 1, jpk 135 DO jj = 1, jpj 136 DO ji = 1, jpi 137 tsn(ji,jj,jk,jp) = tsb(ji,jj,jk,jp) 138 END DO 139 END DO 140 END DO 141 END DO 117 142 ! 118 143 ELSE ! Initial T-S fields defined analytically … … 122 147 CALL wrk_alloc( jpi,jpj,jpk,2, zuvd ) 123 148 CALL dta_uvd( nit000, zuvd ) 124 !$OMP PARALLEL WORKSHARE 125 ub(:,:,:) = zuvd(:,:,:,1) ; un(:,:,:) = ub(:,:,:) 126 vb(:,:,:) = zuvd(:,:,:,2) ; vn(:,:,:) = vb(:,:,:) 127 !$OMP END PARALLEL WORKSHARE 149 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 150 DO jk = 1, jpk 151 DO jj = 1, jpj 152 DO ji = 1, jpi 153 ub(ji,jj,jk) = zuvd(ji,jj,jk,1) ; un(ji,jj,jk) = ub(ji,jj,jk) 154 vb(ji,jj,jk) = zuvd(ji,jj,jk,2) ; vn(ji,jj,jk) = vb(ji,jj,jk) 155 END DO 156 END DO 157 END DO 128 158 CALL wrk_dealloc( jpi,jpj,jpk,2, zuvd ) 129 159 ENDIF … … 146 176 ! 
147 177 !$OMP PARALLEL 148 !$OMP WORKSHARE 149 un_b(:,:) = 0._wp ; vn_b(:,:) = 0._wp 150 ub_b(:,:) = 0._wp ; vb_b(:,:) = 0._wp 151 !$OMP END WORKSHARE 178 !$OMP DO schedule(static) private(jj, ji) 179 DO jj = 1, jpj 180 DO ji = 1, jpi 181 un_b(ji,jj) = 0._wp ; vn_b(ji,jj) = 0._wp 182 ub_b(ji,jj) = 0._wp ; vb_b(ji,jj) = 0._wp 183 END DO 184 END DO 152 185 ! 153 186 !!gm the use of umsak & vmask is not necessary belox as un, vn, ub, vb are always masked … … 165 198 END DO 166 199 ! 167 !$OMP WORKSHARE 168 un_b(:,:) = un_b(:,:) * r1_hu_n(:,:) 169 vn_b(:,:) = vn_b(:,:) * r1_hv_n(:,:) 170 ! 171 ub_b(:,:) = ub_b(:,:) * r1_hu_b(:,:) 172 vb_b(:,:) = vb_b(:,:) * r1_hv_b(:,:) 173 !$OMP END WORKSHARE NOWAIT 200 !$OMP DO schedule(static) private(jj, ji) 201 DO jj = 1, jpj 202 DO ji = 1, jpi 203 un_b(ji,jj) = un_b(ji,jj) * r1_hu_n(ji,jj) 204 vn_b(ji,jj) = vn_b(ji,jj) * r1_hv_n(ji,jj) 205 ! 206 ub_b(ji,jj) = ub_b(ji,jj) * r1_hu_b(ji,jj) 207 vb_b(ji,jj) = vb_b(ji,jj) * r1_hv_b(ji,jj) 208 END DO 209 END DO 174 210 !$OMP END PARALLEL 175 211 ! … … 406 442 CALL iom_close( inum ) 407 443 408 !$OMP PARALLEL WORKSHARE 409 tsn(:,:,:,jp_tem) = tsn(:,:,:,jp_tem) * tmask(:,:,:) 410 tsb(:,:,:,jp_tem) = tsn(:,:,:,jp_tem) 411 !$OMP END PARALLEL WORKSHARE 444 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 445 DO jk = 1, jpk 446 DO jj = 1, jpj 447 DO ji = 1, jpi 448 tsn(ji,jj,jk,jp_tem) = tsn(ji,jj,jk,jp_tem) * tmask(ji,jj,jk) 449 tsb(ji,jj,jk,jp_tem) = tsn(ji,jj,jk,jp_tem) 450 END DO 451 END DO 452 END DO 412 453 413 454 ! 
Read salinity field … … 417 458 CALL iom_close( inum ) 418 459 419 !$OMP PARALLEL WORKSHARE 420 tsn(:,:,:,jp_sal) = tsn(:,:,:,jp_sal) * tmask(:,:,:) 421 tsb(:,:,:,jp_sal) = tsn(:,:,:,jp_sal) 422 !$OMP END PARALLEL WORKSHARE 460 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 461 DO jk = 1, jpk 462 DO jj = 1, jpj 463 DO ji = 1, jpi 464 tsn(ji,jj,jk,jp_sal) = tsn(ji,jj,jk,jp_sal) * tmask(ji,jj,jk) 465 tsb(ji,jj,jk,jp_sal) = tsn(ji,jj,jk,jp_sal) 466 END DO 467 END DO 468 END DO 423 469 ! 424 470 END SELECT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynbfr.F90
r6748 r7508 47 47 INTEGER, INTENT(in) :: kt ! ocean time-step index 48 48 !! 49 INTEGER :: j i, jj ! dummy loop indexes49 INTEGER :: jk, ji, jj ! dummy loop indexes 50 50 INTEGER :: ikbu, ikbv ! local integers 51 51 REAL(wp) :: zm1_2dt ! local scalar … … 65 65 IF( l_trddyn ) THEN ! trends: store the input trends 66 66 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 67 !$OMP PARALLEL WORKSHARE 68 ztrdu(:,:,:) = ua(:,:,:) 69 ztrdv(:,:,:) = va(:,:,:) 70 !$OMP END PARALLEL WORKSHARE 67 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 68 DO jk = 1, jpk 69 DO jj = 1, jpj 70 DO ji = 1, jpi 71 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 72 ztrdv(ji,jj,jk) = va(ji,jj,jk) 73 END DO 74 END DO 75 END DO 71 76 ENDIF 72 77 … … 102 107 ! 103 108 IF( l_trddyn ) THEN ! trends: send trends to trddyn for further diagnostics 104 !$OMP PARALLEL WORKSHARE 105 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 106 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 107 !$OMP END PARALLEL WORKSHARE 109 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 110 DO jk = 1, jpk 111 DO jj = 1, jpj 112 DO ji = 1, jpi 113 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 114 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 115 END DO 116 END DO 117 END DO 108 118 CALL trd_dyn( ztrdu(:,:,:), ztrdv(:,:,:), jpdyn_bfr, kt ) 109 119 CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynhpg.F90
r7037 r7508 84 84 !!---------------------------------------------------------------------- 85 85 INTEGER, INTENT(in) :: kt ! ocean time-step index 86 INTEGER :: jk, jj, ji 86 87 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdu, ztrdv 87 88 !!---------------------------------------------------------------------- … … 90 91 ! 91 92 IF( l_trddyn ) THEN ! Temporary saving of ua and va trends (l_trddyn) 92 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 93 !$OMP PARALLEL WORKSHARE 94 ztrdu(:,:,:) = ua(:,:,:) 95 ztrdv(:,:,:) = va(:,:,:) 96 !$OMP END PARALLEL WORKSHARE 93 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 94 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 95 DO jk = 1, jpk 96 DO jj = 1, jpj 97 DO ji = 1, jpi 98 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 99 ztrdv(ji,jj,jk) = va(ji,jj,jk) 100 END DO 101 END DO 102 END DO 97 103 ENDIF 98 104 ! … … 107 113 ! 108 114 IF( l_trddyn ) THEN ! save the hydrostatic pressure gradient trends for momentum trend diagnostics 109 !$OMP PARALLEL WORKSHARE 110 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 111 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 112 !$OMP END PARALLEL WORKSHARE 115 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 116 DO jk = 1, jpk 117 DO jj = 1, jpj 118 DO ji = 1, jpi 119 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 120 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 121 END DO 122 END DO 123 END DO 113 124 CALL trd_dyn( ztrdu, ztrdv, jpdyn_hpg, kt ) 114 125 CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv ) … … 202 213 ! 203 214 ! initialisation of ice shelf load 204 IF ( .NOT. ln_isfcav ) riceload(:,:)=0.0 215 IF ( .NOT. ln_isfcav ) THEN 216 !$OMP PARALLEL DO schedule(static) private(jj, ji) 217 DO jj = 1, jpj 218 DO ji = 1, jpi 219 riceload(ji,jj)=0.0 220 END DO 221 END DO 222 END IF 205 223 IF ( ln_isfcav ) THEN 206 224 CALL wrk_alloc( jpi,jpj, 2, ztstop) … … 216 234 217 235 ! 
assume water displaced by the ice shelf is at T=-1.9 and S=34.4 (rude) 218 ztstop(:,:,1)=-1.9_wp ; ztstop(:,:,2)=34.4_wp 236 !$OMP PARALLEL DO schedule(static) private(jj, ji) 237 DO jj = 1, jpj 238 DO ji = 1, jpi 239 ztstop(ji,jj,1)=-1.9_wp 240 ztstop(ji,jj,2)=34.4_wp 241 END DO 242 END DO 219 243 220 244 ! compute density of the water displaced by the ice shelf … … 244 268 END DO 245 269 END DO 246 !$OMP WORKSHARE 247 riceload(:,:)=ziceload(:,:) ! need to be saved for diaar5 248 !$OMP END WORKSHARE NOWAIT 270 !$OMP DO schedule(static) private(jj, ji) 271 DO jj = 1, jpj 272 DO ji = 1, jpi 273 riceload(ji,jj)=ziceload(ji,jj) ! need to be saved for diaar5 274 END DO 275 END DO 249 276 !$OMP END PARALLEL 250 277 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynkeg.F90
r6748 r7508 92 92 IF( l_trddyn ) THEN ! Save ua and va trends 93 93 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 94 !$OMP PARALLEL WORKSHARE 95 ztrdu(:,:,:) = ua(:,:,:) 96 ztrdv(:,:,:) = va(:,:,:) 97 !$OMP END PARALLEL WORKSHARE 94 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 95 DO jk = 1, jpk 96 DO jj = 1, jpj 97 DO ji = 1, jpi 98 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 99 ztrdv(ji,jj,jk) = va(ji,jj,jk) 100 END DO 101 END DO 102 END DO 98 103 ENDIF 99 100 zhke(:,:,jpk) = 0._wp 104 !$OMP PARALLEL DO schedule(static) private(jj, ji) 105 DO jj = 1, jpj 106 DO ji = 1, jpi 107 zhke(ji,jj,jpk) = 0._wp 108 END DO 109 END DO 101 110 102 111 SELECT CASE ( kscheme ) !== Horizontal kinetic energy at T-point ==! … … 149 158 ! 150 159 IF( l_trddyn ) THEN ! save the Kinetic Energy trends for diagnostic 151 !$OMP PARALLEL WORKSHARE 152 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 153 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 154 !$OMP END PARALLEL WORKSHARE 160 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 161 DO jk = 1, jpk 162 DO jj = 1, jpj 163 DO ji = 1, jpi 164 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 165 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 166 END DO 167 END DO 168 END DO 155 169 CALL trd_dyn( ztrdu, ztrdv, jpdyn_keg, kt ) 156 170 CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynldf.F90
r6748 r7508 61 61 !!---------------------------------------------------------------------- 62 62 INTEGER, INTENT(in) :: kt ! ocean time-step index 63 INTEGER :: jk, jj, ji 63 64 ! 64 65 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdu, ztrdv … … 69 70 IF( l_trddyn ) THEN ! temporary save of momentum trends 70 71 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 71 !$OMP PARALLEL WORKSHARE 72 ztrdu(:,:,:) = ua(:,:,:) 73 ztrdv(:,:,:) = va(:,:,:) 74 !$OMP END PARALLEL WORKSHARE 72 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 73 DO jk = 1, jpk 74 DO jj = 1, jpj 75 DO ji = 1, jpi 76 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 77 ztrdv(ji,jj,jk) = va(ji,jj,jk) 78 END DO 79 END DO 80 END DO 75 81 ENDIF 76 82 … … 84 90 85 91 IF( l_trddyn ) THEN ! save the horizontal diffusive trends for further diagnostics 86 !$OMP PARALLEL WORKSHARE 87 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 88 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 89 !$OMP END PARALLEL WORKSHARE 92 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 93 DO jk = 1, jpk 94 DO jj = 1, jpj 95 DO ji = 1, jpi 96 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 97 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 98 END DO 99 END DO 100 END DO 90 101 CALL trd_dyn( ztrdu, ztrdv, jpdyn_ldf, kt ) 91 102 CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynldf_lap_blp.F90
r7037 r7508 132 132 !!---------------------------------------------------------------------- 133 133 INTEGER , INTENT(in ) :: kt ! ocean time-step index 134 INTEGER :: jk, jj, ji 134 135 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(in ) :: pub, pvb ! before velocity fields 135 136 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pua, pva ! momentum trend … … 148 149 ENDIF 149 150 ! 150 !$OMP PARALLEL WORKSHARE 151 zulap(:,:,:) = 0._wp 152 zvlap(:,:,:) = 0._wp 153 !$OMP END PARALLEL WORKSHARE 151 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 152 DO jk = 1, jpk 153 DO jj = 1, jpj 154 DO ji = 1, jpi 155 zulap(ji,jj,jk) = 0._wp 156 zvlap(ji,jj,jk) = 0._wp 157 END DO 158 END DO 159 END DO 154 160 ! 155 161 CALL dyn_ldf_lap( kt, pub, pvb, zulap, zvlap, 1 ) ! rotated laplacian applied to ptb (output in zlap) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynnxt.F90
r7037 r7508 116 116 ! Compute actual transport and replace it with ts estimate at "after" time step 117 117 !$OMP PARALLEL 118 !$OMP WORKSHARE 119 zue(:,:) = e3u_a(:,:,1) * ua(:,:,1) * umask(:,:,1) 120 zve(:,:) = e3v_a(:,:,1) * va(:,:,1) * vmask(:,:,1) 121 !$OMP END WORKSHARE 118 !$OMP DO schedule(static) private(jj, ji) 119 DO jj = 1, jpj 120 DO ji = 1, jpi 121 zue(ji,jj) = e3u_a(ji,jj,1) * ua(ji,jj,1) * umask(ji,jj,1) 122 zve(ji,jj) = e3v_a(ji,jj,1) * va(ji,jj,1) * vmask(ji,jj,1) 123 END DO 124 END DO 122 125 DO jk = 2, jpkm1 123 126 !$OMP DO schedule(static) private(jj,ji) … … 176 179 ! 177 180 IF( ln_dyn_trd ) THEN ! 3D output: total momentum trends 178 !$OMP PARALLEL WORKSHARE 179 zua(:,:,:) = ( ua(:,:,:) - ub(:,:,:) ) * z1_2dt 180 zva(:,:,:) = ( va(:,:,:) - vb(:,:,:) ) * z1_2dt 181 !$OMP END PARALLEL WORKSHARE 182 CALL iom_put( "utrd_tot", zua ) ! total momentum trends, except the asselin time filter 183 CALL iom_put( "vtrd_tot", zva ) 181 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 182 DO jk = 1, jpk 183 DO jj = 1, jpj 184 DO ji = 1, jpi 185 zua(ji,jj,jk) = ( ua(ji,jj,jk) - ub(ji,jj,jk) ) * z1_2dt 186 zva(ji,jj,jk) = ( va(ji,jj,jk) - vb(ji,jj,jk) ) * z1_2dt 187 END DO 188 END DO 189 END DO 190 CALL iom_put( "utrd_tot", zua ) ! total momentum trends, except the asselin time filter 191 CALL iom_put( "vtrd_tot", zva ) 184 192 ENDIF 185 193 ! 186 !$OMP PARALLEL WORKSHARE 187 zua(:,:,:) = un(:,:,:) ! save the now velocity before the asselin filter 188 zva(:,:,:) = vn(:,:,:) ! (caution: there will be a shift by 1 timestep in the 189 !$OMP END PARALLEL WORKSHARE 194 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 195 DO jk = 1, jpk 196 DO jj = 1, jpj 197 DO ji = 1, jpi 198 zua(ji,jj,jk) = un(ji,jj,jk) ! save the now velocity before the asselin filter 199 zva(ji,jj,jk) = vn(ji,jj,jk) ! (caution: there will be a shift by 1 timestep in the 200 END DO 201 END DO 202 END DO 190 203 ! ! 
computation of the asselin filter trends) 191 204 ENDIF … … 318 331 END DO 319 332 END DO 320 !$OMP WORKSHARE 321 e3u_b(:,:,1:jpkm1) = ze3u_f(:,:,1:jpkm1) ! e3u_b <-- filtered scale factor 322 e3v_b(:,:,1:jpkm1) = ze3v_f(:,:,1:jpkm1) 323 !$OMP END WORKSHARE NOWAIT 333 !$OMP DO schedule(static) private(jj, ji) 334 DO jj = 1, jpj 335 DO ji = 1, jpi 336 e3u_b(ji,jj,1:jpkm1) = ze3u_f(ji,jj,1:jpkm1) ! e3u_b <-- filtered scale factor 337 e3v_b(ji,jj,1:jpkm1) = ze3v_f(ji,jj,1:jpkm1) 338 END DO 339 END DO 324 340 !$OMP END PARALLEL 325 341 ! … … 333 349 ! Doing it here also means that asselin filter contribution is removed 334 350 !$OMP PARALLEL 335 !$OMP WORKSHARE 336 zue(:,:) = e3u_b(:,:,1) * ub(:,:,1) * umask(:,:,1) 337 zve(:,:) = e3v_b(:,:,1) * vb(:,:,1) * vmask(:,:,1) 338 !$OMP END WORKSHARE 351 !$OMP DO schedule(static) private(jj, ji) 352 DO jj = 1, jpj 353 DO ji = 1, jpi 354 zue(ji,jj) = e3u_b(ji,jj,1) * ub(ji,jj,1) * umask(ji,jj,1) 355 zve(ji,jj) = e3v_b(ji,jj,1) * vb(ji,jj,1) * vmask(ji,jj,1) 356 END DO 357 END DO 339 358 DO jk = 2, jpkm1 340 359 !$OMP DO schedule(static) private(jj, ji) … … 364 383 IF(.NOT.ln_linssh ) THEN 365 384 !$OMP PARALLEL 366 !$OMP WORKSHARE 367 hu_b(:,:) = e3u_b(:,:,1) * umask(:,:,1) 368 hv_b(:,:) = e3v_b(:,:,1) * vmask(:,:,1) 369 !$OMP END WORKSHARE 385 !$OMP DO schedule(static) private(jj, ji) 386 DO jj = 1, jpj 387 DO ji = 1, jpi 388 hu_b(ji,jj) = e3u_b(ji,jj,1) * umask(ji,jj,1) 389 hv_b(ji,jj) = e3v_b(ji,jj,1) * vmask(ji,jj,1) 390 END DO 391 END DO 370 392 DO jk = 2, jpkm1 371 393 !$OMP DO schedule(static) private(jj, ji) … … 378 400 !$OMP END DO 379 401 END DO 380 !$OMP WORKSHARE 381 r1_hu_b(:,:) = ssumask(:,:) / ( hu_b(:,:) + 1._wp - ssumask(:,:) ) 382 r1_hv_b(:,:) = ssvmask(:,:) / ( hv_b(:,:) + 1._wp - ssvmask(:,:) ) 383 !$OMP END WORKSHARE 402 !$OMP DO schedule(static) private(jj, ji) 403 DO jj = 1, jpj 404 DO ji = 1, jpi 405 r1_hu_b(ji,jj) = ssumask(ji,jj) / ( hu_b(ji,jj) + 1._wp - ssumask(ji,jj) ) 406 r1_hv_b(ji,jj) = 
ssvmask(ji,jj) / ( hv_b(ji,jj) + 1._wp - ssvmask(ji,jj) ) 407 END DO 408 END DO 384 409 !$OMP END PARALLEL 385 410 ENDIF 386 411 ! 387 412 !$OMP PARALLEL 388 !$OMP WORKSHARE 389 un_b(:,:) = e3u_a(:,:,1) * un(:,:,1) * umask(:,:,1) 390 ub_b(:,:) = e3u_b(:,:,1) * ub(:,:,1) * umask(:,:,1) 391 vn_b(:,:) = e3v_a(:,:,1) * vn(:,:,1) * vmask(:,:,1) 392 vb_b(:,:) = e3v_b(:,:,1) * vb(:,:,1) * vmask(:,:,1) 393 !$OMP END WORKSHARE 413 !$OMP DO schedule(static) private(jj, ji) 414 DO jj = 1, jpj 415 DO ji = 1, jpi 416 un_b(ji,jj) = e3u_a(ji,jj,1) * un(ji,jj,1) * umask(ji,jj,1) 417 ub_b(ji,jj) = e3u_b(ji,jj,1) * ub(ji,jj,1) * umask(ji,jj,1) 418 vn_b(ji,jj) = e3v_a(ji,jj,1) * vn(ji,jj,1) * vmask(ji,jj,1) 419 vb_b(ji,jj) = e3v_b(ji,jj,1) * vb(ji,jj,1) * vmask(ji,jj,1) 420 END DO 421 END DO 394 422 DO jk = 2, jpkm1 395 423 !$OMP DO schedule(static) private(jj, ji) … … 403 431 END DO 404 432 END DO 405 !$OMP WORKSHARE 406 un_b(:,:) = un_b(:,:) * r1_hu_a(:,:) 407 vn_b(:,:) = vn_b(:,:) * r1_hv_a(:,:) 408 ub_b(:,:) = ub_b(:,:) * r1_hu_b(:,:) 409 vb_b(:,:) = vb_b(:,:) * r1_hv_b(:,:) 410 !$OMP END WORKSHARE NOWAIT 433 !$OMP DO schedule(static) private(jj, ji) 434 DO jj = 1, jpj 435 DO ji = 1, jpi 436 un_b(ji,jj) = un_b(ji,jj) * r1_hu_a(ji,jj) 437 vn_b(ji,jj) = vn_b(ji,jj) * r1_hv_a(ji,jj) 438 ub_b(ji,jj) = ub_b(ji,jj) * r1_hu_b(ji,jj) 439 vb_b(ji,jj) = vb_b(ji,jj) * r1_hv_b(ji,jj) 440 END DO 441 END DO 411 442 !$OMP END PARALLEL 412 443 ! … … 416 447 ENDIF 417 448 IF( l_trddyn ) THEN ! 
3D output: asselin filter trends on momentum 418 !$OMP PARALLEL WORKSHARE 419 zua(:,:,:) = ( ub(:,:,:) - zua(:,:,:) ) * z1_2dt 420 zva(:,:,:) = ( vb(:,:,:) - zva(:,:,:) ) * z1_2dt 421 !$OMP END PARALLEL WORKSHARE 422 CALL trd_dyn( zua, zva, jpdyn_atf, kt ) 449 !$OMP DO schedule(static) private(jk, jj, ji) 450 DO jk = 1, jpkm1 451 DO jj = 1, jpj 452 DO ji = 1, jpi 453 zua(ji,jj,jk) = ( ub(ji,jj,jk) - zua(ji,jj,jk) ) * z1_2dt 454 zva(ji,jj,jk) = ( vb(ji,jj,jk) - zva(ji,jj,jk) ) * z1_2dt 455 END DO 456 END DO 457 END DO 458 CALL trd_dyn( zua, zva, jpdyn_atf, kt ) 423 459 ENDIF 424 460 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg.F90
r7037 r7508 83 83 IF( l_trddyn ) THEN ! temporary save of ta and sa trends 84 84 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 85 !$OMP PARALLEL WORKSHARE 86 ztrdu(:,:,:) = ua(:,:,:) 87 ztrdv(:,:,:) = va(:,:,:) 88 !$OMP END PARALLEL WORKSHARE 85 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 86 DO jk = 1, jpk 87 DO jj = 1, jpj 88 DO ji = 1, jpi 89 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 90 ztrdv(ji,jj,jk) = va(ji,jj,jk) 91 END DO 92 END DO 93 END DO 89 94 ENDIF 90 95 ! … … 134 139 zgrau0r = - grav * r1_rau0 135 140 !$OMP PARALLEL 136 !$OMP WORKSHARE 137 zpice(:,:) = ( zintp * snwice_mass(:,:) + ( 1.- zintp ) * snwice_mass_b(:,:) ) * zgrau0r 138 !$OMP END WORKSHARE 141 !$OMP DO schedule(static) private(jj, ji) 142 DO jj = 1, jpj 143 DO ji = 1, jpi 144 zpice(ji,jj) = ( zintp * snwice_mass(ji,jj) + ( 1.- zintp ) * snwice_mass_b(ji,jj) ) * zgrau0r 145 END DO 146 END DO 139 147 !$OMP DO schedule(static) private(jj, ji) 140 148 DO jj = 2, jpjm1 … … 170 178 ! 171 179 IF( l_trddyn ) THEN ! save the surface pressure gradient trends for further diagnostics 172 !$OMP PARALLEL WORKSHARE 173 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 174 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 175 !$OMP END PARALLEL WORKSHARE 180 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 181 DO jk = 1, jpk 182 DO jj = 1, jpj 183 DO ji = 1, jpi 184 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 185 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 186 END DO 187 END DO 188 END DO 176 189 CALL trd_dyn( ztrdu, ztrdv, jpdyn_spg, kt ) 177 190 CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg_ts.F90
r7037 r7508 244 244 ! 245 245 !$OMP PARALLEL 246 !$OMP WORKSHARE247 ftne(1,:) = 0._wp ; ftnw(1,:) = 0._wp ; ftse(1,:) = 0._wp ; ftsw(1,:) = 0._wp248 !$OMP END WORKSHARE 249 246 !$OMP DO schedule(static) private(jj) 247 DO jj = 1, jpj 248 ftne(1,jj) = 0._wp ; ftnw(1,jj) = 0._wp ; ftse(1,jj) = 0._wp ; ftsw(1,jj) = 0._wp 249 END DO 250 250 !$OMP DO schedule(static) private(jj, ji) 251 251 DO jj = 2, jpj … … 261 261 ! 262 262 ELSE !== all other schemes (ENE, ENS, MIX) 263 !$OMP PARALLEL WORKSHARE 264 zwz(:,:) = 0._wp 265 zhf(:,:) = 0._wp 266 !$OMP END PARALLEL WORKSHARE 263 !$OMP PARALLEL DO schedule(static) private(jj, ji) 264 DO jj = 1, jpj 265 DO ji = 1, jpi 266 zwz(ji,jj) = 0._wp 267 zhf(ji,jj) = 0._wp 268 END DO 269 END DO 267 270 IF ( .not. ln_sco ) THEN 268 271 … … 276 279 ! zhf(:,:) = gdepw_0(:,:,jk+1) 277 280 ELSE 278 !$OMP PARALLEL WORKSHARE 279 zhf(:,:) = hbatf(:,:) 280 !$OMP END PARALLEL WORKSHARE 281 !$OMP PARALLEL DO schedule(static) private(jj, ji) 282 DO jj = 1, jpj 283 DO ji = 1, jpi 284 zhf(ji,jj) = hbatf(ji,jj) 285 END DO 286 END DO 281 287 END IF 282 288 … … 308 314 END DO 309 315 END DO 310 !$OMP WORKSHARE 311 zwz(:,:) = ff(:,:) * zwz(:,:) 312 !$OMP END WORKSHARE NOWAIT 316 !$OMP DO schedule(static) private(jj, ji) 317 DO jj = 1, jpj 318 DO ji = 1, jpi 319 zwz(ji,jj) = ff(ji,jj) * zwz(ji,jj) 320 END DO 321 END DO 313 322 !$OMP END PARALLEL 314 323 ENDIF … … 330 339 ! ! -------------------------------------------------- 331 340 !$OMP PARALLEL 332 !$OMP WORKSHARE 333 zu_frc(:,:) = 0._wp 334 zv_frc(:,:) = 0._wp 335 !$OMP END WORKSHARE 341 !$OMP DO schedule(static) private(jj, ji) 342 DO jj = 1, jpj 343 DO ji = 1, jpi 344 zu_frc(ji,jj) = 0._wp 345 zv_frc(ji,jj) = 0._wp 346 END DO 347 END DO 336 348 ! 337 349 DO jk = 1, jpkm1 … … 345 357 END DO 346 358 ! 347 !$OMP WORKSHARE 348 zu_frc(:,:) = zu_frc(:,:) * r1_hu_n(:,:) 349 zv_frc(:,:) = zv_frc(:,:) * r1_hv_n(:,:) 350 !$OMP END WORKSHARE 351 ! 
359 !$OMP DO schedule(static) private(jj, ji) 360 DO jj = 1, jpj 361 DO ji = 1, jpi 362 zu_frc(ji,jj) = zu_frc(ji,jj) * r1_hu_n(ji,jj) 363 zv_frc(ji,jj) = zv_frc(ji,jj) * r1_hv_n(ji,jj) 364 END DO 365 END DO 352 366 ! 353 367 ! !* baroclinic momentum trend (remove the vertical mean trend) … … 364 378 ! !* barotropic Coriolis trends (vorticity scheme dependent) 365 379 ! ! -------------------------------------------------------- 366 !$OMP WORKSHARE 367 zwx(:,:) = un_b(:,:) * hu_n(:,:) * e2u(:,:) ! now fluxes 368 zwy(:,:) = vn_b(:,:) * hv_n(:,:) * e1v(:,:) 369 !$OMP END WORKSHARE NOWAIT 380 !$OMP DO schedule(static) private(jj, ji) 381 DO jj = 1, jpj 382 DO ji = 1, jpi 383 zwx(ji,jj) = un_b(ji,jj) * hu_n(ji,jj) * e2u(ji,jj) ! now fluxes 384 zwy(ji,jj) = vn_b(ji,jj) * hv_n(ji,jj) * e1v(ji,jj) 385 END DO 386 END DO 370 387 !$OMP END PARALLEL 371 388 ! … … 419 436 IF( ln_wd ) THEN ! Calculating and applying W/D gravity filters 420 437 !$OMP PARALLEL 421 !$OMP WORKSHARE 422 wduflt1(:,:) = 1.0_wp 423 wdvflt1(:,:) = 1.0_wp 424 !$OMP END WORKSHARE 438 !$OMP DO schedule(static) private(jj, ji) 439 DO jj = 1, jpj 440 DO ji = 1, jpi 441 wduflt1(ji,jj) = 1.0_wp 442 wdvflt1(ji,jj) = 1.0_wp 443 END DO 444 END DO 425 445 !$OMP DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2) 426 446 DO jj = 2, jpjm1 … … 529 549 END DO 530 550 ELSE 531 !$OMP PARALLEL WORKSHARE 532 zu_frc(:,:) = zu_frc(:,:) + r1_hu_n(:,:) * bfrua(:,:) * zwx(:,:) 533 zv_frc(:,:) = zv_frc(:,:) + r1_hv_n(:,:) * bfrva(:,:) * zwy(:,:) 534 !$OMP END PARALLEL WORKSHARE 551 !$OMP PARALLEL DO schedule(static) private(jj,ji) 552 DO jj = 1, jpj 553 DO ji = 1, jpi 554 zu_frc(ji,jj) = zu_frc(ji,jj) + r1_hu_n(ji,jj) * bfrua(ji,jj) * zwx(ji,jj) 555 zv_frc(ji,jj) = zv_frc(ji,jj) + r1_hv_n(ji,jj) * bfrva(ji,jj) * zwy(ji,jj) 556 END DO 557 END DO 535 558 END IF 536 559 ! … … 559 582 ! 560 583 ! 
Note that the "unclipped" top friction parameter is used even with explicit drag 561 !$OMP PARALLEL WORKSHARE 562 zu_frc(:,:) = zu_frc(:,:) + r1_hu_n(:,:) * tfrua(:,:) * zwx(:,:) 563 zv_frc(:,:) = zv_frc(:,:) + r1_hv_n(:,:) * tfrva(:,:) * zwy(:,:) 564 !$OMP END PARALLEL WORKSHARE 584 !$OMP PARALLEL DO schedule(static) private(jj,ji) 585 DO jj = 1, jpj 586 DO ji = 1, jpi 587 zu_frc(ji,jj) = zu_frc(ji,jj) + r1_hu_n(ji,jj) * tfrua(ji,jj) * zwx(ji,jj) 588 zv_frc(ji,jj) = zv_frc(ji,jj) + r1_hv_n(ji,jj) * tfrva(ji,jj) * zwy(ji,jj) 589 END DO 590 END DO 565 591 ! 566 592 IF (ln_bt_fw) THEN ! Add wind forcing 567 !$OMP PARALLEL WORKSHARE 568 zu_frc(:,:) = zu_frc(:,:) + zraur * utau(:,:) * r1_hu_n(:,:) 569 zv_frc(:,:) = zv_frc(:,:) + zraur * vtau(:,:) * r1_hv_n(:,:) 570 !$OMP END PARALLEL WORKSHARE 593 !$OMP PARALLEL DO schedule(static) private(jj,ji) 594 DO jj = 1, jpj 595 DO ji = 1, jpi 596 zu_frc(ji,jj) = zu_frc(ji,jj) + zraur * utau(ji,jj) * r1_hu_n(ji,jj) 597 zv_frc(ji,jj) = zv_frc(ji,jj) + zraur * vtau(ji,jj) * r1_hv_n(ji,jj) 598 END DO 599 END DO 571 600 ELSE 572 !$OMP PARALLEL WORKSHARE 573 zu_frc(:,:) = zu_frc(:,:) + zraur * z1_2 * ( utau_b(:,:) + utau(:,:) ) * r1_hu_n(:,:) 574 zv_frc(:,:) = zv_frc(:,:) + zraur * z1_2 * ( vtau_b(:,:) + vtau(:,:) ) * r1_hv_n(:,:) 575 !$OMP END PARALLEL WORKSHARE 601 !$OMP PARALLEL DO schedule(static) private(jj,ji) 602 DO jj = 1, jpj 603 DO ji = 1, jpi 604 zu_frc(ji,jj) = zu_frc(ji,jj) + zraur * z1_2 * ( utau_b(ji,jj) + utau(ji,jj) ) * r1_hu_n(ji,jj) 605 zv_frc(ji,jj) = zv_frc(ji,jj) + zraur * z1_2 * ( vtau_b(ji,jj) + vtau(ji,jj) ) * r1_hv_n(ji,jj) 606 END DO 607 END DO 576 608 ENDIF 577 609 ! … … 605 637 ! ! 
Surface net water flux and rivers 606 638 IF (ln_bt_fw) THEN 607 !$OMP PARALLEL WORKSHARE 608 zssh_frc(:,:) = zraur * ( emp(:,:) - rnf(:,:) + fwfisf(:,:) ) 609 !$OMP END PARALLEL WORKSHARE 639 !$OMP PARALLEL DO schedule(static) private(jj,ji) 640 DO jj = 1, jpj 641 DO ji = 1, jpi 642 zssh_frc(ji,jj) = zraur * ( emp(ji,jj) - rnf(ji,jj) + fwfisf(ji,jj) ) 643 END DO 644 END DO 610 645 ELSE 611 !$OMP PARALLEL WORKSHARE 612 zssh_frc(:,:) = zraur * z1_2 * ( emp(:,:) + emp_b(:,:) - rnf(:,:) - rnf_b(:,:) & 613 & + fwfisf(:,:) + fwfisf_b(:,:) ) 614 !$OMP END PARALLEL WORKSHARE 646 !$OMP PARALLEL DO schedule(static) private(jj,ji) 647 DO jj = 1, jpj 648 DO ji = 1, jpi 649 zssh_frc(ji,jj) = zraur * z1_2 * ( emp(ji,jj) + emp_b(ji,jj) - rnf(ji,jj) - rnf_b(ji,jj) & 650 & + fwfisf(ji,jj) + fwfisf_b(ji,jj) ) 651 END DO 652 END DO 615 653 ENDIF 616 654 #if defined key_asminc 617 655 ! ! Include the IAU weighted SSH increment 618 656 IF( lk_asminc .AND. ln_sshinc .AND. ln_asmiau ) THEN 619 !$OMP PARALLEL WORKSHARE 620 zssh_frc(:,:) = zssh_frc(:,:) - ssh_iau(:,:) 621 !$OMP END PARALLEL WORKSHARE 657 !$OMP PARALLEL DO schedule(static) private(jj,ji) 658 DO jj = 1, jpj 659 DO ji = 1, jpi 660 zssh_frc(ji,jj) = zssh_frc(ji,jj) - ssh_iau(ji,jj) 661 END DO 662 END DO 622 663 ENDIF 623 664 #endif … … 637 678 ! Initialize barotropic variables: 638 679 IF( ll_init )THEN 639 !$OMP PARALLEL WORKSHARE 640 sshbb_e(:,:) = 0._wp 641 ubb_e (:,:) = 0._wp 642 vbb_e (:,:) = 0._wp 643 sshb_e (:,:) = 0._wp 644 ub_e (:,:) = 0._wp 645 vb_e (:,:) = 0._wp 646 !$OMP END PARALLEL WORKSHARE 680 !$OMP PARALLEL DO schedule(static) private(jj,ji) 681 DO jj = 1, jpj 682 DO ji = 1, jpi 683 sshbb_e(ji,jj) = 0._wp 684 ubb_e (ji,jj) = 0._wp 685 vbb_e (ji,jj) = 0._wp 686 sshb_e (ji,jj) = 0._wp 687 ub_e (ji,jj) = 0._wp 688 vb_e (ji,jj) = 0._wp 689 END DO 690 END DO 647 691 ENDIF 648 692 … … 659 703 ! 660 704 IF (ln_bt_fw) THEN ! 
FORWARD integration: start from NOW fields 661 !$OMP PARALLEL WORKSHARE 662 sshn_e(:,:) = sshn(:,:) 663 un_e (:,:) = un_b(:,:) 664 vn_e (:,:) = vn_b(:,:) 665 ! 666 hu_e (:,:) = hu_n(:,:) 667 hv_e (:,:) = hv_n(:,:) 668 hur_e (:,:) = r1_hu_n(:,:) 669 hvr_e (:,:) = r1_hv_n(:,:) 670 !$OMP END PARALLEL WORKSHARE 705 !$OMP PARALLEL DO schedule(static) private(jj,ji) 706 DO jj = 1, jpj 707 DO ji = 1, jpi 708 sshn_e(ji,jj) = sshn(ji,jj) 709 un_e (ji,jj) = un_b(ji,jj) 710 vn_e (ji,jj) = vn_b(ji,jj) 711 ! 712 hu_e (ji,jj) = hu_n(ji,jj) 713 hv_e (ji,jj) = hv_n(ji,jj) 714 hur_e (ji,jj) = r1_hu_n(ji,jj) 715 hvr_e (ji,jj) = r1_hv_n(ji,jj) 716 END DO 717 END DO 671 718 ELSE ! CENTRED integration: start from BEFORE fields 672 !$OMP PARALLEL WORKSHARE 673 sshn_e(:,:) = sshb(:,:) 674 un_e (:,:) = ub_b(:,:) 675 vn_e (:,:) = vb_b(:,:) 676 ! 677 hu_e (:,:) = hu_b(:,:) 678 hv_e (:,:) = hv_b(:,:) 679 hur_e (:,:) = r1_hu_b(:,:) 680 hvr_e (:,:) = r1_hv_b(:,:) 681 !$OMP END PARALLEL WORKSHARE 719 !$OMP PARALLEL DO schedule(static) private(jj,ji) 720 DO jj = 1, jpj 721 DO ji = 1, jpi 722 sshn_e(ji,jj) = sshb(ji,jj) 723 un_e (ji,jj) = ub_b(ji,jj) 724 vn_e (ji,jj) = vb_b(ji,jj) 725 ! 726 hu_e (ji,jj) = hu_b(ji,jj) 727 hv_e (ji,jj) = hv_b(ji,jj) 728 hur_e (ji,jj) = r1_hu_b(ji,jj) 729 hvr_e (ji,jj) = r1_hv_b(ji,jj) 730 END DO 731 END DO 682 732 ENDIF 683 733 ! … … 685 735 ! 686 736 ! Initialize sums: 687 !$OMP PARALLEL WORKSHARE 688 ua_b (:,:) = 0._wp ! After barotropic velocities (or transport if flux form) 689 va_b (:,:) = 0._wp 690 ssha (:,:) = 0._wp ! Sum for after averaged sea level 691 un_adv(:,:) = 0._wp ! Sum for now transport issued from ts loop 692 vn_adv(:,:) = 0._wp 693 !$OMP END PARALLEL WORKSHARE 737 !$OMP PARALLEL DO schedule(static) private(jj,ji) 738 DO jj = 1, jpj 739 DO ji = 1, jpi 740 ua_b (ji,jj) = 0._wp ! After barotropic velocities (or transport if flux form) 741 va_b (ji,jj) = 0._wp 742 ssha (ji,jj) = 0._wp ! Sum for after averaged sea level 743 un_adv(ji,jj) = 0._wp ! 
Sum for now transport issued from ts loop 744 vn_adv(ji,jj) = 0._wp 745 END DO 746 END DO 694 747 ! ! ==================== ! 695 748 DO jn = 1, icycle ! sub-time-step loop ! … … 715 768 716 769 ! Extrapolate barotropic velocities at step jit+0.5: 717 !$OMP PARALLEL WORKSHARE 718 ua_e(:,:) = za1 * un_e(:,:) + za2 * ub_e(:,:) + za3 * ubb_e(:,:) 719 va_e(:,:) = za1 * vn_e(:,:) + za2 * vb_e(:,:) + za3 * vbb_e(:,:) 720 !$OMP END PARALLEL WORKSHARE 770 !$OMP PARALLEL DO schedule(static) private(jj,ji) 771 DO jj = 1, jpj 772 DO ji = 1, jpi 773 ua_e(ji,jj) = za1 * un_e(ji,jj) + za2 * ub_e(ji,jj) + za3 * ubb_e(ji,jj) 774 va_e(ji,jj) = za1 * vn_e(ji,jj) + za2 * vb_e(ji,jj) + za3 * vbb_e(ji,jj) 775 END DO 776 END DO 721 777 722 778 IF( .NOT.ln_linssh ) THEN !* Update ocean depth (variable volume case only) … … 724 780 ! Extrapolate Sea Level at step jit+0.5: 725 781 !$OMP PARALLEL 726 !$OMP WORKSHARE 727 zsshp2_e(:,:) = za1 * sshn_e(:,:) + za2 * sshb_e(:,:) + za3 * sshbb_e(:,:) 728 !$OMP END WORKSHARE 782 !$OMP DO schedule(static) private(jj,ji) 783 DO jj = 1, jpj 784 DO ji = 1, jpi 785 zsshp2_e(ji,jj) = za1 * sshn_e(ji,jj) + za2 * sshb_e(ji,jj) + za3 * sshbb_e(ji,jj) 786 END DO 787 END DO 729 788 ! 730 789 !$OMP DO schedule(static) private(jj,ji) … … 743 802 CALL lbc_lnk_multi( zwx, 'U', 1._wp, zwy, 'V', 1._wp ) 744 803 ! 745 !$OMP PARALLEL WORKSHARE 746 zhup2_e (:,:) = hu_0(:,:) + zwx(:,:) ! Ocean depth at U- and V-points 747 zhvp2_e (:,:) = hv_0(:,:) + zwy(:,:) 748 !$OMP END PARALLEL WORKSHARE 804 !$OMP PARALLEL DO schedule(static) private(jj,ji) 805 DO jj = 1, jpj 806 DO ji = 1, jpi 807 zhup2_e (ji,jj) = hu_0(ji,jj) + zwx(ji,jj) ! Ocean depth at U- and V-points 808 zhvp2_e (ji,jj) = hv_0(ji,jj) + zwy(ji,jj) 809 END DO 810 END DO 749 811 IF( ln_wd ) THEN 750 812 !$OMP PARALLEL DO schedule(static) private(jj,ji) 751 DO jj = 1, jpj752 DO ji = 1, jpi ! 
vector opt.753 zhup2_e(ji,jj) = MAX(zhup2_e (ji,jj), rn_wdmin1)754 zhvp2_e(ji,jj) = MAX(zhvp2_e (ji,jj), rn_wdmin1)755 END DO756 END DO813 DO jj = 1, jpj 814 DO ji = 1, jpi ! vector opt. 815 zhup2_e(ji,jj) = MAX(zhup2_e (ji,jj), rn_wdmin1) 816 zhvp2_e(ji,jj) = MAX(zhvp2_e (ji,jj), rn_wdmin1) 817 END DO 818 END DO 757 819 END IF 758 820 ELSE 759 !$OMP PARALLEL WORKSHARE 760 zhup2_e (:,:) = hu_n(:,:) 761 zhvp2_e (:,:) = hv_n(:,:) 762 !$OMP END PARALLEL WORKSHARE 821 !$OMP PARALLEL DO schedule(static) private(jj,ji) 822 DO jj = 1, jpj 823 DO ji = 1, jpi 824 zhup2_e (ji,jj) = hu_n(ji,jj) 825 zhvp2_e (ji,jj) = hv_n(ji,jj) 826 END DO 827 END DO 763 828 ENDIF 764 829 ! !* after ssh … … 767 832 ! considering fluxes below: 768 833 ! 769 !$OMP PARALLEL WORKSHARE 770 zwx(:,:) = e2u(:,:) * ua_e(:,:) * zhup2_e(:,:) ! fluxes at jn+0.5 771 zwy(:,:) = e1v(:,:) * va_e(:,:) * zhvp2_e(:,:) 772 !$OMP END PARALLEL WORKSHARE 834 !$OMP PARALLEL DO schedule(static) private(jj,ji) 835 DO jj = 1, jpj 836 DO ji = 1, jpi 837 zwx(ji,jj) = e2u(ji,jj) * ua_e(ji,jj) * zhup2_e(ji,jj) ! fluxes at jn+0.5 838 zwy(ji,jj) = e1v(ji,jj) * va_e(ji,jj) * zhvp2_e(ji,jj) 839 END DO 840 END DO 773 841 ! 774 842 #if defined key_agrif … … 802 870 za2 = wgtbtp2(jn) 803 871 !$OMP PARALLEL 804 !$OMP WORKSHARE 805 un_adv(:,:) = un_adv(:,:) + za2 * zwx(:,:) * r1_e2u(:,:) 806 vn_adv(:,:) = vn_adv(:,:) + za2 * zwy(:,:) * r1_e1v(:,:) 807 !$OMP END WORKSHARE NOWAIT 872 !$OMP DO schedule(static) private(jj,ji) 873 DO jj = 1, jpj 874 DO ji = 1, jpi 875 un_adv(ji,jj) = un_adv(ji,jj) + za2 * zwx(ji,jj) * r1_e2u(ji,jj) 876 vn_adv(ji,jj) = vn_adv(ji,jj) + za2 * zwy(ji,jj) * r1_e1v(ji,jj) 877 END DO 878 END DO 879 !$OMP END DO NOWAIT 808 880 ! 809 881 ! 
Set next sea level: … … 815 887 END DO 816 888 END DO 817 !$OMP WORKSHARE 818 ssha_e(:,:) = ( sshn_e(:,:) - rdtbt * ( zssh_frc(:,:) + zhdiv(:,:) ) ) * ssmask(:,:) 819 !$OMP END WORKSHARE NOWAIT 889 !$OMP DO schedule(static) private(jj,ji) 890 DO jj = 1, jpj 891 DO ji = 1, jpi 892 ssha_e(ji,jj) = ( sshn_e(ji,jj) - rdtbt * ( zssh_frc(ji,jj) + zhdiv(ji,jj) ) ) * ssmask(ji,jj) 893 END DO 894 END DO 895 !$OMP END DO NOWAIT 820 896 !$OMP END PARALLEL 821 897 IF( ln_wd ) THEN … … 872 948 ENDIF 873 949 ! 874 zsshp2_e(:,:) = za0 * ssha_e(:,:) + za1 * sshn_e (:,:) & 875 & + za2 * sshb_e(:,:) + za3 * sshbb_e(:,:) 950 !$OMP PARALLEL DO schedule(static) private(jj,ji) 951 DO jj = 1, jpj 952 DO ji = 1, jpi 953 zsshp2_e(ji,jj) = za0 * ssha_e(ji,jj) + za1 * sshn_e (ji,jj) & 954 & + za2 * sshb_e(ji,jj) + za3 * sshbb_e(ji,jj) 955 END DO 956 END DO 876 957 IF( ln_wd ) THEN ! Calculating and applying W/D gravity filters 877 958 !$OMP PARALLEL 878 !$OMP WORKSHARE 879 wduflt1(:,:) = 1._wp 880 wdvflt1(:,:) = 1._wp 881 !$OMP END WORKSHARE 959 !$OMP DO schedule(static) private(jj,ji) 960 DO jj = 1, jpj 961 DO ji = 1, jpi 962 wduflt1(ji,jj) = 1._wp 963 wdvflt1(ji,jj) = 1._wp 964 END DO 965 END DO 882 966 !$OMP DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2) 883 967 DO jj = 2, jpjm1 … … 1014 1098 ! 1015 1099 ! Add bottom stresses: 1016 !$OMP PARALLEL WORKSHARE 1017 zu_trd(:,:) = zu_trd(:,:) + bfrua(:,:) * un_e(:,:) * hur_e(:,:) 1018 zv_trd(:,:) = zv_trd(:,:) + bfrva(:,:) * vn_e(:,:) * hvr_e(:,:) 1019 ! 1020 ! Add top stresses: 1021 zu_trd(:,:) = zu_trd(:,:) + tfrua(:,:) * un_e(:,:) * hur_e(:,:) 1022 zv_trd(:,:) = zv_trd(:,:) + tfrva(:,:) * vn_e(:,:) * hvr_e(:,:) 1023 !$OMP END PARALLEL WORKSHARE 1100 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1101 DO jj = 1, jpj 1102 DO ji = 1, jpi 1103 zu_trd(ji,jj) = zu_trd(ji,jj) + bfrua(ji,jj) * un_e(ji,jj) * hur_e(ji,jj) 1104 zv_trd(ji,jj) = zv_trd(ji,jj) + bfrva(ji,jj) * vn_e(ji,jj) * hvr_e(ji,jj) 1105 ! 1106 ! 
Add top stresses: 1107 zu_trd(ji,jj) = zu_trd(ji,jj) + tfrua(ji,jj) * un_e(ji,jj) * hur_e(ji,jj) 1108 zv_trd(ji,jj) = zv_trd(ji,jj) + tfrva(ji,jj) * vn_e(ji,jj) * hvr_e(ji,jj) 1109 END DO 1110 END DO 1024 1111 ! 1025 1112 ! Surface pressure trend: … … 1109 1196 END DO 1110 1197 ELSE 1111 !$OMP PARALLEL WORKSHARE 1112 hu_e (:,:) = hu_0(:,:) + zsshu_a(:,:) 1113 hv_e (:,:) = hv_0(:,:) + zsshv_a(:,:) 1114 !$OMP END PARALLEL WORKSHARE 1198 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1199 DO jj = 1, jpj 1200 DO ji = 1, jpi 1201 hu_e (ji,jj) = hu_0(ji,jj) + zsshu_a(ji,jj) 1202 hv_e (ji,jj) = hv_0(ji,jj) + zsshv_a(ji,jj) 1203 END DO 1204 END DO 1115 1205 END IF 1116 !$OMP PARALLEL WORKSHARE 1117 hur_e(:,:) = ssumask(:,:) / ( hu_e(:,:) + 1._wp - ssumask(:,:) ) 1118 hvr_e(:,:) = ssvmask(:,:) / ( hv_e(:,:) + 1._wp - ssvmask(:,:) ) 1119 !$OMP END PARALLEL WORKSHARE 1206 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1207 DO jj = 1, jpj 1208 DO ji = 1, jpi 1209 hur_e(ji,jj) = ssumask(ji,jj) / ( hu_e(ji,jj) + 1._wp - ssumask(ji,jj) ) 1210 hvr_e(ji,jj) = ssvmask(ji,jj) / ( hv_e(ji,jj) + 1._wp - ssvmask(ji,jj) ) 1211 END DO 1212 END DO 1120 1213 ! 1121 1214 ENDIF … … 1132 1225 ! !* Swap 1133 1226 ! ! ---- 1134 !$OMP PARALLEL WORKSHARE 1135 ubb_e (:,:) = ub_e (:,:) 1136 ub_e (:,:) = un_e (:,:) 1137 un_e (:,:) = ua_e (:,:) 1138 ! 1139 vbb_e (:,:) = vb_e (:,:) 1140 vb_e (:,:) = vn_e (:,:) 1141 vn_e (:,:) = va_e (:,:) 1142 ! 1143 sshbb_e(:,:) = sshb_e(:,:) 1144 sshb_e (:,:) = sshn_e(:,:) 1145 sshn_e (:,:) = ssha_e(:,:) 1146 !$OMP END PARALLEL WORKSHARE 1227 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1228 DO jj = 1, jpj 1229 DO ji = 1, jpi 1230 ubb_e (ji,jj) = ub_e (ji,jj) 1231 ub_e (ji,jj) = un_e (ji,jj) 1232 un_e (ji,jj) = ua_e (ji,jj) 1233 ! 1234 vbb_e (ji,jj) = vb_e (ji,jj) 1235 vb_e (ji,jj) = vn_e (ji,jj) 1236 vn_e (ji,jj) = va_e (ji,jj) 1237 ! 
1238 sshbb_e(ji,jj) = sshb_e(ji,jj) 1239 sshb_e (ji,jj) = sshn_e(ji,jj) 1240 sshn_e (ji,jj) = ssha_e(ji,jj) 1241 END DO 1242 END DO 1147 1243 1148 1244 ! !* Sum over whole bt loop … … 1150 1246 za1 = wgtbtp1(jn) 1151 1247 IF( ln_dynadv_vec .OR. ln_linssh ) THEN ! Sum velocities 1152 !$OMP PARALLEL WORKSHARE 1153 ua_b (:,:) = ua_b (:,:) + za1 * ua_e (:,:) 1154 va_b (:,:) = va_b (:,:) + za1 * va_e (:,:) 1155 !$OMP END PARALLEL WORKSHARE 1248 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1249 DO jj = 1, jpj 1250 DO ji = 1, jpi 1251 ua_b (ji,jj) = ua_b (ji,jj) + za1 * ua_e (ji,jj) 1252 va_b (ji,jj) = va_b (ji,jj) + za1 * va_e (ji,jj) 1253 END DO 1254 END DO 1156 1255 ELSE ! Sum transports 1157 !$OMP PARALLEL WORKSHARE 1158 ua_b (:,:) = ua_b (:,:) + za1 * ua_e (:,:) * hu_e (:,:) 1159 va_b (:,:) = va_b (:,:) + za1 * va_e (:,:) * hv_e (:,:) 1160 !$OMP END PARALLEL WORKSHARE 1256 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1257 DO jj = 1, jpj 1258 DO ji = 1, jpi 1259 ua_b (ji,jj) = ua_b (ji,jj) + za1 * ua_e (ji,jj) * hu_e (ji,jj) 1260 va_b (ji,jj) = va_b (ji,jj) + za1 * va_e (ji,jj) * hv_e (ji,jj) 1261 END DO 1262 END DO 1161 1263 ENDIF 1162 1264 ! ! Sum sea level 1163 !$OMP PARALLEL WORKSHARE 1164 ssha(:,:) = ssha(:,:) + za1 * ssha_e(:,:) 1165 !$OMP END PARALLEL WORKSHARE 1265 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1266 DO jj = 1, jpj 1267 DO ji = 1, jpi 1268 ssha(ji,jj) = ssha(ji,jj) + za1 * ssha_e(ji,jj) 1269 END DO 1270 END DO 1166 1271 ! ! ==================== ! 1167 1272 END DO ! end loop ! … … 1172 1277 ! 1173 1278 ! Set advection velocity correction: 1174 !$OMP PARALLEL WORKSHARE 1175 zwx(:,:) = un_adv(:,:) 1176 zwy(:,:) = vn_adv(:,:) 1177 !$OMP END PARALLEL WORKSHARE 1279 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1280 DO jj = 1, jpj 1281 DO ji = 1, jpi 1282 zwx(ji,jj) = un_adv(ji,jj) 1283 zwy(ji,jj) = vn_adv(ji,jj) 1284 END DO 1285 END DO 1178 1286 IF( ( kt == nit000 .AND. neuler==0 ) .OR. 
.NOT.ln_bt_fw ) THEN 1179 !$OMP PARALLEL WORKSHARE 1180 un_adv(:,:) = zwx(:,:) * r1_hu_n(:,:) 1181 vn_adv(:,:) = zwy(:,:) * r1_hv_n(:,:) 1182 !$OMP END PARALLEL WORKSHARE 1287 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1288 DO jj = 1, jpj 1289 DO ji = 1, jpi 1290 un_adv(ji,jj) = zwx(ji,jj) * r1_hu_n(ji,jj) 1291 vn_adv(ji,jj) = zwy(ji,jj) * r1_hv_n(ji,jj) 1292 END DO 1293 END DO 1183 1294 ELSE 1184 !$OMP PARALLEL WORKSHARE 1185 un_adv(:,:) = z1_2 * ( ub2_b(:,:) + zwx(:,:) ) * r1_hu_n(:,:) 1186 vn_adv(:,:) = z1_2 * ( vb2_b(:,:) + zwy(:,:) ) * r1_hv_n(:,:) 1187 !$OMP END PARALLEL WORKSHARE 1295 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1296 DO jj = 1, jpj 1297 DO ji = 1, jpi 1298 un_adv(ji,jj) = z1_2 * ( ub2_b(ji,jj) + zwx(ji,jj) ) * r1_hu_n(ji,jj) 1299 vn_adv(ji,jj) = z1_2 * ( vb2_b(ji,jj) + zwy(ji,jj) ) * r1_hv_n(ji,jj) 1300 END DO 1301 END DO 1188 1302 END IF 1189 1303 1190 1304 IF( ln_bt_fw ) THEN ! Save integrated transport for next computation 1191 !$OMP PARALLEL WORKSHARE 1192 ub2_b(:,:) = zwx(:,:) 1193 vb2_b(:,:) = zwy(:,:) 1194 !$OMP END PARALLEL WORKSHARE 1305 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1306 DO jj = 1, jpj 1307 DO ji = 1, jpi 1308 ub2_b(ji,jj) = zwx(ji,jj) 1309 vb2_b(ji,jj) = zwy(ji,jj) 1310 END DO 1311 END DO 1195 1312 ENDIF 1196 1313 ! … … 1225 1342 !$OMP END DO NOWAIT 1226 1343 ! Save barotropic velocities not transport: 1227 !$OMP WORKSHARE 1228 ua_b(:,:) = ua_b(:,:) / ( hu_0(:,:) + zsshu_a(:,:) + 1._wp - ssumask(:,:) ) 1229 va_b(:,:) = va_b(:,:) / ( hv_0(:,:) + zsshv_a(:,:) + 1._wp - ssvmask(:,:) ) 1230 !$OMP END WORKSHARE NOWAIT 1344 !$OMP DO schedule(static) private(jj,ji) 1345 DO jj = 1, jpj 1346 DO ji = 1, jpi 1347 ua_b(ji,jj) = ua_b(ji,jj) / ( hu_0(ji,jj) + zsshu_a(ji,jj) + 1._wp - ssumask(ji,jj) ) 1348 va_b(ji,jj) = va_b(ji,jj) / ( hv_0(ji,jj) + zsshv_a(ji,jj) + 1._wp - ssvmask(ji,jj) ) 1349 END DO 1350 END DO 1231 1351 !$OMP END PARALLEL 1232 1352 ENDIF … … 1249 1369 IF( .NOT.Agrif_Root() .AND. 
ln_bt_fw ) THEN 1250 1370 IF( Agrif_NbStepint() == 0 ) THEN 1251 !$OMP PARALLEL WORKSHARE 1252 ub2_i_b(:,:) = 0._wp 1253 vb2_i_b(:,:) = 0._wp 1254 !$OMP END PARALLEL WORKSHARE 1371 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1372 DO jj = 1, jpj 1373 DO ji = 1, jpi 1374 ub2_i_b(ji,jj) = 0._wp 1375 vb2_i_b(ji,jj) = 0._wp 1376 END DO 1377 END DO 1255 1378 END IF 1256 1379 ! 1257 1380 za1 = 1._wp / REAL(Agrif_rhot(), wp) 1258 !$OMP PARALLEL WORKSHARE 1259 ub2_i_b(:,:) = ub2_i_b(:,:) + za1 * ub2_b(:,:) 1260 vb2_i_b(:,:) = vb2_i_b(:,:) + za1 * vb2_b(:,:) 1261 !$OMP END PARALLEL WORKSHARE 1381 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1382 DO jj = 1, jpj 1383 DO ji = 1, jpi 1384 ub2_i_b(ji,jj) = ub2_i_b(ji,jj) + za1 * ub2_b(ji,jj) 1385 vb2_i_b(ji,jj) = vb2_i_b(ji,jj) + za1 * vb2_b(ji,jj) 1386 END DO 1387 END DO 1262 1388 ENDIF 1263 1389 #endif -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynvor.F90
r7037 r7508 94 94 !!---------------------------------------------------------------------- 95 95 INTEGER, INTENT( in ) :: kt ! ocean time-step index 96 INTEGER :: jk, jj, ji 96 97 ! 97 98 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdu, ztrdv … … 106 107 CASE ( np_ENE ) !* energy conserving scheme 107 108 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 108 !$OMP PARALLEL WORKSHARE 109 ztrdu(:,:,:) = ua(:,:,:) 110 ztrdv(:,:,:) = va(:,:,:) 111 !$OMP END PARALLEL WORKSHARE 109 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 110 DO jk = 1, jpk 111 DO jj = 1, jpj 112 DO ji = 1, jpi 113 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 114 ztrdv(ji,jj,jk) = va(ji,jj,jk) 115 END DO 116 END DO 117 END DO 112 118 CALL vor_ene( kt, nrvm, ua, va ) ! relative vorticity or metric trend 113 !$OMP PARALLEL WORKSHARE 114 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 115 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 116 !$OMP END PARALLEL WORKSHARE 119 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 120 DO jk = 1, jpk 121 DO jj = 1, jpj 122 DO ji = 1, jpi 123 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 124 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 125 END DO 126 END DO 127 END DO 117 128 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 118 !$OMP PARALLEL WORKSHARE 119 ztrdu(:,:,:) = ua(:,:,:) 120 ztrdv(:,:,:) = va(:,:,:) 121 !$OMP END PARALLEL WORKSHARE 129 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 130 DO jk = 1, jpk 131 DO jj = 1, jpj 132 DO ji = 1, jpi 133 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 134 ztrdv(ji,jj,jk) = va(ji,jj,jk) 135 END DO 136 END DO 137 END DO 122 138 CALL vor_ene( kt, ncor, ua, va ) ! 
planetary vorticity trend 123 !$OMP PARALLEL WORKSHARE 124 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 125 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 126 !$OMP END PARALLEL WORKSHARE 139 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 140 DO jk = 1, jpk 141 DO jj = 1, jpj 142 DO ji = 1, jpi 143 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 144 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 145 END DO 146 END DO 147 END DO 127 148 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 128 149 ELSE … … 132 153 CASE ( np_ENS ) !* enstrophy conserving scheme 133 154 IF( l_trddyn ) THEN ! trend diagnostics: splitthe trend in two 134 !$OMP PARALLEL WORKSHARE 135 ztrdu(:,:,:) = ua(:,:,:) 136 ztrdv(:,:,:) = va(:,:,:) 137 !$OMP END PARALLEL WORKSHARE 155 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 156 DO jk = 1, jpk 157 DO jj = 1, jpj 158 DO ji = 1, jpi 159 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 160 ztrdv(ji,jj,jk) = va(ji,jj,jk) 161 END DO 162 END DO 163 END DO 138 164 CALL vor_ens( kt, nrvm, ua, va ) ! relative vorticity or metric trend 139 !$OMP PARALLEL WORKSHARE 140 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 141 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 142 !$OMP END PARALLEL WORKSHARE 165 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 166 DO jk = 1, jpk 167 DO jj = 1, jpj 168 DO ji = 1, jpi 169 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 170 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 171 END DO 172 END DO 173 END DO 143 174 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 144 !$OMP PARALLEL WORKSHARE 145 ztrdu(:,:,:) = ua(:,:,:) 146 ztrdv(:,:,:) = va(:,:,:) 147 !$OMP END PARALLEL WORKSHARE 175 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 176 DO jk = 1, jpk 177 DO jj = 1, jpj 178 DO ji = 1, jpi 179 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 180 ztrdv(ji,jj,jk) = va(ji,jj,jk) 181 END DO 182 END DO 183 END DO 148 184 CALL vor_ens( kt, ncor, ua, va ) ! 
planetary vorticity trend 149 !$OMP PARALLEL WORKSHARE 150 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 151 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 152 !$OMP END PARALLEL WORKSHARE 185 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 186 DO jk = 1, jpk 187 DO jj = 1, jpj 188 DO ji = 1, jpi 189 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 190 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 191 END DO 192 END DO 193 END DO 153 194 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 154 195 ELSE … … 158 199 CASE ( np_MIX ) !* mixed ene-ens scheme 159 200 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 160 !$OMP PARALLEL WORKSHARE 161 ztrdu(:,:,:) = ua(:,:,:) 162 ztrdv(:,:,:) = va(:,:,:) 163 !$OMP END PARALLEL WORKSHARE 201 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 202 DO jk = 1, jpk 203 DO jj = 1, jpj 204 DO ji = 1, jpi 205 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 206 ztrdv(ji,jj,jk) = va(ji,jj,jk) 207 END DO 208 END DO 209 END DO 164 210 CALL vor_ens( kt, nrvm, ua, va ) ! relative vorticity or metric trend (ens) 165 !$OMP PARALLEL WORKSHARE 166 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 167 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 168 !$OMP END PARALLEL WORKSHARE 211 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 212 DO jk = 1, jpk 213 DO jj = 1, jpj 214 DO ji = 1, jpi 215 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 216 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 217 END DO 218 END DO 219 END DO 169 220 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 170 !$OMP PARALLEL WORKSHARE 171 ztrdu(:,:,:) = ua(:,:,:) 172 ztrdv(:,:,:) = va(:,:,:) 173 !$OMP END PARALLEL WORKSHARE 221 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 222 DO jk = 1, jpk 223 DO jj = 1, jpj 224 DO ji = 1, jpi 225 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 226 ztrdv(ji,jj,jk) = va(ji,jj,jk) 227 END DO 228 END DO 229 END DO 174 230 CALL vor_ene( kt, ncor, ua, va ) ! 
planetary vorticity trend (ene) 175 !$OMP PARALLEL WORKSHARE 176 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 177 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 178 !$OMP END PARALLEL WORKSHARE 231 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 232 DO jk = 1, jpk 233 DO jj = 1, jpj 234 DO ji = 1, jpi 235 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 236 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 237 END DO 238 END DO 239 END DO 179 240 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 180 241 ELSE … … 185 246 CASE ( np_EEN ) !* energy and enstrophy conserving scheme 186 247 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 187 !$OMP PARALLEL WORKSHARE 188 ztrdu(:,:,:) = ua(:,:,:) 189 ztrdv(:,:,:) = va(:,:,:) 190 !$OMP END PARALLEL WORKSHARE 248 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 249 DO jk = 1, jpk 250 DO jj = 1, jpj 251 DO ji = 1, jpi 252 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 253 ztrdv(ji,jj,jk) = va(ji,jj,jk) 254 END DO 255 END DO 256 END DO 191 257 CALL vor_een( kt, nrvm, ua, va ) ! relative vorticity or metric trend 192 !$OMP PARALLEL WORKSHARE 193 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 194 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 195 !$OMP END PARALLEL WORKSHARE 258 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 259 DO jk = 1, jpk 260 DO jj = 1, jpj 261 DO ji = 1, jpi 262 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 263 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 264 END DO 265 END DO 266 END DO 196 267 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 197 !$OMP PARALLEL WORKSHARE 198 ztrdu(:,:,:) = ua(:,:,:) 199 ztrdv(:,:,:) = va(:,:,:) 200 !$OMP END PARALLEL WORKSHARE 268 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 269 DO jk = 1, jpk 270 DO jj = 1, jpj 271 DO ji = 1, jpi 272 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 273 ztrdv(ji,jj,jk) = va(ji,jj,jk) 274 END DO 275 END DO 276 END DO 201 277 CALL vor_een( kt, ncor, ua, va ) ! 
planetary vorticity trend 202 !$OMP PARALLEL WORKSHARE 203 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 204 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 205 !$OMP END PARALLEL WORKSHARE 278 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 279 DO jk = 1, jpk 280 DO jj = 1, jpj 281 DO ji = 1, jpi 282 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 283 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 284 END DO 285 END DO 286 END DO 206 287 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 207 288 ELSE … … 269 350 SELECT CASE( kvor ) !== vorticity considered ==! 270 351 CASE ( np_COR ) !* Coriolis (planetary vorticity) 271 !$OMP PARALLEL WORKSHARE 272 zwz(:,:) = ff(:,:) 273 !$OMP END PARALLEL WORKSHARE 352 !$OMP PARALLEL DO schedule(static) private(jj,ji) 353 DO jj = 1, jpj 354 DO ji = 1, jpi 355 zwz(ji,jj) = ff(ji,jj) 356 END DO 357 END DO 274 358 CASE ( np_RVO ) !* relative vorticity 275 !$OMP PARALLEL DO private(jj,ji)359 !$OMP PARALLEL DO schedule(static) private(jj,ji) 276 360 DO jj = 1, jpjm1 277 361 DO ji = 1, fs_jpim1 ! vector opt. … … 281 365 END DO 282 366 CASE ( np_MET ) !* metric term 283 !$OMP PARALLEL DO private(jj,ji)367 !$OMP PARALLEL DO schedule(static) private(jj,ji) 284 368 DO jj = 1, jpjm1 285 369 DO ji = 1, fs_jpim1 ! vector opt. … … 290 374 END DO 291 375 CASE ( np_CRV ) !* Coriolis + relative vorticity 292 !$OMP PARALLEL DO private(jj,ji)376 !$OMP PARALLEL DO schedule(static) private(jj,ji) 293 377 DO jj = 1, jpjm1 294 378 DO ji = 1, fs_jpim1 ! vector opt. … … 299 383 END DO 300 384 CASE ( np_CME ) !* Coriolis + metric 301 !$OMP PARALLEL DO private(jj,ji)385 !$OMP PARALLEL DO schedule(static) private(jj,ji) 302 386 DO jj = 1, jpjm1 303 387 DO ji = 1, fs_jpim1 ! vector opt. … … 313 397 ! 314 398 IF( ln_dynvor_msk ) THEN !== mask/unmask vorticity ==! 315 !$OMP PARALLEL DO private(jj,ji)399 !$OMP PARALLEL DO schedule(static) private(jj,ji) 316 400 DO jj = 1, jpjm1 317 401 DO ji = 1, fs_jpim1 ! vector opt. 
… … 322 406 323 407 IF( ln_sco ) THEN 324 !$OMP PARALLEL WORKSHARE 325 zwz(:,:) = zwz(:,:) / e3f_n(:,:,jk) 326 zwx(:,:) = e2u(:,:) * e3u_n(:,:,jk) * un(:,:,jk) 327 zwy(:,:) = e1v(:,:) * e3v_n(:,:,jk) * vn(:,:,jk) 328 !$OMP END PARALLEL WORKSHARE 408 !$OMP PARALLEL DO schedule(static) private(jj,ji) 409 DO jj = 1, jpj 410 DO ji = 1, jpi 411 zwz(ji,jj) = zwz(ji,jj) / e3f_n(ji,jj,jk) 412 zwx(ji,jj) = e2u(ji,jj) * e3u_n(ji,jj,jk) * un(ji,jj,jk) 413 zwy(ji,jj) = e1v(ji,jj) * e3v_n(ji,jj,jk) * vn(ji,jj,jk) 414 END DO 415 END DO 329 416 ELSE 330 !$OMP PARALLEL WORKSHARE 331 zwx(:,:) = e2u(:,:) * un(:,:,jk) 332 zwy(:,:) = e1v(:,:) * vn(:,:,jk) 333 !$OMP END PARALLEL WORKSHARE 417 !$OMP PARALLEL DO schedule(static) private(jj,ji) 418 DO jj = 1, jpj 419 DO ji = 1, jpi 420 zwx(ji,jj) = e2u(ji,jj) * un(ji,jj,jk) 421 zwy(ji,jj) = e1v(ji,jj) * vn(ji,jj,jk) 422 END DO 423 END DO 334 424 ENDIF 335 425 ! !== compute and add the vorticity term trend =! 336 !$OMP PARALLEL DO private(jj, ji, zy1, zy2, zx1, zx2)426 !$OMP PARALLEL DO schedule(static) private(jj, ji, zy1, zy2, zx1, zx2) 337 427 DO jj = 2, jpjm1 338 428 DO ji = fs_2, fs_jpim1 ! vector opt. … … 523 613 SELECT CASE( nn_een_e3f ) ! == reciprocal of e3 at F-point 524 614 CASE ( 0 ) ! original formulation (masked averaging of e3t divided by 4) 525 !$OMP PARALLEL DO private(jj,ji,ze3)615 !$OMP PARALLEL DO schedule(static) private(jj,ji,ze3) 526 616 DO jj = 1, jpjm1 527 617 DO ji = 1, fs_jpim1 ! vector opt. … … 534 624 END DO 535 625 CASE ( 1 ) ! new formulation (masked averaging of e3t divided by the sum of mask) 536 !$OMP PARALLEL DO private(jj,ji,ze3,zmsk)626 !$OMP PARALLEL DO schedule(static) private(jj,ji,ze3,zmsk) 537 627 DO jj = 1, jpjm1 538 628 DO ji = 1, fs_jpim1 ! vector opt. … … 550 640 SELECT CASE( kvor ) !== vorticity considered ==! 
551 641 CASE ( np_COR ) !* Coriolis (planetary vorticity) 552 !$OMP PARALLEL DO private(jj,ji)642 !$OMP PARALLEL DO schedule(static) private(jj,ji) 553 643 DO jj = 1, jpjm1 554 644 DO ji = 1, fs_jpim1 ! vector opt. … … 557 647 END DO 558 648 CASE ( np_RVO ) !* relative vorticity 559 !$OMP PARALLEL DO private(jj,ji)649 !$OMP PARALLEL DO schedule(static) private(jj,ji) 560 650 DO jj = 1, jpjm1 561 651 DO ji = 1, fs_jpim1 ! vector opt. … … 566 656 END DO 567 657 CASE ( np_MET ) !* metric term 568 !$OMP PARALLEL DO private(jj,ji)658 !$OMP PARALLEL DO schedule(static) private(jj,ji) 569 659 DO jj = 1, jpjm1 570 660 DO ji = 1, fs_jpim1 ! vector opt. … … 575 665 END DO 576 666 CASE ( np_CRV ) !* Coriolis + relative vorticity 577 !$OMP PARALLEL DO private(jj,ji)667 !$OMP PARALLEL DO schedule(static) private(jj,ji) 578 668 DO jj = 1, jpjm1 579 669 DO ji = 1, fs_jpim1 ! vector opt. … … 584 674 END DO 585 675 CASE ( np_CME ) !* Coriolis + metric 586 !$OMP PARALLEL DO private(jj,ji)676 !$OMP PARALLEL DO schedule(static) private(jj,ji) 587 677 DO jj = 1, jpjm1 588 678 DO ji = 1, fs_jpim1 ! vector opt. … … 598 688 ! 599 689 IF( ln_dynvor_msk ) THEN !== mask/unmask vorticity ==! 600 !$OMP PARALLEL DO private(jj,ji)690 !$OMP PARALLEL DO schedule(static) private(jj,ji) 601 691 DO jj = 1, jpjm1 602 692 DO ji = 1, fs_jpim1 ! vector opt. … … 609 699 ! 610 700 ! !== horizontal fluxes ==! 611 !$OMP PARALLEL DO private(jj,ji) 701 !$OMP PARALLEL 702 !$OMP DO schedule(static) private(jj,ji) 612 703 DO jj = 1, jpj 613 704 DO ji = 1, jpi … … 618 709 619 710 ! !== compute and add the vorticity term trend =! 711 !$OMP DO schedule(static) private(jj) 712 DO jj = 1, jpj 713 ztne(1,jj) = 0 ; ztnw(1,jj) = 0 ; ztse(1,jj) = 0 ; ztsw(1,jj) = 0 714 END DO 620 715 jj = 2 621 ztne(1,:) = 0 ; ztnw(1,:) = 0 ; ztse(1,:) = 0 ; ztsw(1,:) = 0 716 !$OMP DO schedule(static) private(ji) 622 717 DO ji = 2, jpi ! split in 2 parts due to vector opt. 
623 718 ztne(ji,jj) = zwz(ji-1,jj ) + zwz(ji ,jj ) + zwz(ji ,jj-1) … … 626 721 ztsw(ji,jj) = zwz(ji ,jj-1) + zwz(ji-1,jj-1) + zwz(ji-1,jj ) 627 722 END DO 628 !$OMP PARALLEL 629 !$OMP DO private(jj,ji) 723 !$OMP DO schedule(static) private(jj,ji) 630 724 DO jj = 3, jpj 631 725 DO ji = fs_2, jpi ! vector opt. ok because we start at jj = 3 … … 636 730 END DO 637 731 END DO 638 !$OMP DO private(jj,ji,zua,zva)732 !$OMP DO schedule(static) private(jj,ji,zua,zva) 639 733 DO jj = 2, jpjm1 640 734 DO ji = fs_2, fs_jpim1 ! vector opt. … … 703 797 IF(lwp) WRITE(numout,*) ' namlbc: change fmask value in the angles (T) ln_vorlat = ', ln_vorlat 704 798 IF( ln_vorlat .AND. ( ln_dynvor_ene .OR. ln_dynvor_ens .OR. ln_dynvor_mix ) ) THEN 705 !$OMP PARALLEL DO private(jk,jj,ji)799 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 706 800 DO jk = 1, jpk 707 801 DO jj = 2, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzad.F90
r7037 r7508 77 77 IF( l_trddyn ) THEN ! Save ua and va trends 78 78 CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv ) 79 !$OMP PARALLEL WORKSHARE 80 ztrdu(:,:,:) = ua(:,:,:) 81 ztrdv(:,:,:) = va(:,:,:) 82 !$OMP END PARALLEL WORKSHARE 79 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 80 DO jk = 1, jpk 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 84 ztrdv(ji,jj,jk) = va(ji,jj,jk) 85 END DO 86 END DO 87 END DO 83 88 ENDIF 84 89 … … 139 144 140 145 IF( l_trddyn ) THEN ! save the vertical advection trends for diagnostic 141 !$OMP PARALLEL WORKSHARE 142 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 143 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 144 !$OMP END PARALLEL WORKSHARE 146 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 147 DO jk = 1, jpk 148 DO jj = 1, jpj 149 DO ji = 1, jpi 150 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 151 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 152 END DO 153 END DO 154 END DO 145 155 CALL trd_dyn( ztrdu, ztrdv, jpdyn_zad, kt ) 146 156 CALL wrk_dealloc( jpi, jpj, jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzdf.F90
r6748 r7508 53 53 !! 54 54 INTEGER, INTENT( in ) :: kt ! ocean time-step index 55 INTEGER :: ji, jj, jk ! dummy loop indices 55 56 ! 56 57 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdu, ztrdv … … 66 67 IF( l_trddyn ) THEN ! temporary save of ta and sa trends 67 68 CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv ) 68 !$OMP PARALLEL WORKSHARE 69 ztrdu(:,:,:) = ua(:,:,:) 70 ztrdv(:,:,:) = va(:,:,:) 71 !$OMP END PARALLEL WORKSHARE 69 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 70 DO jk = 1, jpk 71 DO jj = 1, jpj 72 DO ji = 1, jpi 73 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 74 ztrdv(ji,jj,jk) = va(ji,jj,jk) 75 END DO 76 END DO 77 END DO 72 78 ENDIF 73 79 … … 80 86 81 87 IF( l_trddyn ) THEN ! save the vertical diffusive trends for further diagnostics 82 !$OMP PARALLEL WORKSHARE 83 ztrdu(:,:,:) = ( ua(:,:,:) - ub(:,:,:) ) / r2dt - ztrdu(:,:,:) 84 ztrdv(:,:,:) = ( va(:,:,:) - vb(:,:,:) ) / r2dt - ztrdv(:,:,:) 85 !$OMP END PARALLEL WORKSHARE 88 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 89 DO jk = 1, jpk 90 DO jj = 1, jpj 91 DO ji = 1, jpi 92 ztrdu(ji,jj,jk) = ( ua(ji,jj,jk) - ub(ji,jj,jk) ) / r2dt - ztrdu(ji,jj,jk) 93 ztrdv(ji,jj,jk) = ( va(ji,jj,jk) - vb(ji,jj,jk) ) / r2dt - ztrdv(ji,jj,jk) 94 END DO 95 END DO 96 END DO 86 97 CALL trd_dyn( ztrdu, ztrdv, jpdyn_zdf, kt ) 87 98 CALL wrk_dealloc( jpi, jpj, jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/sshwzv.F90
r7037 r7508 97 97 ! !------------------------------! 98 98 !$OMP PARALLEL 99 !$OMP WORKSHARE 100 zhdiv(:,:) = 0._wp 101 !$OMP END WORKSHARE 99 !$OMP DO schedule(static) private(jj, ji) 100 DO jj = 1, jpj 101 DO ji = 1, jpi 102 zhdiv(ji,jj) = 0._wp 103 END DO 104 END DO 102 105 DO jk = 1, jpkm1 ! Horizontal divergence of barotropic transports 103 106 !$OMP DO schedule(static) private(jj, ji) 104 107 DO jj = 1, jpj 105 108 DO ji = 1, jpi ! vector opt. 106 zhdiv(ji,jj) = zhdiv(ji,jj) + e3t_n(ji,jj,jk) * hdivn(ji,jj,jk)109 zhdiv(ji,jj) = zhdiv(ji,jj) + e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) 107 110 END DO 108 111 END DO … 116 119 117 120 IF(ln_wd) CALL wad_lmt(sshb, zcoef * (emp_b(:,:) + emp(:,:)), z2dt) 118 !$OMP PARALLEL WORKSHARE 119 ssha(:,:) = ( sshb(:,:) - z2dt * ( zcoef * ( emp_b(:,:) + emp(:,:) ) + zhdiv(:,:) ) ) * ssmask(:,:) 120 !$OMP END PARALLEL WORKSHARE 121 !$OMP PARALLEL DO schedule(static) private(jj, ji) 122 DO jj = 1, jpj 123 DO ji = 1, jpi 124 ssha(ji,jj) = ( sshb(ji,jj) - z2dt * ( zcoef * ( emp_b(ji,jj) + emp(ji,jj) ) + zhdiv(ji,jj) ) ) * ssmask(ji,jj) 125 END DO 126 END DO 121 127 IF ( .NOT.ln_dynspg_ts ) THEN 122 128 ! These lines are not necessary with time splitting since 123 129 ! … 136 142 IF( lk_asminc .AND. ln_sshinc .AND. ln_asmiau ) THEN ! Include the IAU weighted SSH increment 137 143 CALL ssh_asm_inc( kt ) 138 !$OMP PARALLEL WORKSHARE 139 ssha(:,:) = ssha(:,:) + z2dt * ssh_iau(:,:) 140 !$OMP END PARALLEL WORKSHARE 144 !$OMP PARALLEL DO schedule(static) private(jj, ji) 145 DO jj = 1, jpj 146 DO ji = 1, jpi 147 ssha(ji,jj) = ssha(ji,jj) + z2dt * ssh_iau(ji,jj) 148 END DO 149 END DO 141 150 ENDIF 142 151 #endif … 184 193 IF(lwp) WRITE(numout,*) '~~~~~ ' 185 194 ! 186 wn( :,:,jpk) = 0._wp ! bottom boundary condition: w=0 (set once for all) 195 wn( :,:,jpk) = 0._wp ! bottom boundary condition: w=0 (set once for all) — review fix: keep the whole-array assignment; the changeset's wn(ji,jj,jpk) form had no enclosing ji/jj loop, so ji/jj were undefined here and the bottom level was no longer zeroed everywhere 187 196 ENDIF 188 197 ! !------------------------------! 267 276 INTEGER, INTENT(in) :: kt ! ocean time-step index 268
278 INTEGER :: ji, jj, jk ! dummy loop indices 269 279 REAL(wp) :: zcoef ! local scalar 270 280 !!---------------------------------------------------------------------- … … 280 290 IF( ( neuler == 0 .AND. kt == nit000 ) .OR. & 281 291 & ( ln_bt_fw .AND. ln_dynspg_ts ) ) THEN 282 !$OMP PARALLEL WORKSHARE 283 sshb(:,:) = sshn(:,:) ! before <-- now 284 sshn(:,:) = ssha(:,:) ! now <-- after (before already = now) 285 !$OMP END PARALLEL WORKSHARE 292 !$OMP PARALLEL DO schedule(static) private(jj, ji) 293 DO jj = 1, jpj 294 DO ji = 1, jpi 295 sshb(ji,jj) = sshn(ji,jj) ! before <-- now 296 sshn(ji,jj) = ssha(ji,jj) ! now <-- after (before already = now) 297 END DO 298 END DO 286 299 ! 287 300 ELSE !== Leap-Frog time-stepping: Asselin filter + swap ==! 288 301 ! ! before <-- now filtered 289 !$OMP PARALLEL WORKSHARE 290 sshb(:,:) = sshn(:,:) + atfp * ( sshb(:,:) - 2 * sshn(:,:) + ssha(:,:) ) 291 !$OMP END PARALLEL WORKSHARE 302 !$OMP PARALLEL DO schedule(static) private(jj, ji) 303 DO jj = 1, jpj 304 DO ji = 1, jpi 305 sshb(ji,jj) = sshn(ji,jj) + atfp * ( sshb(ji,jj) - 2 * sshn(ji,jj) + ssha(ji,jj) ) 306 END DO 307 END DO 292 308 IF( .NOT.ln_linssh ) THEN ! before <-- with forcing removed 293 309 zcoef = atfp * rdt * r1_rau0 294 !$OMP PARALLEL WORKSHARE 295 sshb(:,:) = sshb(:,:) - zcoef * ( emp_b(:,:) - emp (:,:) & 296 & - rnf_b(:,:) + rnf (:,:) & 297 & + fwfisf_b(:,:) - fwfisf(:,:) ) * ssmask(:,:) 298 !$OMP END PARALLEL WORKSHARE 310 !$OMP PARALLEL DO schedule(static) private(jj, ji) 311 DO jj = 1, jpj 312 DO ji = 1, jpi 313 sshb(ji,jj) = sshb(ji,jj) - zcoef * ( emp_b(ji,jj) - emp (ji,jj) & 314 & - rnf_b(ji,jj) + rnf (ji,jj) & 315 & + fwfisf_b(ji,jj) - fwfisf(ji,jj) ) * ssmask(ji,jj) 316 END DO 317 END DO 299 318 ENDIF 300 !$OMP PARALLEL WORKSHARE 301 sshn(:,:) = ssha(:,:) ! now <-- after 302 !$OMP END PARALLEL WORKSHARE 319 !$OMP PARALLEL DO schedule(static) private(jj, ji) 320 DO jj = 1, jpj 321 DO ji = 1, jpi 322 sshn(ji,jj) = ssha(ji,jj) ! 
now <-- after 323 END DO 324 END DO 303 325 ENDIF 304 326 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ICB/icbini.F90
r6748 r7508 85 85 first_width (:) = SQRT( rn_initial_mass(:) / ( rn_LoW_ratio * rn_rho_bergs * rn_initial_thickness(:) ) ) 86 86 first_length(:) = rn_LoW_ratio * first_width(:) 87 !$OMP PARALLEL WORKSHARE 88 berg_grid%calving (:,:) = 0._wp 89 berg_grid%calving_hflx (:,:) = 0._wp 90 berg_grid%stored_heat (:,:) = 0._wp 91 berg_grid%floating_melt(:,:) = 0._wp 92 berg_grid%maxclass (:,:) = nclasses 93 berg_grid%stored_ice (:,:,:) = 0._wp 94 berg_grid%tmp (:,:) = 0._wp 95 src_calving (:,:) = 0._wp 96 src_calving_hflx (:,:) = 0._wp 97 !$OMP END PARALLEL WORKSHARE 87 !$OMP PARALLEL DO schedule(static) private(jj, ji) 88 DO jj = 1, jpj 89 DO ji = 1, jpi 90 berg_grid%calving (ji,jj) = 0._wp 91 berg_grid%calving_hflx (ji,jj) = 0._wp 92 berg_grid%stored_heat (ji,jj) = 0._wp 93 berg_grid%floating_melt(ji,jj) = 0._wp 94 berg_grid%maxclass (ji,jj) = nclasses 95 berg_grid%tmp (ji,jj) = 0._wp 96 src_calving (ji,jj) = 0._wp 97 src_calving_hflx (ji,jj) = 0._wp 98 END DO 99 END DO 100 DO jn = 1, nclasses 101 !$OMP PARALLEL DO schedule(static) private(jj, ji) 102 DO jj = 1, jpj 103 DO ji = 1, jpi 104 berg_grid%stored_ice (ji,jj,jn) = 0._wp 105 END DO 106 END DO 107 END DO 98 108 ! ! domain for icebergs 99 109 IF( lk_mpp .AND. jpni == 1 ) CALL ctl_stop( 'icbinit: having ONE processor in x currently does not work' ) … … 219 229 CALL flush(numicb) 220 230 ENDIF 221 !$OMP PARALLEL WORKSHARE 222 src_calving (:,:) = 0._wp 223 src_calving_hflx(:,:) = 0._wp 224 !$OMP END PARALLEL WORKSHARE 231 !$OMP PARALLEL DO schedule(static) private(jj, ji) 232 DO jj = 1, jpj 233 DO ji = 1, jpi 234 src_calving (ji,jj) = 0._wp 235 src_calving_hflx(ji,jj) = 0._wp 236 END DO 237 END DO 225 238 ! assign each new iceberg with a unique number constructed from the processor number 226 239 ! and incremented by the total number of processors … … 238 251 CALL iom_get ( inum, jpdom_data, 'maxclass', src_calving ) ! 
read the max distribution array 239 252 berg_grid%maxclass(:,:) = INT( src_calving ) 240 src_calving(:,:) = 0._wp 253 !$OMP PARALLEL DO schedule(static) private(jj, ji) 254 DO jj = 1, jpj 255 DO ji = 1, jpi 256 src_calving(ji,jj) = 0._wp 257 END DO 258 END DO 241 259 ENDIF 242 260 CALL iom_close( inum ) ! close file -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/lbclnk.F90
r7037 r7508 163 163 !!---------------------------------------------------------------------- 164 164 ! 165 !$OMP PARALLEL DO schedule(static) private(jk, ztab)166 165 DO jk = 1, jpk 167 166 ztab = pt3d(2,2,jk) … … 190 189 ! 191 190 ztab = pt2d(2,2) 192 !$OMP PARALLEL WORKSHARE193 191 pt2d(:,:) = ztab 194 !$OMP END PARALLEL WORKSHARE195 192 ! 196 193 END SUBROUTINE lbc_lnk_2d … … 316 313 ! 317 314 CASE ( 1 , 4 , 6 ) !** cyclic east-west 318 !$OMP PARALLEL WORKSHARE319 315 pt3d( 1 ,:,:) = pt3d(jpim1,:,:) ! all points 320 316 pt3d(jpi,:,:) = pt3d( 2 ,:,:) 321 !$OMP END PARALLEL WORKSHARE322 317 ! 323 318 CASE DEFAULT !** East closed -- West closed 324 319 SELECT CASE ( cd_type ) 325 320 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 326 !$OMP PARALLEL WORKSHARE327 321 pt3d( 1 ,:,:) = zland 328 322 pt3d(jpi,:,:) = zland 329 !$OMP END PARALLEL WORKSHARE330 323 CASE ( 'F' ) ! F-point 331 !$OMP PARALLEL WORKSHARE332 324 pt3d(jpi,:,:) = zland 333 !$OMP END PARALLEL WORKSHARE334 325 END SELECT 335 326 ! … … 342 333 SELECT CASE ( cd_type ) 343 334 CASE ( 'T' , 'U' , 'W' ) ! T-, U-, W-points 344 !$OMP PARALLEL WORKSHARE345 335 pt3d(:, 1 ,:) = pt3d(:,3,:) 346 336 pt3d(:,jpj,:) = zland 347 !$OMP END PARALLEL WORKSHARE348 337 CASE ( 'V' , 'F' ) ! V-, F-points 349 !$OMP PARALLEL WORKSHARE350 338 pt3d(:, 1 ,:) = psgn * pt3d(:,2,:) 351 339 pt3d(:,jpj,:) = zland 352 !$OMP END PARALLEL WORKSHARE353 340 END SELECT 354 341 ! … … 356 343 SELECT CASE ( cd_type ) ! South : closed 357 344 CASE ( 'T' , 'U' , 'V' , 'W' , 'I' ) ! all points except F-point 358 !$OMP PARALLEL WORKSHARE359 345 pt3d(:, 1 ,:) = zland 360 !$OMP END PARALLEL WORKSHARE361 346 END SELECT 362 347 ! ! North fold … … 366 351 SELECT CASE ( cd_type ) 367 352 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 368 !$OMP PARALLEL WORKSHARE369 353 pt3d(:, 1 ,:) = zland 370 354 pt3d(:,jpj,:) = zland 371 !$OMP END PARALLEL WORKSHARE372 355 CASE ( 'F' ) ! 
F-point 373 !$OMP PARALLEL WORKSHARE374 356 pt3d(:,jpj,:) = zland 375 !$OMP END PARALLEL WORKSHARE376 357 END SELECT 377 358 ! … … 608 589 ! 609 590 CASE ( 1 , 4 , 6 ) !** cyclic east-west 610 !$OMP PARALLEL WORKSHARE611 591 pt3d(jpim1,:,:) = pt3d(jpim1,:,:) + pt3d( 1 ,:,:) 612 592 pt3d( 2 ,:,:) = pt3d( 2 ,:,:) + pt3d(jpi,:,:) 613 593 pt3d( 1 ,:,:) = 0.0_wp ! all points 614 594 pt3d(jpi,:,:) = 0.0_wp 615 !$OMP END PARALLEL WORKSHARE616 595 ! 617 596 CASE DEFAULT !** East closed -- West closed 618 597 SELECT CASE ( cd_type ) 619 598 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 620 !$OMP PARALLEL WORKSHARE621 599 pt3d( 1 ,:,:) = zland 622 600 pt3d(jpi,:,:) = zland 623 !$OMP END PARALLEL WORKSHARE624 601 CASE ( 'F' ) ! F-point 625 !$OMP PARALLEL WORKSHARE626 602 pt3d(jpi,:,:) = zland 627 !$OMP END PARALLEL WORKSHARE628 603 END SELECT 629 604 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
r7037 r7508 400 400 ! !* Cyclic east-west 401 401 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 402 !$OMP PARALLEL WORKSHARE 403 ptab( 1 ,:,:) = ptab(jpim1,:,:) 404 ptab(jpi,:,:) = ptab( 2 ,:,:) 405 !$OMP END PARALLEL WORKSHARE 402 !$OMP PARALLEL DO schedule(static) private(jk, jj) 403 DO jk = 1, jpk 404 DO jj = 1, jpj 405 ptab( 1 ,jj,jk) = ptab(jpim1,jj,jk) 406 ptab(jpi,jj,jk) = ptab( 2 ,jj,jk) 407 END DO 408 END DO 406 409 ELSE !* closed 407 410 IF( .NOT. cd_type == 'F' ) THEN 408 !$OMP PARALLEL WORKSHARE 409 ptab( 1 :jpreci,:,:) = zland ! south except F-point 410 !$OMP END PARALLEL WORKSHARE 411 !$OMP PARALLEL DO schedule(static) private(jk, jj) 412 DO jk = 1, jpk 413 DO jj = 1, jpj 414 ptab( 1 :jpreci,jj,jk) = zland ! south except F-point 415 END DO 416 END DO 411 417 END IF 412 !$OMP PARALLEL WORKSHARE 413 ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north 414 !$OMP END PARALLEL WORKSHARE 418 !$OMP PARALLEL DO schedule(static) private(jk, jj) 419 DO jk = 1, jpk 420 DO jj = 1, jpj 421 ptab(nlci-jpreci+1:jpi ,jj,jk) = zland ! north 422 END DO 423 END DO 415 424 ENDIF 416 425 ! ! North-South boundaries (always closed) 417 426 IF( .NOT. cd_type == 'F' ) THEN 418 !$OMP PARALLEL WORKSHARE 419 ptab(:, 1 :jprecj,:) = zland ! south except F-point 420 !$OMP END PARALLEL WORKSHARE 427 !$OMP PARALLEL DO schedule(static) private(jk, ji) 428 DO jk = 1, jpk 429 DO ji = 1, jpi 430 ptab(ji, 1 :jprecj,jk) = zland ! south except F-point 431 END DO 432 END DO 421 433 END IF 422 !$OMP PARALLEL WORKSHARE 423 ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north 424 !$OMP END PARALLEL WORKSHARE 434 !$OMP PARALLEL DO schedule(static) private(jk, ji) 435 DO jk = 1, jpk 436 DO ji = 1, jpi 437 ptab(ji,nlcj-jprecj+1:jpj ,jk) = zland ! north 438 END DO 439 END DO 425 440 ! 426 441 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfdyn.F90
r6748 r7508 80 80 !! or |u|e^3/12 bilaplacian operator ) 81 81 !!---------------------------------------------------------------------- 82 INTEGER :: jk 82 INTEGER :: jk, jj, ji ! dummy loop indices 83 83 INTEGER :: ierr, inum, ios ! local integer 84 84 REAL(wp) :: zah0 ! local scalar … … 136 136 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'ldf_dyn_init: failed to allocate arrays') 137 137 ! 138 !$OMP PARALLEL WORKSHARE 139 ahmt(:,:,jpk) = 0._wp ! last level always 0 140 ahmf(:,:,jpk) = 0._wp 141 !$OMP END PARALLEL WORKSHARE 138 !$OMP PARALLEL DO schedule(static) private(jj, ji) 139 DO jj = 1, jpj 140 DO ji = 1, jpi 141 ahmt(ji,jj,jpk) = 0._wp ! last level always 0 142 ahmf(ji,jj,jpk) = 0._wp 143 END DO 144 END DO 142 145 ! 143 146 ! ! value of eddy mixing coef. … … 156 159 CASE( 0 ) !== constant ==! 157 160 IF(lwp) WRITE(numout,*) ' momentum mixing coef. = constant ' 158 !$OMP PARALLEL WORKSHARE 159 ahmt(:,:,:) = zah0 * tmask(:,:,:) 160 ahmf(:,:,:) = zah0 * fmask(:,:,:) 161 !$OMP END PARALLEL WORKSHARE 161 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 162 DO jk = 1, jpk 163 DO jj = 1, jpj 164 DO ji = 1, jpi 165 ahmt(ji,jj,jk) = zah0 * tmask(ji,jj,jk) 166 ahmf(ji,jj,jk) = zah0 * fmask(ji,jj,jk) 167 END DO 168 END DO 169 END DO 162 170 ! 163 171 CASE( 10 ) !== fixed profile ==! 164 172 IF(lwp) WRITE(numout,*) ' momentum mixing coef. = F( depth )' 165 !$OMP PARALLEL WORKSHARE 166 ahmt(:,:,1) = zah0 * tmask(:,:,1) ! constant surface value 167 ahmf(:,:,1) = zah0 * fmask(:,:,1) 168 !$OMP END PARALLEL WORKSHARE 173 !$OMP PARALLEL DO schedule(static) private(jj, ji) 174 DO jj = 1, jpj 175 DO ji = 1, jpi 176 ahmt(ji,jj,1) = zah0 * tmask(ji,jj,1) ! constant surface value 177 ahmf(ji,jj,1) = zah0 * fmask(ji,jj,1) 178 END DO 179 END DO 169 180 CALL ldf_c1d( 'DYN', r1_4, ahmt(:,:,1), ahmf(:,:,1), ahmt, ahmf ) 170 181 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfslp.F90
r7037 r7508 136 136 ! 137 137 !$OMP PARALLEL 138 !$OMP WORKSHARE 139 140 zww(:,:,:) = 0._wp 141 zwz(:,:,:) = 0._wp 142 !$OMP END WORKSHARE NOWAIT 138 !$OMP DO schedule(static) private(jk, jj, ji) 139 DO jk = 1, jpk 140 DO jj = 1, jpj 141 DO ji = 1, jpi 142 zww(ji,jj,jk) = 0._wp 143 zwz(ji,jj,jk) = 0._wp 144 END DO 145 END DO 146 END DO 147 !$OMP END DO NOWAIT 143 148 ! 144 149 !$OMP DO schedule(static) private(jk, jj, ji) … … 173 178 ! 174 179 !$OMP PARALLEL 175 !$OMP WORKSHARE 176 zdzr(:,:,1) = 0._wp !== Local vertical density gradient at T-point == ! (evaluated from N^2) 177 !$OMP END WORKSHARE 180 !$OMP DO schedule(static) private(jj, ji) 181 DO jj = 1, jpj 182 DO ji = 1, jpi 183 zdzr(ji,jj,1) = 0._wp !== Local vertical density gradient at T-point == ! (evaluated from N^2) 184 END DO 185 END DO 178 186 !$OMP DO schedule(static) private(jk) 179 187 DO jk = 2, jpkm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldftra.F90
r7037 r7508 117 117 !! aeiu, aeiv initialized once for all or l_ldfeiv_time set to true 118 118 !!---------------------------------------------------------------------- 119 INTEGER :: jk 119 INTEGER :: jk, jj, ji ! dummy loop indices 120 120 INTEGER :: ierr, inum, ios ! local integer 121 121 REAL(wp) :: zah0 ! local scalar … … 185 185 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'ldf_tra_init: failed to allocate arrays') 186 186 ! 187 !$OMP PARALLEL WORKSHARE 188 ahtu(:,:,jpk) = 0._wp ! last level always 0 189 ahtv(:,:,jpk) = 0._wp 190 !$OMP END PARALLEL WORKSHARE 187 !$OMP PARALLEL DO schedule(static) private(jj, ji) 188 DO jj = 1, jpj 189 DO ji = 1, jpi 190 ahtu(ji,jj,jpk) = 0._wp ! last level always 0 191 ahtv(ji,jj,jpk) = 0._wp 192 END DO 193 END DO 191 194 ! 192 195 ! ! value of eddy mixing coef. … … 203 206 CASE( 0 ) !== constant ==! 204 207 IF(lwp) WRITE(numout,*) ' tracer mixing coef. = constant = ', rn_aht_0 205 !$OMP PARALLEL WORKSHARE 206 ahtu(:,:,:) = zah0 * umask(:,:,:) 207 ahtv(:,:,:) = zah0 * vmask(:,:,:) 208 !$OMP END PARALLEL WORKSHARE 208 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 209 DO jk = 1, jpk 210 DO jj = 1, jpj 211 DO ji = 1, jpi 212 ahtu(ji,jj,jk) = zah0 * umask(ji,jj,jk) 213 ahtv(ji,jj,jk) = zah0 * vmask(ji,jj,jk) 214 END DO 215 END DO 216 END DO 209 217 ! 210 218 CASE( 10 ) !== fixed profile ==! 211 219 IF(lwp) WRITE(numout,*) ' tracer mixing coef. = F( depth )' 212 !$OMP PARALLEL WORKSHARE 213 ahtu(:,:,1) = zah0 * umask(:,:,1) ! constant surface value 214 ahtv(:,:,1) = zah0 * vmask(:,:,1) 215 !$OMP END PARALLEL WORKSHARE 220 !$OMP PARALLEL DO schedule(static) private(jj, ji) 221 DO jj = 1, jpj 222 DO ji = 1, jpi 223 ahtu(ji,jj,1) = zah0 * umask(ji,jj,1) ! constant surface value 224 ahtv(ji,jj,1) = zah0 * vmask(ji,jj,1) 225 END DO 226 END DO 216 227 CALL ldf_c1d( 'TRA', r1_4, ahtu(:,:,1), ahtv(:,:,1), ahtu, ahtv ) 217 228 ! … … 276 287 ! 277 288 IF( ln_traldf_blp .AND. .NOT. 
l_ldftra_time ) THEN 278 ahtu(:,:,:) = SQRT( ahtu(:,:,:) ) 279 ahtv(:,:,:) = SQRT( ahtv(:,:,:) ) 289 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 290 DO jk = 1, jpk 291 DO jj = 1, jpj 292 DO ji = 1, jpi 293 ahtu(ji,jj,jk) = SQRT( ahtu(ji,jj,jk) ) 294 ahtv(ji,jj,jk) = SQRT( ahtv(ji,jj,jk) ) 295 END DO 296 END DO 297 END DO 280 298 ENDIF 281 299 ! … … 396 414 !! l_ldfeiv_time : =T if EIV coefficients vary with time 397 415 !!---------------------------------------------------------------------- 398 INTEGER :: jk 416 INTEGER :: jk, jj, ji ! dummy loop indices 399 417 INTEGER :: ierr, inum, ios ! local integer 400 418 ! … … 437 455 CASE( 0 ) !== constant ==! 438 456 IF(lwp) WRITE(numout,*) ' eddy induced velocity coef. = constant = ', rn_aeiv_0 439 !$OMP PARALLEL WORKSHARE 440 aeiu(:,:,:) = rn_aeiv_0 441 aeiv(:,:,:) = rn_aeiv_0 442 !$OMP END PARALLEL WORKSHARE 457 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 458 DO jk = 1, jpk 459 DO jj = 1, jpj 460 DO ji = 1, jpi 461 aeiu(ji,jj,jk) = rn_aeiv_0 462 aeiv(ji,jj,jk) = rn_aeiv_0 463 END DO 464 END DO 465 END DO 443 466 ! 444 467 CASE( 10 ) !== fixed profile ==! 445 468 IF(lwp) WRITE(numout,*) ' eddy induced velocity coef. = F( depth )' 446 !$OMP PARALLEL WORKSHARE 447 aeiu(:,:,1) = rn_aeiv_0 ! constant surface value 448 aeiv(:,:,1) = rn_aeiv_0 449 !$OMP END PARALLEL WORKSHARE 469 !$OMP PARALLEL DO schedule(static) private(jj, ji) 470 DO jj = 1, jpj 471 DO ji = 1, jpi 472 aeiu(ji,jj,1) = rn_aeiv_0 ! constant surface value 473 aeiv(ji,jj,1) = rn_aeiv_0 474 END DO 475 END DO 450 476 CALL ldf_c1d( 'TRA', r1_4, aeiu(:,:,1), aeiv(:,:,1), aeiu, aeiv ) 451 477 ! … … 521 547 CALL wrk_alloc( jpi,jpj, zn, zah, zhw, zross, zaeiw ) 522 548 ! 523 !$OMP PARALLEL WORKSHARE 524 zn (:,:) = 0._wp ! 
Local initialization 525 zhw (:,:) = 5._wp 526 zah (:,:) = 0._wp 527 zross(:,:) = 0._wp 528 !$OMP END PARALLEL WORKSHARE 549 !$OMP PARALLEL DO schedule(static) private(jj,ji) 550 DO jj = 1, jpj 551 DO ji = 1, jpi 552 zn (ji,jj) = 0._wp ! Local initialization 553 zhw (ji,jj) = 5._wp 554 zah (ji,jj) = 0._wp 555 zross(ji,jj) = 0._wp 556 END DO 557 END DO 529 558 ! ! Compute lateral diffusive coefficient at T-point 530 559 IF( ln_traldf_triad ) THEN … … 668 697 669 698 !$OMP PARALLEL 670 !$OMP WORKSHARE 671 zpsi_uw(:,:, 1 ) = 0._wp ; zpsi_vw(:,:, 1 ) = 0._wp 672 zpsi_uw(:,:,jpk) = 0._wp ; zpsi_vw(:,:,jpk) = 0._wp 673 !$OMP END WORKSHARE NOWAIT 699 !$OMP DO schedule(static) private(jj,ji) 700 DO jj = 1, jpj 701 DO ji = 1, jpi 702 zpsi_uw(ji,jj, 1 ) = 0._wp ; zpsi_vw(ji,jj, 1 ) = 0._wp 703 zpsi_uw(ji,jj,jpk) = 0._wp ; zpsi_vw(ji,jj,jpk) = 0._wp 704 END DO 705 END DO 706 !$OMP END DO NOWAIT 674 707 ! 675 708 !$OMP DO schedule(static) private(jk,jj,ji) … … 748 781 ! 749 782 !$OMP PARALLEL 750 !$OMP WORKSHARE 751 zw3d(:,:,jpk) = 0._wp ! bottom value always 0 752 !$OMP END WORKSHARE NOWAIT 783 !$OMP DO schedule(static) private(jj,ji) 784 DO jj = 1, jpj 785 DO ji = 1, jpi 786 zw3d(ji,jj,jpk) = 0._wp ! bottom value always 0 787 END DO 788 END DO 789 !$OMP END DO NOWAIT 753 790 ! 754 791 !$OMP DO schedule(static) private(jk) … … 786 823 zztmp = 0.5_wp * rau0 * rcp 787 824 !$OMP PARALLEL 788 !$OMP WORKSHARE 789 zw2d(:,:) = 0._wp 790 !$OMP END WORKSHARE 825 !$OMP DO schedule(static) private(jj,ji) 826 DO jj = 1, jpj 827 DO ji = 1, jpi 828 zw2d(ji,jj) = 0._wp 829 END DO 830 END DO 791 831 DO jk = 1, jpkm1 792 832 !$OMP DO schedule(static) private(jj,ji) … … 803 843 CALL iom_put( "ueiv_heattr", zw2d ) ! 
heat transport in i-direction 804 844 !$OMP PARALLEL 805 !$OMP WORKSHARE 806 zw2d(:,:) = 0._wp 807 !$OMP END WORKSHARE 845 !$OMP DO schedule(static) private(jj,ji) 846 DO jj = 1, jpj 847 DO ji = 1, jpi 848 zw2d(ji,jj) = 0._wp 849 END DO 850 END DO 808 851 DO jk = 1, jpkm1 809 852 !$OMP DO schedule(static) private(jj,ji) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/albedo.F90
r7037 r7508 157 157 END DO 158 158 159 !$OMP WORKSHARE 160 pa_ice_os(:,:,:) = pa_ice_cs(:,:,:) + rcloud ! Oberhuber correction for overcast sky 161 !$OMP END WORKSHARE NOWAIT 162 !$OMP END PARALLEL 159 !$OMP DO schedule(static) private(jl, jj, ji) 160 DO jl = 1, ijpl 161 DO jj = 1, jpj 162 DO ji = 1, jpi 163 pa_ice_os(ji,jj,jl) = pa_ice_cs(ji,jj,jl) + rcloud ! Oberhuber correction for overcast sky 164 END DO 165 END DO 166 END DO 167 !$OMP END PARALLEL 163 168 164 169 !------------------------------------------ … … 235 240 !! 236 241 REAL(wp) :: zcoef 242 INTEGER :: ji, jj ! dummy loop indices 237 243 !!---------------------------------------------------------------------- 238 244 ! 239 245 zcoef = 0.05 / ( 1.1 * rmue**1.4 + 0.15 ) ! Parameterization of Briegled and Ramanathan, 1982 240 !$OMP PARALLEL WORKSHARE 241 pa_oce_cs(:,:) = zcoef 242 pa_oce_os(:,:) = 0.06 ! Parameterization of Kondratyev, 1969 and Payne, 1972 243 !$OMP END PARALLEL WORKSHARE 246 !$OMP PARALLEL DO schedule(static) private(jj, ji) 247 DO jj = 1, jpj 248 DO ji = 1, jpi 249 pa_oce_cs(ji,jj) = zcoef 250 pa_oce_os(ji,jj) = 0.06 ! Parameterization of Kondratyev, 1969 and Payne, 1972 251 END DO 252 END DO 244 253 ! 245 254 END SUBROUTINE albedo_oce -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcana.F90
r7037 r7508 239 239 240 240 ! freshwater (mass flux) and update of qns with heat content of emp 241 !$OMP PARALLEL WORKSHARE 242 emp (:,:) = emp(:,:) - zsumemp * tmask(:,:,1) ! freshwater flux (=0 in domain average) 243 sfx (:,:) = 0.0_wp ! no salt flux 244 qns (:,:) = qns(:,:) - emp(:,:) * sst_m(:,:) * rcp ! evap and precip are at SST 245 !$OMP END PARALLEL WORKSHARE 241 !$OMP PARALLEL DO schedule(static) private(jj, ji) 242 DO jj = 1, jpj 243 DO ji = 1, jpi 244 emp (ji,jj) = emp(ji,jj) - zsumemp * tmask(ji,jj,1) ! freshwater flux (=0 in domain average) 245 sfx (ji,jj) = 0.0_wp ! no salt flux 246 qns (ji,jj) = qns(ji,jj) - emp(ji,jj) * sst_m(ji,jj) * rcp ! evap and precip are at SST 247 END DO 248 END DO 246 249 247 250 ! ---------------------------- ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcblk_core.F90
r7037 r7508 141 141 INTEGER :: ifpr ! dummy loop indice 142 142 INTEGER :: jfld ! dummy loop arguments 143 INTEGER :: ji, jj ! dummy loop indices 143 144 INTEGER :: ios ! Local integer output status for namelist read 144 145 ! … … 194 195 CALL fld_fill( sf, slf_i, cn_dir, 'sbc_blk_core', 'flux formulation for ocean surface boundary condition', 'namsbc_core' ) 195 196 ! 196 sfx(:,:) = 0._wp ! salt flux; zero unless ice is present (computed in limsbc(_2).F90) 197 ! 197 !$OMP PARALLEL DO schedule(static) private(jj, ji) 198 DO jj = 1, jpj 199 DO ji = 1, jpi 200 sfx(ji,jj) = 0._wp ! salt flux; zero unless ice is present (computed in limsbc(_2).F90) 201 END DO 202 END DO 198 203 ENDIF 199 204 … … 205 210 #if defined key_cice 206 211 IF( MOD( kt - 1, nn_fsbc ) == 0 ) THEN 207 qlw_ice(:,:,1) = sf(jp_qlw)%fnow(:,:,1) 208 qsr_ice(:,:,1) = sf(jp_qsr)%fnow(:,:,1) 209 tatm_ice(:,:) = sf(jp_tair)%fnow(:,:,1) 210 qatm_ice(:,:) = sf(jp_humi)%fnow(:,:,1) 211 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac 212 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac 213 wndi_ice(:,:) = sf(jp_wndi)%fnow(:,:,1) 214 wndj_ice(:,:) = sf(jp_wndj)%fnow(:,:,1) 212 !$OMP PARALLEL DO schedule(static) private(jj, ji) 213 DO jj = 1, jpj 214 DO ji = 1, jpi 215 qlw_ice(ji,jj,1) = sf(jp_qlw)%fnow(ji,jj,1) 216 qsr_ice(ji,jj,1) = sf(jp_qsr)%fnow(ji,jj,1) 217 tatm_ice(ji,jj) = sf(jp_tair)%fnow(ji,jj,1) 218 qatm_ice(ji,jj) = sf(jp_humi)%fnow(ji,jj,1) 219 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac 220 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac 221 wndi_ice(ji,jj) = sf(jp_wndi)%fnow(ji,jj,1) 222 wndj_ice(ji,jj) = sf(jp_wndj)%fnow(ji,jj,1) 223 END DO 224 END DO 215 225 ENDIF 216 226 #endif … … 267 277 zcoef_qsatw = 0.98 * 640380. / rhoa 268 278 269 !$OMP PARALLEL WORKSHARE 270 zst(:,:) = pst(:,:) + rt0 ! 
convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 279 !$OMP PARALLEL DO schedule(static) private(jj, ji) 280 DO jj = 1, jpj 281 DO ji = 1, jpi 282 zst(ji,jj) = pst(ji,jj) + rt0 ! convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 271 283 272 284 ! ----------------------------------------------------------------------------- ! … … 275 287 276 288 ! ... components ( U10m - U_oce ) at T-point (unmasked) 277 zwnd_i(:,:) = 0.e0 278 zwnd_j(:,:) = 0.e0 279 !$OMP END PARALLEL WORKSHARE 289 zwnd_i(ji,jj) = 0.e0 290 zwnd_j(ji,jj) = 0.e0 291 END DO 292 END DO 280 293 #if defined key_cyclone 281 294 CALL wnd_cyc( kt, zwnd_i, zwnd_j ) ! add analytical tropical cyclone (Vincent et al. JGR 2012) … … 312 325 ! ocean albedo assumed to be constant + modify now Qsr to include the diurnal cycle ! Short Wave 313 326 zztmp = 1. - albo 314 IF( ln_dm2dc ) THEN ; qsr(:,:) = zztmp * sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) * tmask(:,:,1) 315 ELSE ; qsr(:,:) = zztmp * sf(jp_qsr)%fnow(:,:,1) * tmask(:,:,1) 327 IF( ln_dm2dc ) THEN 328 qsr(:,:) = zztmp * sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) * tmask(:,:,1) 329 ELSE 330 qsr(:,:) = zztmp * sf(jp_qsr)%fnow(:,:,1) * tmask(:,:,1) 316 331 ENDIF 317 332 … … 319 334 DO jj = 1, jpj 320 335 DO ji = 1, jpi 321 zqlw(ji,jj) = ( sf(jp_qlw)%fnow(ji,jj,1) - Stef * zst(ji,jj)*zst(ji,jj)*zst(ji,jj)*zst(ji,jj) ) * tmask(ji,jj,1) ! Long Wave322 ! ----------------------------------------------------------------------------- !323 ! II Turbulent FLUXES !324 ! ----------------------------------------------------------------------------- !325 326 ! ... specific humidity at SST and IST327 zqsatw(ji,jj) = zcoef_qsatw * EXP( -5107.4 / zst(ji,jj) )336 zqlw(ji,jj) = ( sf(jp_qlw)%fnow(ji,jj,1) - Stef * zst(ji,jj)*zst(ji,jj)*zst(ji,jj)*zst(ji,jj) ) * tmask(ji,jj,1) ! Long Wave 337 ! ----------------------------------------------------------------------------- ! 338 ! II Turbulent FLUXES ! 339 ! 
----------------------------------------------------------------------------- ! 340 341 ! ... specific humidity at SST and IST 342 zqsatw(ji,jj) = zcoef_qsatw * EXP( -5107.4 / zst(ji,jj) ) 328 343 329 344 END DO … … 346 361 ! ... add the HF tau contribution to the wind stress module? 347 362 IF( lhftau ) THEN 348 taum(:,:) = taum(:,:) + sf(jp_tdif)%fnow(:,:,1) 363 !$OMP PARALLEL DO schedule(static) private(jj, ji) 364 DO jj = 1, jpj 365 DO ji = 1, jpi 366 taum(ji,jj) = taum(ji,jj) + sf(jp_tdif)%fnow(ji,jj,1) 367 END DO 368 END DO 349 369 ENDIF 350 370 CALL iom_put( "taum_oce", taum ) ! output wind stress module … … 371 391 DO jj = 1, jpj 372 392 DO ji = 1, jpi 373 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN 374 !! q_air and t_air are (or "are almost") given at 10m (wind reference height) 375 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - sf(jp_humi)%fnow(ji,jj,1) )*wndm(ji,jj) ) ! Evaporation 376 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - sf(jp_tair)%fnow(ji,jj,1) )*wndm(ji,jj) ! Sensible Heat 377 ELSE 378 !! q_air and t_air are not given at 10m (wind reference height) 379 ! Values of temp. and hum. adjusted to height of wind during bulk algorithm iteration must be used!!! 380 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - zq_zu(ji,jj) )*wndm(ji,jj) ) ! Evaporation 381 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - zt_zu(ji,jj) )*wndm(ji,jj) ! Sensible Heat 382 ENDIF 383 zqla (ji,jj) = Lv * zevap(ji,jj) ! Latent Heat 384 393 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN 394 !! q_air and t_air are (or "are almost") given at 10m (wind reference height) 395 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - sf(jp_humi)%fnow(ji,jj,1) )*wndm(ji,jj) ) ! Evaporation 396 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - sf(jp_tair)%fnow(ji,jj,1) )*wndm(ji,jj) ! Sensible Heat 397 ELSE 398 !! q_air and t_air are not given at 10m (wind reference height) 399 ! Values of temp. and hum. 
adjusted to height of wind during bulk algorithm iteration must be used!!! 400 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - zq_zu(ji,jj) )*wndm(ji,jj) ) ! Evaporation 401 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - zt_zu(ji,jj) )*wndm(ji,jj) ! Sensible Heat 402 ENDIF 403 zqla (ji,jj) = Lv * zevap(ji,jj) ! Latent Heat 385 404 END DO 386 405 END DO … … 417 436 ! 418 437 #if defined key_lim3 419 !$OMP PARALLEL WORKSHARE 420 qns_oce(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) ! non solar without emp (only needed by LIM3) 421 qsr_oce(:,:) = qsr(:,:) 422 !$OMP END PARALLEL WORKSHARE 438 !$OMP PARALLEL DO schedule(static) private(jj, ji) 439 DO jj = 1, jpj 440 DO ji = 1, jpi 441 qns_oce(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) ! non solar without emp (only needed by LIM3) 442 qsr_oce(ji,jj) = qsr(ji,jj) 443 END DO 444 END DO 423 445 #endif 424 446 ! … … 431 453 CALL iom_put( "qsr_oce" , qsr ) ! output downward solar heat over the ocean 432 454 CALL iom_put( "qt_oce" , qns+qsr ) ! output total downward heat over the ocean 433 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac ! output total precipitation [kg/m2/s] 434 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac ! output solid precipitation [kg/m2/s] 455 !$OMP PARALLEL DO schedule(static) private(jj, ji) 456 DO jj = 1, jpj 457 DO ji = 1, jpi 458 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ! output total precipitation [kg/m2/s] 459 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac ! output solid precipitation [kg/m2/s] 460 END DO 461 END DO 435 462 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow 436 463 CALL iom_put( 'precip' , tprecip * 86400. ) ! Total precipitation … … 477 504 478 505 !!gm brutal.... 
479 !$OMP PARALLEL WORKSHARE 480 utau_ice (:,:) = 0._wp 481 vtau_ice (:,:) = 0._wp 482 wndm_ice (:,:) = 0._wp 483 !$OMP END PARALLEL WORKSHARE 506 !$OMP PARALLEL DO schedule(static) private(jj, ji) 507 DO jj = 1, jpj 508 DO ji = 1, jpi 509 utau_ice (ji,jj) = 0._wp 510 vtau_ice (ji,jj) = 0._wp 511 wndm_ice (ji,jj) = 0._wp 512 END DO 513 END DO 484 514 !!gm end 485 515 … … 515 545 ! 516 546 CASE( 'C' ) ! C-grid ice dynamics : U & V-points (same as ocean) 517 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_t,zwndj_t) 547 !$OMP PARALLEL 548 !$OMP DO schedule(static) private(jj,ji,zwndi_t,zwndj_t) 518 549 DO jj = 2, jpj 519 550 DO ji = fs_2, jpi ! vect. opt. … … 523 554 END DO 524 555 END DO 525 !$OMP PARALLELDO schedule(static) private(jj,ji)556 !$OMP DO schedule(static) private(jj,ji) 526 557 DO jj = 2, jpjm1 527 558 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 532 563 END DO 533 564 END DO 565 !$OMP END PARALLEL 534 566 CALL lbc_lnk( utau_ice, 'U', -1. ) 535 567 CALL lbc_lnk( vtau_ice, 'V', -1. ) … … 637 669 END DO 638 670 ! 639 !$OMP WORKSHARE 640 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac ! total precipitation [kg/m2/s] 641 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac ! solid precipitation [kg/m2/s] 642 !$OMP END WORKSHARE 671 !$OMP DO schedule(static) private(jj, ji) 672 DO jj = 1, jpj 673 DO ji = 1, jpi 674 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ! total precipitation [kg/m2/s] 675 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac ! solid precipitation [kg/m2/s] 676 END DO 677 END DO 643 678 !$OMP END PARALLEL 644 679 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow precipitation … … 650 685 ! --- evaporation --- ! 651 686 z1_lsub = 1._wp / Lsub 652 !$OMP PARALLEL WORKSHARE 653 evap_ice (:,:,:) = rn_efac * qla_ice (:,:,:) * z1_lsub ! sublimation 654 devap_ice(:,:,:) = rn_efac * dqla_ice(:,:,:) * z1_lsub ! d(sublimation)/dT 655 zevap (:,:) = rn_efac * ( emp(:,:) + tprecip(:,:) ) ! evaporation over ocean 656 657 ! 
--- evaporation minus precipitation --- ! 658 zsnw(:,:) = 0._wp 659 !$OMP END PARALLEL WORKSHARE 687 688 !$OMP PARALLEL 689 !$OMP DO schedule(static) private(jl, jj, ji) 690 DO jl = 1, jpl 691 DO jj = 1, jpj 692 DO ji = 1, jpi 693 evap_ice (ji,jj,jl) = rn_efac * qla_ice (ji,jj,jl) * z1_lsub ! sublimation 694 devap_ice(ji,jj,jl) = rn_efac * dqla_ice(ji,jj,jl) * z1_lsub ! d(sublimation)/dT 695 END DO 696 END DO 697 END DO 698 !$OMP DO schedule(static) private(jj, ji) 699 DO jj = 1, jpj 700 DO ji = 1, jpi 701 zevap (ji,jj) = rn_efac * ( emp(ji,jj) + tprecip(ji,jj) ) ! evaporation over ocean 702 ! --- evaporation minus precipitation --- ! 703 zsnw(ji,jj) = 0._wp 704 END DO 705 END DO 706 !$OMP END PARALLEL 660 707 CALL lim_thd_snwblow( pfrld, zsnw ) ! snow distribution over ice after wind blowing 708 661 709 emp_oce(:,:) = pfrld(:,:) * zevap(:,:) - ( tprecip(:,:) - sprecip(:,:) ) - sprecip(:,:) * (1._wp - zsnw ) 662 710 emp_ice(:,:) = SUM( a_i_b(:,:,:) * evap_ice(:,:,:), dim=3 ) - sprecip(:,:) * zsnw … … 664 712 665 713 ! --- heat flux associated with emp --- ! 666 qemp_oce(:,:) = - pfrld(:,:) * zevap(:,:) * sst_m(:,:) * rcp 667 668 & + sprecip(:,:) * ( 1._wp - zsnw ) *& ! solid precip at min(Tair,Tsnow)669 670 qemp_ice(:,:) = sprecip(:,:) * zsnw * 671 714 qemp_oce(:,:) = - pfrld(:,:) * zevap(:,:) * sst_m(:,:) * rcp & ! evap at sst 715 & + ( tprecip(:,:) - sprecip(:,:) ) * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & ! liquid precip at Tair 716 & + sprecip(:,:) * ( 1._wp - zsnw ) * & ! solid precip at min(Tair,Tsnow) 717 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 718 qemp_ice(:,:) = sprecip(:,:) * zsnw * & ! solid precip (only) 719 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 672 720 673 721 ! --- total solar and non solar fluxes --- ! … … 675 723 qsr_tot(:,:) = pfrld(:,:) * qsr_oce(:,:) + SUM( a_i_b(:,:,:) * qsr_ice(:,:,:), dim=3 ) 676 724 677 ! 
--- heat content of precip over ice in J/m3 (to be used in 1D-thermo) --- ! 725 ! --- heat content of precip over ice in J/m3 (to be used in 1D-thermo) 726 ! --- ! 678 727 qprec_ice(:,:) = rhosn * ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 679 728 … … 684 733 ! But we do not have Tice => consider it at 0°C => evap=0 685 734 END DO 686 687 735 CALL wrk_dealloc( jpi,jpj, zevap, zsnw ) 688 736 #endif … … 693 741 ! ( Maykut and Untersteiner, 1971 ; Ebert and Curry, 1993 ) 694 742 ! 695 !$OMP PARALLEL WORKSHARE 696 fr1_i0(:,:) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 697 fr2_i0(:,:) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 698 !$OMP END PARALLEL WORKSHARE 743 !$OMP PARALLEL DO schedule(static) private(jj, ji) 744 DO jj = 1, jpj 745 DO ji = 1, jpi 746 fr1_i0(ji,jj) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 747 fr2_i0(ji,jj) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 748 END DO 749 END DO 699 750 ! 700 751 ! … … 753 804 ! 754 805 INTEGER :: j_itt 806 INTEGER :: ji, jj ! dummy loop indices 755 807 INTEGER , PARAMETER :: nb_itt = 5 ! number of itterations 756 808 LOGICAL :: l_zt_equal_zu = .FALSE. ! if q and t are given at different height than U … … 787 839 !! Neutral coefficients at 10m: 788 840 IF( ln_cdgw ) THEN ! wave drag case 789 !$OMP PARALLEL WORKSHARE 790 cdn_wave(:,:) = cdn_wave(:,:) + rsmall * ( 1._wp - tmask(:,:,1) ) 791 ztmp0 (:,:) = cdn_wave(:,:) 792 !$OMP END PARALLEL WORKSHARE 841 !$OMP PARALLEL DO schedule(static) private(jj, ji) 842 DO jj = 1, jpj 843 DO ji = 1, jpi 844 cdn_wave(ji,jj) = cdn_wave(ji,jj) + rsmall * ( 1._wp - tmask(ji,jj,1) ) 845 ztmp0 (ji,jj) = cdn_wave(ji,jj) 846 END DO 847 END DO 793 848 ELSE 794 849 ztmp0 = cd_neutral_10m( U_zu ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcice_lim_2.F90
r7037 r7508 144 144 ! 145 145 CASE( 'C' ) !== C-grid ice dynamics : U & V-points (same as ocean) 146 !$OMP PARALLEL WORKSHARE 147 u_oce(:,:) = ssu_m(:,:) * umask(:,:,1) ! mean surface ocean current at ice velocity point 148 v_oce(:,:) = ssv_m(:,:) * vmask(:,:,1) 149 !$OMP END PARALLEL WORKSHARE 146 !$OMP PARALLEL DO schedule(static) private(jj, ji) 147 DO jj = 1, jpj 148 DO ji = 1, jpi 149 u_oce(ji,jj) = ssu_m(ji,jj) * umask(ji,jj,1) ! mean surface ocean current at ice velocity point 150 v_oce(ji,jj) = ssv_m(ji,jj) * vmask(ji,jj,1) 151 END DO 152 END DO 150 153 ! 151 154 END SELECT … … 153 156 ! ... masked sea surface freezing temperature [Kelvin] (set to rt0 over land) 154 157 CALL eos_fzp( sss_m(:,:), tfu(:,:) ) 155 !$OMP PARALLEL WORKSHARE 156 tfu(:,:) = tfu(:,:) + rt0 157 158 zsist (:,:,1) = sist (:,:) + rt0 * ( 1. - tmask(:,:,1) ) 159 !$OMP END PARALLEL WORKSHARE 158 !$OMP PARALLEL DO schedule(static) private(jj, ji) 159 DO jj = 1, jpj 160 DO ji = 1, jpi 161 tfu(ji,jj) = tfu(ji,jj) + rt0 162 zsist (ji,jj,1) = sist (ji,jj) + rt0 * ( 1. - tmask(ji,jj,1) ) 163 END DO 164 END DO 160 165 161 166 ! Ice albedo … … 169 174 170 175 ! albedo depends on cloud fraction because of non-linear spectral effects 171 !$OMP PARALLEL WORKSHARE 172 zalb_ice(:,:,:) = ( 1. - cldf_ice ) * zalb_cs(:,:,:) + cldf_ice * zalb_os(:,:,:) 173 !$OMP END PARALLEL WORKSHARE 176 !$OMP PARALLEL DO schedule(static) private(jj, ji) 177 DO jj = 1, jpj 178 DO ji = 1, jpi 179 zalb_ice(ji,jj,1) = ( 1. - cldf_ice ) * zalb_cs(ji,jj,1) + cldf_ice * zalb_os(ji,jj,1) 180 END DO 181 END DO 174 182 ! In CLIO the cloud fraction is read in the climatology and the all-sky albedo 175 183 ! (zalb_ice) is computed within the bulk routine … … 210 218 IF( ln_mixcpl) THEN 211 219 CALL sbc_cpl_ice_tau( zutau_ice , zvtau_ice ) 212 !$OMP PARALLEL WORKSHARE 213 utau_ice(:,:) = utau_ice(:,:) * xcplmask(:,:,0) + zutau_ice(:,:) * ( 1. 
- xcplmask(:,:,0) ) 214 vtau_ice(:,:) = vtau_ice(:,:) * xcplmask(:,:,0) + zvtau_ice(:,:) * ( 1. - xcplmask(:,:,0) ) 215 !$OMP END PARALLEL WORKSHARE 220 !$OMP PARALLEL DO schedule(static) private(jj, ji) 221 DO jj = 1, jpj 222 DO ji = 1, jpi 223 utau_ice(ji,jj) = utau_ice(ji,jj) * xcplmask(ji,jj,0) + zutau_ice(ji,jj) * ( 1. - xcplmask(ji,jj,0) ) 224 vtau_ice(ji,jj) = vtau_ice(ji,jj) * xcplmask(ji,jj,0) + zvtau_ice(ji,jj) * ( 1. - xcplmask(ji,jj,0) ) 225 END DO 226 END DO 216 227 ENDIF 217 228 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcmod.F90
r6748 r7508 85 85 !! - nsbc: type of sbc 86 86 !!---------------------------------------------------------------------- 87 INTEGER :: icpt ! local integer87 INTEGER :: icpt, jp, jj, ji ! local integer 88 88 !! 89 89 NAMELIST/namsbc/ nn_fsbc , ln_ana , ln_flx, ln_blk_clio, ln_blk_core, ln_blk_mfs, & … … 187 187 IF( .NOT. ln_isf ) THEN ! variable initialisation if no ice shelf 188 188 IF( sbc_isf_alloc() /= 0 ) CALL ctl_stop( 'STOP', 'sbc_init : unable to allocate sbc_isf arrays' ) 189 !$OMP PARALLEL WORKSHARE 190 fwfisf (:,:) = 0.0_wp ; fwfisf_b (:,:) = 0.0_wp 191 risf_tsc(:,:,:) = 0.0_wp ; risf_tsc_b(:,:,:) = 0.0_wp 192 !$OMP END PARALLEL WORKSHARE 193 END IF 189 !$OMP PARALLEL 190 !$OMP DO schedule(static) private(jj,ji) 191 DO jj = 1, jpj 192 DO ji = 1, jpi 193 fwfisf (ji,jj) = 0.0_wp ; fwfisf_b (ji,jj) = 0.0_wp 194 END DO 195 END DO 196 !$OMP END DO NOWAIT 197 !$OMP DO schedule(static) private(jp,jj,ji) 198 DO jp = 1, jpts 199 DO jj = 1, jpj 200 DO ji = 1, jpi 201 risf_tsc(ji,jj,jp) = 0.0_wp ; risf_tsc_b(ji,jj,jp) = 0.0_wp 202 END DO 203 END DO 204 END DO 205 !$OMP END PARALLEL 206 END IF 194 207 IF( nn_ice == 0 .AND. nn_components /= jp_iam_opa ) fr_i(:,:) = 0._wp ! no ice in the domain, ice fraction is always zero 195 !$OMP PARALLEL WORKSHARE 196 sfx(:,:) = 0._wp ! the salt flux due to freezing/melting will be computed (i.e. will be non-zero) 208 !$OMP PARALLEL DO schedule(static) private(jj,ji) 209 DO jj = 1, jpj 210 DO ji = 1, jpi 211 sfx(ji,jj) = 0._wp ! the salt flux due to freezing/melting will be computed (i.e. will be non-zero) 197 212 ! only if sea-ice is present 198 199 fmmflx(:,:) = 0._wp ! freezing-melting array initialisation 200 201 taum(:,:) = 0._wp ! Initialise taum for use in gls in case of reduced restart 202 !$OMP END PARALLEL WORKSHARE 213 fmmflx(ji,jj) = 0._wp ! freezing-melting array initialisation 214 taum(ji,jj) = 0._wp ! Initialise taum for use in gls in case of reduced restart 215 END DO 216 END DO 203 217 ! ! 
restartability 204 218 IF( ( nn_ice == 2 .OR. nn_ice ==3 ) .AND. .NOT.( ln_blk_clio .OR. ln_blk_core .OR. ln_cpl ) ) & … … 313 327 !!---------------------------------------------------------------------- 314 328 INTEGER, INTENT(in) :: kt ! ocean time step 329 INTEGER :: jp, jj, ji ! local integer 315 330 !!--------------------------------------------------------------------- 316 331 ! … … 320 335 IF( kt /= nit000 ) THEN ! Swap of forcing fields ! 321 336 ! ! ---------------------------------------- ! 322 !$OMP PARALLEL WORKSHARE 323 utau_b(:,:) = utau(:,:) ! Swap the ocean forcing fields 324 vtau_b(:,:) = vtau(:,:) ! (except at nit000 where before fields 325 qns_b (:,:) = qns (:,:) ! are set at the end of the routine) 326 emp_b (:,:) = emp (:,:) 327 sfx_b (:,:) = sfx (:,:) 328 !$OMP END PARALLEL WORKSHARE 337 !$OMP PARALLEL DO schedule(static) private(jj,ji) 338 DO jj = 1, jpj 339 DO ji = 1, jpi 340 utau_b(ji,jj) = utau(ji,jj) ! Swap the ocean forcing fields 341 vtau_b(ji,jj) = vtau(ji,jj) ! (except at nit000 where before fields 342 qns_b (ji,jj) = qns (ji,jj) ! are set at the end of the routine) 343 emp_b (ji,jj) = emp (ji,jj) 344 sfx_b (ji,jj) = sfx (ji,jj) 345 END DO 346 END DO 329 347 IF ( ln_rnf ) THEN 330 !$OMP PARALLEL WORKSHARE 331 rnf_b (:,: ) = rnf (:,: ) 332 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 333 !$OMP END PARALLEL WORKSHARE 348 !$OMP PARALLEL 349 !$OMP DO schedule(static) private(jj,ji) 350 DO jj = 1, jpj 351 DO ji = 1, jpi 352 rnf_b (ji,jj ) = rnf (ji,jj ) 353 END DO 354 END DO 355 !$OMP END DO NOWAIT 356 !$OMP DO schedule(static) private(jp,jj,ji) 357 DO jp = 1, jpts 358 DO jj = 1, jpj 359 DO ji = 1, jpi 360 rnf_tsc_b(ji,jj,jp) = rnf_tsc(ji,jj,jp) 361 END DO 362 END DO 363 END DO 364 !$OMP END PARALLEL 334 365 ENDIF 335 366 ENDIF … … 410 441 CALL iom_get( numror, jpdom_autoglo, 'sfx_b', sfx_b ) ! 
before salt flux (T-point) 411 442 ELSE 412 !$OMP PARALLEL WORKSHARE 413 sfx_b (:,:) = sfx(:,:) 414 !$OMP END PARALLEL WORKSHARE 443 !$OMP PARALLEL DO schedule(static) private(jj,ji) 444 DO jj = 1, jpj 445 DO ji = 1, jpi 446 sfx_b (ji,jj) = sfx(ji,jj) 447 END DO 448 END DO 415 449 ENDIF 416 450 ELSE !* no restart: set from nit000 values 417 451 IF(lwp) WRITE(numout,*) ' nit000-1 surface forcing fields set to nit000' 418 !$OMP PARALLEL WORKSHARE 419 utau_b(:,:) = utau(:,:) 420 vtau_b(:,:) = vtau(:,:) 421 qns_b (:,:) = qns (:,:) 422 emp_b (:,:) = emp(:,:) 423 sfx_b (:,:) = sfx(:,:) 424 !$OMP END PARALLEL WORKSHARE 452 !$OMP PARALLEL DO schedule(static) private(jj,ji) 453 DO jj = 1, jpj 454 DO ji = 1, jpi 455 utau_b(ji,jj) = utau(ji,jj) 456 vtau_b(ji,jj) = vtau(ji,jj) 457 qns_b (ji,jj) = qns (ji,jj) 458 emp_b (ji,jj) = emp(ji,jj) 459 sfx_b (ji,jj) = sfx(ji,jj) 460 END DO 461 END DO 425 462 ENDIF 426 463 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcrnf.F90
r7037 r7508 103 103 INTEGER, INTENT(in) :: kt ! ocean time step 104 104 ! 105 INTEGER :: ji, jj ! dummy loop indices105 INTEGER :: ji, jj, js ! dummy loop indices 106 106 INTEGER :: z_err = 0 ! dummy integer for error handling 107 107 !!---------------------------------------------------------------------- … … 124 124 ! ! set temperature & salinity content of runoffs 125 125 IF( ln_rnf_tem ) THEN ! use runoffs temperature data 126 !$OMP PARALLEL WORKSHARE 127 rnf_tsc(:,:,jp_tem) = ( sf_t_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 128 !$OMP END PARALLEL WORKSHARE 126 !$OMP PARALLEL DO schedule(static) private(jj, ji) 127 DO jj = 1, jpj 128 DO ji = 1, jpi 129 rnf_tsc(ji,jj,jp_tem) = ( sf_t_rnf(1)%fnow(ji,jj,1) ) * rnf(ji,jj) * r1_rau0 130 END DO 131 END DO 129 132 CALL eos_fzp( sss_m(:,:), ztfrz(:,:) ) 130 !$OMP PARALLEL WORKSHARE 131 WHERE( sf_t_rnf(1)%fnow(:,:,1) == -999._wp ) ! if missing data value use SST as runoffs temperature 132 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 133 END WHERE 134 WHERE( sf_t_rnf(1)%fnow(:,:,1) == -222._wp ) ! where fwf comes from melting of ice shelves or iceberg 135 rnf_tsc(:,:,jp_tem) = ztfrz(:,:) * rnf(:,:) * r1_rau0 - rnf(:,:) * rlfusisf * r1_rau0_rcp 136 END WHERE 137 !$OMP END PARALLEL WORKSHARE 133 !$OMP PARALLEL DO schedule(static) private(jj, ji) 134 DO jj = 1, jpj 135 DO ji = 1, jpi 136 IF ( sf_t_rnf(1)%fnow(ji,jj,1) == -999._wp ) THEN ! if missing data value use SST as runoffs temperature 137 rnf_tsc(ji,jj,jp_tem) = sst_m(ji,jj) * rnf(ji,jj) * r1_rau0 138 END IF 139 IF ( sf_t_rnf(1)%fnow(ji,jj,1) == -222._wp ) THEN ! where fwf comes from melting of ice shelves or iceberg 140 rnf_tsc(ji,jj,jp_tem) = ztfrz(ji,jj) * rnf(ji,jj) * r1_rau0 - rnf(ji,jj) * rlfusisf * r1_rau0_rcp 141 END IF 142 END DO 143 END DO 138 144 ELSE ! 
use SST as runoffs temperature 139 !$OMP PARALLEL WORKSHARE 140 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 141 !$OMP END PARALLEL WORKSHARE 145 !$OMP PARALLEL DO schedule(static) private(jj, ji) 146 DO jj = 1, jpj 147 DO ji = 1, jpi 148 rnf_tsc(ji,jj,jp_tem) = sst_m(ji,jj) * rnf(ji,jj) * r1_rau0 149 END DO 150 END DO 142 151 ENDIF 143 152 ! ! use runoffs salinity data 144 153 IF( ln_rnf_sal ) THEN 145 !$OMP PARALLEL WORKSHARE 146 rnf_tsc(:,:,jp_sal) = ( sf_s_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 147 !$OMP END PARALLEL WORKSHARE 154 !$OMP PARALLEL DO schedule(static) private(jj, ji) 155 DO jj = 1, jpj 156 DO ji = 1, jpi 157 rnf_tsc(ji,jj,jp_sal) = ( sf_s_rnf(1)%fnow(ji,jj,1) ) * rnf(ji,jj) * r1_rau0 158 END DO 159 END DO 148 160 END IF 149 161 ! ! else use S=0 for runoffs (done one for all in the init) … … 162 174 ELSE !* no restart: set from nit000 values 163 175 IF(lwp) WRITE(numout,*) ' nit000-1 runoff forcing fields set to nit000' 164 !$OMP PARALLEL WORKSHARE 165 rnf_b (:,: ) = rnf (:,: ) 166 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 167 !$OMP END PARALLEL WORKSHARE 176 !$OMP PARALLEL 177 !$OMP DO schedule(static) private(jj,ji) 178 DO jj = 1, jpj 179 DO ji = 1, jpi 180 rnf_b (ji,jj ) = rnf (ji,jj ) 181 END DO 182 END DO 183 !$OMP END DO NOWAIT 184 !$OMP DO schedule(static) private(js,jj,ji) 185 DO js = 1, jpts 186 DO jj = 1, jpj 187 DO ji = 1, jpi 188 rnf_tsc_b(ji,jj,js) = rnf_tsc(ji,jj,js) 189 END DO 190 END DO 191 END DO 192 !$OMP END PARALLEL 168 193 ENDIF 169 194 ENDIF … … 207 232 IF( ln_rnf_depth .OR. ln_rnf_depth_ini ) THEN !== runoff distributed over several levels ==! 
208 233 IF( ln_linssh ) THEN !* constant volume case : just apply the runoff input flow 234 !$OMP PARALLEL DO schedule(static) private(jj,ji,jk) 209 235 DO jj = 1, jpj 210 236 DO ji = 1, jpi 211 !$OMP PARALLEL DO schedule(static) private(jk)212 237 DO jk = 1, nk_rnf(ji,jj) 213 238 phdivn(ji,jj,jk) = phdivn(ji,jj,jk) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / h_rnf(ji,jj) … … 216 241 END DO 217 242 ELSE !* variable volume case 218 !$OMP PARALLEL 243 !$OMP PARALLEL DO schedule(static) private(ji,jj,jk) 219 244 DO jj = 1, jpj ! update the depth over which runoffs are distributed 220 245 DO ji = 1, jpi 221 246 h_rnf(ji,jj) = 0._wp 222 !$OMP DO schedule(static) private(jk)223 247 DO jk = 1, nk_rnf(ji,jj) ! recalculates h_rnf to be the depth in metres 224 248 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) ! to the bottom of the relevant grid box 225 249 END DO 226 250 ! ! apply the runoff input flow 227 !$OMP DO schedule(static) private(jk)228 251 DO jk = 1, nk_rnf(ji,jj) 229 252 phdivn(ji,jj,jk) = phdivn(ji,jj,jk) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / h_rnf(ji,jj) 230 253 END DO 231 !$OMP END DO NOWAIT 232 END DO 233 END DO 234 !$OMP END PARALLEL 254 END DO 255 END DO 235 256 ENDIF 236 257 ELSE !== runoff put only at the surface ==! 237 !$OMP PARALLEL WORKSHARE 238 h_rnf (:,:) = e3t_n (:,:,1) ! update h_rnf to be depth of top box 239 phdivn(:,:,1) = phdivn(:,:,1) - ( rnf(:,:) + rnf_b(:,:) ) * zfact * r1_rau0 / e3t_n(:,:,1) 240 !$OMP END PARALLEL WORKSHARE 258 !$OMP PARALLEL DO schedule(static) private(jj, ji) 259 DO jj = 1, jpj 260 DO ji = 1, jpi 261 h_rnf (ji,jj) = e3t_n (ji,jj,1) ! update h_rnf to be depth of top box 262 phdivn(ji,jj,1) = phdivn(ji,jj,1) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / e3t_n(ji,jj,1) 263 END DO 264 END DO 241 265 ENDIF 242 266 ! … … 255 279 !!---------------------------------------------------------------------- 256 280 CHARACTER(len=32) :: rn_dep_file ! runoff file name 257 INTEGER :: ji, jj, jk, jm ! 
dummy loop indices281 INTEGER :: ji, jj, jk, jm, js ! dummy loop indices 258 282 INTEGER :: ierror, inum ! temporary integer 259 283 INTEGER :: ios ! Local integer output status for namelist read … … 276 300 ln_rnf_mouth = .FALSE. ! default definition needed for example by sbc_ssr or by tra_adv_muscl 277 301 nkrnf = 0 278 !$OMP PARALLEL WORKSHARE 279 rnf (:,:) = 0.0_wp 280 rnf_b (:,:) = 0.0_wp 281 rnfmsk (:,:) = 0.0_wp 302 !$OMP PARALLEL DO schedule(static) private(jj, ji) 303 DO jj = 1, jpj 304 DO ji = 1, jpi 305 rnf (ji,jj) = 0.0_wp 306 rnf_b (ji,jj) = 0.0_wp 307 rnfmsk (ji,jj) = 0.0_wp 308 END DO 309 END DO 282 310 rnfmsk_z(:) = 0.0_wp 283 !$OMP END PARALLEL WORKSHARE284 311 RETURN 285 312 ENDIF … … 452 479 ENDIF 453 480 ELSE ! runoffs applied at the surface 454 nk_rnf(:,:) = 1 455 h_rnf (:,:) = e3t_n(:,:,1) 456 ENDIF 457 ! 458 !$OMP PARALLEL WORKSHARE 459 rnf(:,:) = 0._wp ! runoff initialisation 460 rnf_tsc(:,:,:) = 0._wp ! runoffs temperature & salinty contents initilisation 461 !$OMP END PARALLEL WORKSHARE 481 !$OMP PARALLEL DO schedule(static) private(jj, ji) 482 DO jj = 1, jpj 483 DO ji = 1, jpi 484 nk_rnf(ji,jj) = 1 485 h_rnf (ji,jj) = e3t_n(ji,jj,1) 486 END DO 487 END DO 488 ENDIF 489 ! 490 !$OMP PARALLEL 491 !$OMP DO schedule(static) private(jj, ji) 492 DO jj = 1, jpj 493 DO ji = 1, jpi 494 rnf(ji,jj) = 0._wp ! runoff initialisation 495 END DO 496 END DO 497 !$OMP DO schedule(static) private(js, jj, ji) 498 DO js = 1, jpts 499 DO jj = 1, jpj 500 DO ji = 1, jpi 501 rnf_tsc(ji,jj,js) = 0._wp ! runoffs temperature & salinty contents initilisation 502 END DO 503 END DO 504 END DO 505 !$OMP END PARALLEL 462 506 ! 463 507 ! ! ======================== -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssm.F90
r7047 r7508 53 53 INTEGER, INTENT(in) :: kt ! ocean time step 54 54 ! 55 INTEGER :: ji, jj 55 INTEGER :: ji, jj ! loop index 56 56 REAL(wp) :: zcoef, zf_sbc ! local scalar 57 57 REAL(wp), DIMENSION(jpi,jpj,jpts) :: zts … … 69 69 IF( nn_fsbc == 1 ) THEN ! Instantaneous surface fields ! 70 70 ! ! ---------------------------------------- ! 71 !$OMP PARALLEL WORKSHARE 72 ssu_m(:,:) = ub(:,:,1) 73 ssv_m(:,:) = vb(:,:,1) 74 !$OMP END PARALLEL WORKSHARE 71 !$OMP PARALLEL DO schedule(static) private(jj, ji) 72 DO jj = 1, jpj 73 DO ji = 1, jpi 74 ssu_m(ji,jj) = ub(ji,jj,1) 75 ssv_m(ji,jj) = vb(ji,jj,1) 76 END DO 77 END DO 75 78 IF( l_useCT ) THEN 76 79 sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 77 80 ELSE 78 !$OMP PARALLEL WORKSHARE 79 sst_m(:,:) = zts(:,:,jp_tem) 80 !$OMP END PARALLEL WORKSHARE 81 ENDIF 82 !$OMP PARALLEL WORKSHARE 83 sss_m(:,:) = zts(:,:,jp_sal) 84 !$OMP END PARALLEL WORKSHARE 81 !$OMP PARALLEL DO schedule(static) private(jj, ji) 82 DO jj = 1, jpj 83 DO ji = 1, jpi 84 sst_m(ji,jj) = zts(ji,jj,jp_tem) 85 END DO 86 END DO 87 ENDIF 88 !$OMP PARALLEL DO schedule(static) private(jj, ji) 89 DO jj = 1, jpj 90 DO ji = 1, jpi 91 sss_m(ji,jj) = zts(ji,jj,jp_sal) 92 END DO 93 END DO 85 94 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 86 95 IF( ln_apr_dyn ) THEN 87 !$OMP PARALLEL WORKSHARE 88 ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 89 !$OMP END PARALLEL WORKSHARE 96 !$OMP PARALLEL DO schedule(static) private(jj, ji) 97 DO jj = 1, jpj 98 DO ji = 1, jpi 99 ssh_m(ji,jj) = sshn(ji,jj) - 0.5 * ( ssh_ib(ji,jj) + ssh_ibb(ji,jj) ) 100 END DO 101 END DO 90 102 ELSE 91 !$OMP PARALLEL WORKSHARE 92 ssh_m(:,:) = sshn(:,:) 93 !$OMP END PARALLEL WORKSHARE 94 ENDIF 95 ! 96 !$OMP PARALLEL WORKSHARE 97 e3t_m(:,:) = e3t_n(:,:,1) 98 ! 
99 frq_m(:,:) = fraqsr_1lev(:,:) 100 !$OMP END PARALLEL WORKSHARE 103 !$OMP PARALLEL DO schedule(static) private(jj, ji) 104 DO jj = 1, jpj 105 DO ji = 1, jpi 106 ssh_m(ji,jj) = sshn(ji,jj) 107 END DO 108 END DO 109 ENDIF 110 ! 111 !$OMP PARALLEL DO schedule(static) private(jj, ji) 112 DO jj = 1, jpj 113 DO ji = 1, jpi 114 e3t_m(ji,jj) = e3t_n(ji,jj,1) 115 ! 116 frq_m(ji,jj) = fraqsr_1lev(ji,jj) 117 END DO 118 END DO 101 119 ! 102 120 ELSE … … 107 125 IF(lwp) WRITE(numout,*) '~~~~~~~ mean fields initialised to instantaneous values' 108 126 zcoef = REAL( nn_fsbc - 1, wp ) 109 !$OMP PARALLEL WORKSHARE 110 ssu_m(:,:) = zcoef * ub(:,:,1) 111 ssv_m(:,:) = zcoef * vb(:,:,1) 112 !$OMP END PARALLEL WORKSHARE 127 !$OMP PARALLEL DO schedule(static) private(jj, ji) 128 DO jj = 1, jpj 129 DO ji = 1, jpi 130 ssu_m(ji,jj) = zcoef * ub(ji,jj,1) 131 ssv_m(ji,jj) = zcoef * vb(ji,jj,1) 132 END DO 133 END DO 113 134 IF( l_useCT ) THEN 114 135 sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 115 136 ELSE 116 !$OMP PARALLEL WORKSHARE 117 sst_m(:,:) = zcoef * zts(:,:,jp_tem) 118 !$OMP END PARALLEL WORKSHARE 137 !$OMP PARALLEL DO schedule(static) private(jj, ji) 138 DO jj = 1, jpj 139 DO ji = 1, jpi 140 sst_m(ji,jj) = zcoef * zts(ji,jj,jp_tem) 141 END DO 142 END DO 119 143 ENDIF 120 !$OMP PARALLEL WORKSHARE 121 sss_m(:,:) = zcoef * zts(:,:,jp_sal) 122 !$OMP END PARALLEL WORKSHARE 144 !$OMP PARALLEL DO schedule(static) private(jj, ji) 145 DO jj = 1, jpj 146 DO ji = 1, jpi 147 sss_m(ji,jj) = zcoef * zts(ji,jj,jp_sal) 148 END DO 149 END DO 123 150 ! ! 
removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 124 151 IF( ln_apr_dyn ) THEN 125 !$OMP PARALLEL WORKSHARE 126 ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) ) 127 !$OMP END PARALLEL WORKSHARE 152 !$OMP PARALLEL DO schedule(static) private(jj, ji) 153 DO jj = 1, jpj 154 DO ji = 1, jpi 155 ssh_m(ji,jj) = zcoef * ( sshn(ji,jj) - 0.5 * ( ssh_ib(ji,jj) + ssh_ibb(ji,jj) ) ) 156 END DO 157 END DO 128 158 ELSE 129 !$OMP PARALLEL WORKSHARE 130 ssh_m(:,:) = zcoef * sshn(:,:) 131 !$OMP END PARALLEL WORKSHARE 159 !$OMP PARALLEL DO schedule(static) private(jj, ji) 160 DO jj = 1, jpj 161 DO ji = 1, jpi 162 ssh_m(ji,jj) = zcoef * sshn(ji,jj) 163 END DO 164 END DO 132 165 ENDIF 133 166 ! 134 !$OMP PARALLEL WORKSHARE 135 e3t_m(:,:) = zcoef * e3t_n(:,:,1) 136 ! 137 frq_m(:,:) = zcoef * fraqsr_1lev(:,:) 138 !$OMP END PARALLEL WORKSHARE 167 !$OMP PARALLEL DO schedule(static) private(jj, ji) 168 DO jj = 1, jpj 169 DO ji = 1, jpi 170 e3t_m(ji,jj) = zcoef * e3t_n(ji,jj,1) 171 ! 172 frq_m(ji,jj) = zcoef * fraqsr_1lev(ji,jj) 173 END DO 174 END DO 139 175 ! ! ---------------------------------------- ! 140 176 ELSEIF( MOD( kt - 2 , nn_fsbc ) == 0 ) THEN ! Initialisation: New mean computation ! 141 177 ! ! ---------------------------------------- ! 142 !$OMP PARALLEL WORKSHARE 143 ssu_m(:,:) = 0._wp ! reset to zero ocean mean sbc fields 144 ssv_m(:,:) = 0._wp 145 sst_m(:,:) = 0._wp 146 sss_m(:,:) = 0._wp 147 ssh_m(:,:) = 0._wp 148 e3t_m(:,:) = 0._wp 149 frq_m(:,:) = 0._wp 150 !$OMP END PARALLEL WORKSHARE 178 !$OMP PARALLEL DO schedule(static) private(jj, ji) 179 DO jj = 1, jpj 180 DO ji = 1, jpi 181 ssu_m(ji,jj) = 0._wp ! reset to zero ocean mean sbc fields 182 ssv_m(ji,jj) = 0._wp 183 sst_m(ji,jj) = 0._wp 184 sss_m(ji,jj) = 0._wp 185 ssh_m(ji,jj) = 0._wp 186 e3t_m(ji,jj) = 0._wp 187 frq_m(ji,jj) = 0._wp 188 END DO 189 END DO 151 190 ENDIF 152 191 ! ! ---------------------------------------- ! 153 192 ! ! Cumulate at each time step ! 
154 193 ! ! ---------------------------------------- ! 155 !$OMP PARALLEL WORKSHARE 156 ssu_m(:,:) = ssu_m(:,:) + ub(:,:,1) 157 ssv_m(:,:) = ssv_m(:,:) + vb(:,:,1) 158 !$OMP END PARALLEL WORKSHARE 194 !$OMP PARALLEL DO schedule(static) private(jj, ji) 195 DO jj = 1, jpj 196 DO ji = 1, jpi 197 ssu_m(ji,jj) = ssu_m(ji,jj) + ub(ji,jj,1) 198 ssv_m(ji,jj) = ssv_m(ji,jj) + vb(ji,jj,1) 199 END DO 200 END DO 159 201 IF( l_useCT ) THEN 160 202 sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 161 203 ELSE 162 !$OMP PARALLEL WORKSHARE 163 sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 164 !$OMP END PARALLEL WORKSHARE 165 ENDIF 166 !$OMP PARALLEL WORKSHARE 167 sss_m(:,:) = sss_m(:,:) + zts(:,:,jp_sal) 168 !$OMP END PARALLEL WORKSHARE 204 !$OMP PARALLEL DO schedule(static) private(jj, ji) 205 DO jj = 1, jpj 206 DO ji = 1, jpi 207 sst_m(ji,jj) = sst_m(ji,jj) + zts(ji,jj,jp_tem) 208 END DO 209 END DO 210 ENDIF 211 !$OMP PARALLEL DO schedule(static) private(jj, ji) 212 DO jj = 1, jpj 213 DO ji = 1, jpi 214 sss_m(ji,jj) = sss_m(ji,jj) + zts(ji,jj,jp_sal) 215 END DO 216 END DO 169 217 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 170 218 IF( ln_apr_dyn ) THEN 171 !$OMP PARALLEL WORKSHARE 172 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 173 !$OMP END PARALLEL WORKSHARE 219 !$OMP PARALLEL DO schedule(static) private(jj, ji) 220 DO jj = 1, jpj 221 DO ji = 1, jpi 222 ssh_m(ji,jj) = ssh_m(ji,jj) + sshn(ji,jj) - 0.5 * ( ssh_ib(ji,jj) + ssh_ibb(ji,jj) ) 223 END DO 224 END DO 174 225 ELSE 175 !$OMP PARALLEL WORKSHARE 176 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) 177 !$OMP END PARALLEL WORKSHARE 178 ENDIF 179 ! 180 !$OMP PARALLEL WORKSHARE 181 e3t_m(:,:) = e3t_m(:,:) + e3t_n(:,:,1) 182 ! 
183 frq_m(:,:) = frq_m(:,:) + fraqsr_1lev(:,:) 184 !$OMP END PARALLEL WORKSHARE 226 !$OMP PARALLEL DO schedule(static) private(jj, ji) 227 DO jj = 1, jpj 228 DO ji = 1, jpi 229 ssh_m(ji,jj) = ssh_m(ji,jj) + sshn(ji,jj) 230 END DO 231 END DO 232 ENDIF 233 ! 234 !$OMP PARALLEL DO schedule(static) private(jj, ji) 235 DO jj = 1, jpj 236 DO ji = 1, jpi 237 e3t_m(ji,jj) = e3t_m(ji,jj) + e3t_n(ji,jj,1) 238 ! 239 frq_m(ji,jj) = frq_m(ji,jj) + fraqsr_1lev(ji,jj) 240 END DO 241 END DO 185 242 186 243 ! ! ---------------------------------------- ! … … 188 245 ! ! ---------------------------------------- ! 189 246 zcoef = 1. / REAL( nn_fsbc, wp ) 190 !$OMP PARALLEL WORKSHARE 191 sst_m(:,:) = sst_m(:,:) * zcoef ! mean SST [Celcius] 192 sss_m(:,:) = sss_m(:,:) * zcoef ! mean SSS [psu] 193 ssu_m(:,:) = ssu_m(:,:) * zcoef ! mean suface current [m/s] 194 ssv_m(:,:) = ssv_m(:,:) * zcoef ! 195 ssh_m(:,:) = ssh_m(:,:) * zcoef ! mean SSH [m] 196 e3t_m(:,:) = e3t_m(:,:) * zcoef ! mean vertical scale factor [m] 197 frq_m(:,:) = frq_m(:,:) * zcoef ! mean fraction of solar net radiation absorbed in the 1st T level [-] 198 !$OMP END PARALLEL WORKSHARE 247 !$OMP PARALLEL DO schedule(static) private(jj, ji) 248 DO jj = 1, jpj 249 DO ji = 1, jpi 250 sst_m(ji,jj) = sst_m(ji,jj) * zcoef ! mean SST [Celcius] 251 sss_m(ji,jj) = sss_m(ji,jj) * zcoef ! mean SSS [psu] 252 ssu_m(ji,jj) = ssu_m(ji,jj) * zcoef ! mean suface current [m/s] 253 ssv_m(ji,jj) = ssv_m(ji,jj) * zcoef ! 254 ssh_m(ji,jj) = ssh_m(ji,jj) * zcoef ! mean SSH [m] 255 e3t_m(ji,jj) = e3t_m(ji,jj) * zcoef ! mean vertical scale factor [m] 256 frq_m(ji,jj) = frq_m(ji,jj) * zcoef ! mean fraction of solar net radiation absorbed in the 1st T level [-] 257 END DO 258 END DO 199 259 ! 200 260 ENDIF … … 242 302 !!---------------------------------------------------------------------- 243 303 REAL(wp) :: zcoef, zf_sbc ! local scalar 304 INTEGER :: ji, jj ! 
loop index 244 305 !!---------------------------------------------------------------------- 245 306 ! … … 269 330 CALL iom_get( numror, jpdom_autoglo, 'frq_m' , frq_m ) 270 331 ELSE 271 !$OMP PARALLEL WORKSHARE 272 frq_m(:,:) = 1._wp ! default definition 273 !$OMP END PARALLEL WORKSHARE 332 !$OMP PARALLEL DO schedule(static) private(jj, ji) 333 DO jj = 1, jpj 334 DO ji = 1, jpi 335 frq_m(ji,jj) = 1._wp ! default definition 336 END DO 337 END DO 274 338 ENDIF 275 339 ! … … 278 342 & 'from ', zf_sbc, ' to ', nn_fsbc 279 343 zcoef = REAL( nn_fsbc - 1, wp ) / zf_sbc 280 !$OMP PARALLEL WORKSHARE 281 ssu_m(:,:) = zcoef * ssu_m(:,:) 282 ssv_m(:,:) = zcoef * ssv_m(:,:) 283 sst_m(:,:) = zcoef * sst_m(:,:) 284 sss_m(:,:) = zcoef * sss_m(:,:) 285 ssh_m(:,:) = zcoef * ssh_m(:,:) 286 e3t_m(:,:) = zcoef * e3t_m(:,:) 287 frq_m(:,:) = zcoef * frq_m(:,:) 288 !$OMP END PARALLEL WORKSHARE 344 !$OMP PARALLEL DO schedule(static) private(jj, ji) 345 DO jj = 1, jpj 346 DO ji = 1, jpi 347 ssu_m(ji,jj) = zcoef * ssu_m(ji,jj) 348 ssv_m(ji,jj) = zcoef * ssv_m(ji,jj) 349 sst_m(ji,jj) = zcoef * sst_m(ji,jj) 350 sss_m(ji,jj) = zcoef * sss_m(ji,jj) 351 ssh_m(ji,jj) = zcoef * ssh_m(ji,jj) 352 e3t_m(ji,jj) = zcoef * e3t_m(ji,jj) 353 frq_m(ji,jj) = zcoef * frq_m(ji,jj) 354 END DO 355 END DO 289 356 ELSE 290 357 IF(lwp) WRITE(numout,*) '~~~~~~~ mean fields read in the ocean restart file' … … 296 363 ! 
297 364 IF(lwp) WRITE(numout,*) ' default initialisation of ss?_m arrays' 298 !$OMP PARALLEL WORKSHARE 299 ssu_m(:,:) = ub(:,:,1) 300 ssv_m(:,:) = vb(:,:,1) 301 !$OMP END PARALLEL WORKSHARE 365 !$OMP PARALLEL DO schedule(static) private(jj, ji) 366 DO jj = 1, jpj 367 DO ji = 1, jpi 368 ssu_m(ji,jj) = ub(ji,jj,1) 369 ssv_m(ji,jj) = vb(ji,jj,1) 370 END DO 371 END DO 302 372 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( tsn(:,:,1,jp_tem), tsn(:,:,1,jp_sal) ) 303 373 ELSE ; sst_m(:,:) = tsn(:,:,1,jp_tem) 304 374 ENDIF 305 !$OMP PARALLEL WORKSHARE 306 sss_m(:,:) = tsn (:,:,1,jp_sal) 307 ssh_m(:,:) = sshn (:,:) 308 e3t_m(:,:) = e3t_n(:,:,1) 309 frq_m(:,:) = 1._wp 310 !$OMP END PARALLEL WORKSHARE 375 !$OMP PARALLEL DO schedule(static) private(jj, ji) 376 DO jj = 1, jpj 377 DO ji = 1, jpi 378 sss_m(ji,jj) = tsn (ji,jj,1,jp_sal) 379 ssh_m(ji,jj) = sshn (ji,jj) 380 e3t_m(ji,jj) = e3t_n(ji,jj,1) 381 frq_m(ji,jj) = 1._wp 382 END DO 383 END DO 311 384 ! 312 385 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/eosbn2.F90
r7037 r7508 707 707 IF( nn_timing == 1 ) CALL timing_start('rab_2d') 708 708 ! 709 !$OMP PARALLEL WORKSHARE 710 pab(:,:,:) = 0._wp 711 !$OMP END PARALLEL WORKSHARE 709 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 710 DO jk = 1, jpts 711 DO jj = 1, jpj 712 DO ji = 1, jpi 713 pab(ji,jj,jk) = 0._wp 714 END DO 715 END DO 716 END DO 712 717 ! 713 718 SELECT CASE ( neos ) … … 1048 1053 END DO 1049 1054 END DO 1050 !$OMP WORKSHARE 1051 ptf(:,:) = ptf(:,:) * psal(:,:) 1052 !$OMP END WORKSHARE NOWAIT 1055 !$OMP DO schedule(static) private(jj, ji) 1056 DO jj = 1, jpj 1057 DO ji = 1, jpi 1058 ptf(ji,jj) = ptf(ji,jj) * psal(ji,jj) 1059 END DO 1060 END DO 1053 1061 !$OMP END PARALLEL 1054 1062 ! … … 1073 1081 ! 1074 1082 IF( PRESENT( pdep ) ) THEN 1075 !$OMP PARALLEL WORKSHARE 1076 ptf(:,:) = ptf(:,:) - 7.53e-4 * pdep(:,:) 1077 !$OMP END PARALLEL WORKSHARE 1083 !$OMP DO schedule(static) private(jj, ji) 1084 DO jj = 1, jpj 1085 DO ji = 1, jpi 1086 ptf(ji,jj) = ptf(ji,jj) - 7.53e-4 * pdep(ji,jj) 1087 END DO 1088 END DO 1078 1089 END IF 1079 1090 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv.F90
r7037 r7508 84 84 INTEGER, INTENT( in ) :: kt ! ocean time-step index 85 85 ! 86 INTEGER :: 86 INTEGER :: ji, jj, jk ! dummy loop index 87 87 REAL(wp), POINTER, DIMENSION(:,:,:) :: zun, zvn, zwn 88 88 !!---------------------------------------------------------------------- … … 108 108 ! 109 109 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! add z-tilde and/or vvl corrections 110 !$OMP PARALLEL WORKSHARE 111 zun(:,:,:) = zun(:,:,:) + un_td(:,:,:) 112 zvn(:,:,:) = zvn(:,:,:) + vn_td(:,:,:) 113 !$OMP END PARALLEL WORKSHARE 114 ENDIF 115 ! 116 !$OMP PARALLEL WORKSHARE 117 zun(:,:,jpk) = 0._wp ! no transport trough the bottom 118 zvn(:,:,jpk) = 0._wp 119 zwn(:,:,jpk) = 0._wp 120 !$OMP END PARALLEL WORKSHARE 110 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 111 DO jk = 1, jpk 112 DO jj = 1, jpj 113 DO ji = 1, jpi 114 zun(ji,jj,jk) = zun(ji,jj,jk) + un_td(ji,jj,jk) 115 zvn(ji,jj,jk) = zvn(ji,jj,jk) + vn_td(ji,jj,jk) 116 END DO 117 END DO 118 END DO 119 ENDIF 120 ! 121 !$OMP PARALLEL DO schedule(static) private(jj, ji) 122 DO jj = 1, jpj 123 DO ji = 1, jpi 124 zun(ji,jj,jpk) = 0._wp ! no transport trough the bottom 125 zvn(ji,jj,jpk) = 0._wp 126 zwn(ji,jj,jpk) = 0._wp 127 END DO 128 END DO 121 129 ! 122 130 IF( ln_ldfeiv .AND. .NOT. ln_traldf_triad ) & -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_fct.F90
r7037 r7508 97 97 ! 98 98 ! ! surface & bottom value : flux set to zero one for all 99 !$OMP PARALLEL WORKSHARE 100 zwz(:,:, 1 ) = 0._wp 101 zwx(:,:,jpk) = 0._wp 102 zwy(:,:,jpk) = 0._wp 103 zwz(:,:,jpk) = 0._wp 104 zwi(:,:,:) = 0._wp 105 !$OMP END PARALLEL WORKSHARE 99 !$OMP PARALLEL 100 !$OMP DO schedule(static) private(jj, ji) 101 DO jj = 1, jpj 102 DO ji = 1, jpi 103 zwz(ji,jj, 1 ) = 0._wp 104 zwx(ji,jj,jpk) = 0._wp 105 zwy(ji,jj,jpk) = 0._wp 106 zwz(ji,jj,jpk) = 0._wp 107 END DO 108 END DO 109 !$OMP DO schedule(static) private(jk, jj, ji) 110 DO jk = 1, jpk 111 DO jj = 1, jpj 112 DO ji = 1, jpi 113 zwi(ji,jj,jk) = 0._wp 114 END DO 115 END DO 116 END DO 117 !$OMP END PARALLEL 106 118 ! 107 119 DO jn = 1, kjpt !== loop over the tracers ==! … … 147 159 END DO 148 160 ELSE ! no cavities: only at the ocean surface 149 !$OMP PARALLEL WORKSHARE 150 zwz(:,:,1) = pwn(:,:,1) * ptb(:,:,1,jn) 151 !$OMP END PARALLEL WORKSHARE 161 !$OMP PARALLEL DO schedule(static) private(jj, ji) 162 DO jj = 1, jpj 163 DO ji = 1, jpi 164 zwz(ji,jj,1) = pwn(ji,jj,1) * ptb(ji,jj,1,jn) 165 END DO 166 END DO 152 167 ENDIF 153 168 ENDIF … … 173 188 CALL wrk_alloc( jpi, jpj, jpk, ztrdx, ztrdy, ztrdz ) 174 189 !$OMP PARALLEL 175 !$OMP WORKSHARE 176 ztrdx(:,:,:) = 0._wp 177 ztrdy(:,:,:) = 0._wp 178 ztrdz(:,:,:) = 0._wp 179 !$OMP END WORKSHARE 180 !$OMP WORKSHARE 181 ztrdx(:,:,:) = zwx(:,:,:) 182 ztrdy(:,:,:) = zwy(:,:,:) 183 ztrdz(:,:,:) = zwz(:,:,:) 184 !$OMP END WORKSHARE 190 !$OMP DO schedule(static) private(jk, jj, ji) 191 DO jk = 1, jpk 192 DO jj = 1, jpj 193 DO ji = 1, jpi 194 ztrdx(ji,jj,jk) = 0._wp 195 ztrdy(ji,jj,jk) = 0._wp 196 ztrdz(ji,jj,jk) = 0._wp 197 END DO 198 END DO 199 END DO 200 !$OMP DO schedule(static) private(jk, jj, ji) 201 DO jk = 1, jpk 202 DO jj = 1, jpj 203 DO ji = 1, jpi 204 ztrdx(ji,jj,jk) = zwx(ji,jj,jk) 205 ztrdy(ji,jj,jk) = zwy(ji,jj,jk) 206 ztrdz(ji,jj,jk) = zwz(ji,jj,jk) 207 END DO 208 END DO 209 END DO 185 210 !$OMP END PARALLEL 186 211 END IF … … 208 233 CASE( 4 
) !- 4th order centered 209 234 !$OMP PARALLEL 210 !$OMP WORKSHARE 211 zltu(:,:,jpk) = 0._wp ! Bottom value : flux set to zero 212 zltv(:,:,jpk) = 0._wp 213 !$OMP END WORKSHARE 235 !$OMP DO schedule(static) private(jj, ji) 236 DO jj = 1, jpj 237 DO ji = 1, jpi 238 zltu(ji,jj,jpk) = 0._wp ! Bottom value : flux set to zero 239 zltv(ji,jj,jpk) = 0._wp 240 END DO 241 END DO 214 242 !$OMP DO schedule(static) private(jk, jj, ji) 215 243 DO jk = 1, jpkm1 ! Laplacian … … 246 274 CASE( 41 ) !- 4th order centered ==>> !!gm coding attempt need to be tested 247 275 !$OMP PARALLEL 248 !$OMP WORKSHARE 249 ztu(:,:,jpk) = 0._wp ! Bottom value : flux set to zero 250 ztv(:,:,jpk) = 0._wp 251 !$OMP END WORKSHARE 276 !$OMP DO schedule(static) private(jj, ji) 277 DO jj = 1, jpj 278 DO ji = 1, jpi 279 ztu(ji,jj,jpk) = 0._wp ! Bottom value : flux set to zero 280 ztv(ji,jj,jpk) = 0._wp 281 END DO 282 END DO 252 283 !$OMP DO schedule(static) private(jk, jj, ji) 253 284 DO jk = 1, jpkm1 ! 1st derivative (gradient) … … 307 338 END SELECT 308 339 IF( ln_linssh ) THEN ! top ocean value: high order = upstream ==>> zwz=0 309 !$OMP PARALLEL WORKSHARE 310 zwz(:,:,1) = 0._wp ! only ocean surface as interior zwz values have been w-masked 311 !$OMP END PARALLEL WORKSHARE 340 !$OMP PARALLEL DO schedule(static) private(jj, ji) 341 DO jj = 1, jpj 342 DO ji = 1, jpi 343 zwz(ji,jj,1) = 0._wp ! only ocean surface as interior zwz values have been w-masked 344 END DO 345 END DO 312 346 ENDIF 313 347 ! … … 334 368 ! 335 369 IF( l_trd ) THEN ! trend diagnostics (contribution of upstream fluxes) 336 !$OMP PARALLEL WORKSHARE 337 ztrdx(:,:,:) = ztrdx(:,:,:) + zwx(:,:,:) ! <<< Add to previously computed 338 ztrdy(:,:,:) = ztrdy(:,:,:) + zwy(:,:,:) ! <<< Add to previously computed 339 ztrdz(:,:,:) = ztrdz(:,:,:) + zwz(:,:,:) ! 
<<< Add to previously computed 340 !$OMP END PARALLEL WORKSHARE 370 !$OMP DO schedule(static) private(jk, jj, ji) 371 DO jk = 1, jpk 372 DO jj = 1, jpj 373 DO ji = 1, jpi 374 ztrdx(ji,jj,jk) = ztrdx(ji,jj,jk) + zwx(ji,jj,jk) ! <<< Add to previously computed 375 ztrdy(ji,jj,jk) = ztrdy(ji,jj,jk) + zwy(ji,jj,jk) ! <<< Add to previously computed 376 ztrdz(ji,jj,jk) = ztrdz(ji,jj,jk) + zwz(ji,jj,jk) ! <<< Add to previously computed 377 END DO 378 END DO 379 END DO 341 380 ! 342 381 CALL trd_tra( kt, cdtype, jn, jptra_xad, ztrdx, pun, ptn(:,:,:,jn) ) … … 678 717 679 718 !$OMP PARALLEL 680 !$OMP WORKSHARE 681 zbetup(:,:,:) = 0._wp 682 zbetdo(:,:,:) = 0._wp 683 !$OMP END WORKSHARE 719 !$OMP DO schedule(static) private(jk, jj, ji) 720 DO jk = 1, jpk 721 DO jj = 1, jpj 722 DO ji = 1, jpi 723 zbetup(ji,jj,jk) = 0._wp 724 zbetdo(ji,jj,jk) = 0._wp 725 END DO 726 END DO 727 END DO 684 728 !$OMP DO schedule(static) private(jk, jj, ji, ikm1, zup, zdo, zpos, zneg, zbt) 685 729 DO jk = 1, jpkm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_mle.F90
r6748 r7508 348 348 ! 349 349 z1_t2 = 1._wp / ( rn_time * rn_time ) 350 !$OMP PARALLEL WORKSHARE 351 r1_ft(:,:) = 2._wp * omega * SIN( rad * gphit(:,:) ) 352 r1_ft(:,:) = 1._wp / SQRT( r1_ft(:,:) * r1_ft(:,:) + z1_t2 ) 353 !$OMP END PARALLEL WORKSHARE 350 !$OMP PARALLEL DO schedule(static) private(jj, ji) 351 DO jj = 1, jpj 352 DO ji = 1, jpi 353 r1_ft(ji,jj) = 2._wp * omega * SIN( rad * gphit(ji,jj) ) 354 r1_ft(ji,jj) = 1._wp / SQRT( r1_ft(ji,jj) * r1_ft(ji,jj) + z1_t2 ) 355 END DO 356 END DO 354 357 ! 355 358 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_mus.F90
r7037 r7508 101 101 ! 102 102 ALLOCATE( xind(jpi,jpj,jpk), STAT=ierr ) 103 !$OMP PARALLEL WORKSHARE 104 xind(:,:,:) = 1._wp ! set equal to 1 where up-stream is not needed 105 !$OMP END PARALLEL WORKSHARE 103 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 104 DO jk = 1, jpk 105 DO jj = 1, jpj 106 DO ji = 1, jpi 107 xind(ji,jj,jk) = 1._wp ! set equal to 1 where up-stream is not needed 108 END DO 109 END DO 110 END DO 106 111 ! 107 112 IF( ld_msc_ups ) THEN ! define the upstream indicator (if asked) 108 113 ALLOCATE( upsmsk(jpi,jpj), STAT=ierr ) 109 114 !$OMP PARALLEL 110 !$OMP WORKSHARE 111 upsmsk(:,:) = 0._wp ! not upstream by default 112 !$OMP END WORKSHARE 115 !$OMP DO schedule(static) private(jj, ji) 116 DO jj = 1, jpj 117 DO ji = 1, jpi 118 upsmsk(ji,jj) = 0._wp ! not upstream by default 119 END DO 120 END DO 113 121 ! 114 122 !$OMP DO schedule(static) private(jk) … … 130 138 ! !-- first guess of the slopes 131 139 !$OMP PARALLEL 132 !$OMP WORKSHARE 133 zwx(:,:,jpk) = 0._wp ! bottom values 134 zwy(:,:,jpk) = 0._wp 135 !$OMP END WORKSHARE 140 !$OMP DO schedule(static) private(jj, ji) 141 DO jj = 1, jpj 142 DO ji = 1, jpi 143 zwx(ji,jj,jpk) = 0._wp ! bottom values 144 zwy(ji,jj,jpk) = 0._wp 145 END DO 146 END DO 136 147 !$OMP DO schedule(static) private(jk, jj, ji) 137 148 DO jk = 1, jpkm1 ! interior values … … 149 160 ! !-- Slopes of tracer 150 161 !$OMP PARALLEL 151 !$OMP WORKSHARE 152 zslpx(:,:,jpk) = 0._wp ! bottom values 153 zslpy(:,:,jpk) = 0._wp 154 !$OMP END WORKSHARE 162 !$OMP DO schedule(static) private(jj, ji) 163 DO jj = 1, jpj 164 DO ji = 1, jpi 165 zslpx(ji,jj,jpk) = 0._wp ! bottom values 166 zslpy(ji,jj,jpk) = 0._wp 167 END DO 168 END DO 155 169 !$OMP DO schedule(static) private(jk, jj, ji) 156 170 DO jk = 1, jpkm1 ! interior values … … 230 244 ! !-- first guess of the slopes 231 245 !$OMP PARALLEL 232 !$OMP WORKSHARE 233 zwx(:,:, 1 ) = 0._wp ! 
surface & bottom boundary conditions 234 zwx(:,:,jpk) = 0._wp 235 !$OMP END WORKSHARE 246 !$OMP DO schedule(static) private(jj, ji) 247 DO jj = 1, jpj 248 DO ji = 1, jpi 249 zwx(ji,jj, 1 ) = 0._wp ! surface & bottom boundary conditions 250 zwx(ji,jj,jpk) = 0._wp 251 END DO 252 END DO 236 253 !$OMP DO schedule(static) private(jk) 237 254 DO jk = 2, jpkm1 ! interior values … … 240 257 ! !-- Slopes of tracer 241 258 !$OMP END DO NOWAIT 242 !$OMP WORKSHARE 243 zslpx(:,:,1) = 0._wp ! surface values 244 !$OMP END WORKSHARE 259 !$OMP DO schedule(static) private(jj, ji) 260 DO jj = 1, jpj 261 DO ji = 1, jpi 262 zslpx(ji,jj,1) = 0._wp ! surface values 263 END DO 264 END DO 245 265 !$OMP DO schedule(static) private(jk, jj, ji) 246 266 DO jk = 2, jpkm1 ! interior value … … 286 306 END DO 287 307 ELSE ! no cavities: only at the ocean surface 288 !$OMP PARALLEL WORKSHARE 289 zwx(:,:,1) = pwn(:,:,1) * ptb(:,:,1,jn) 290 !$OMP END PARALLEL WORKSHARE 308 !$OMP PARALLEL DO schedule(static) private(jj, ji) 309 DO jj = 1, jpj 310 DO ji = 1, jpi 311 zwx(ji,jj,1) = pwn(ji,jj,1) * ptb(ji,jj,1,jn) 312 END DO 313 END DO 291 314 ENDIF 292 315 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trabbc.F90
r7037 r7508 76 76 INTEGER, INTENT(in) :: kt ! ocean time-step index 77 77 ! 78 INTEGER :: ji, jj ! dummy loop indices78 INTEGER :: ji, jj, jk ! dummy loop indices 79 79 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdt 80 80 !!---------------------------------------------------------------------- … … 84 84 IF( l_trdtra ) THEN ! Save the input temperature trend 85 85 CALL wrk_alloc( jpi,jpj,jpk, ztrdt ) 86 !$OMP PARALLEL WORKSHARE 87 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 88 !$OMP END PARALLEL WORKSHARE 86 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 87 DO jk = 1, jpk 88 DO jj = 1, jpj 89 DO ji = 1, jpi 90 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) 91 END DO 92 END DO 93 END DO 89 94 ENDIF 90 95 ! ! Add the geothermal trend on temperature … … 99 104 ! 100 105 IF( l_trdtra ) THEN ! Send the trend for diagnostics 101 !$OMP PARALLEL WORKSHARE 102 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 103 !$OMP END PARALLEL WORKSHARE 106 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 107 DO jk = 1, jpk 108 DO jj = 1, jpj 109 DO ji = 1, jpi 110 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) - ztrdt(ji,jj,jk) 111 END DO 112 END DO 113 END DO 104 114 CALL trd_tra( kt, 'TRA', jp_tem, jptra_bbc, ztrdt ) 105 115 CALL wrk_dealloc( jpi,jpj,jpk, ztrdt ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trabbl.F90
r7037 r7508 105 105 !!---------------------------------------------------------------------- 106 106 INTEGER, INTENT( in ) :: kt ! ocean time-step 107 INTEGER :: ji, jj, jk ! dummy loop indices 107 108 ! 108 109 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdt, ztrds … … 113 114 IF( l_trdtra ) THEN !* Save the input trends 114 115 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 115 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 116 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 116 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 117 DO jk = 1, jpk 118 DO jj = 1, jpj 119 DO ji = 1, jpi 120 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) 121 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) 122 END DO 123 END DO 124 END DO 117 125 ENDIF 118 126 … … 146 154 147 155 IF( l_trdtra ) THEN ! send the trends for further diagnostics 148 !$OMP PARALLEL WORKSHARE 149 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 150 ztrds(:,:,:) = tsa(:,:,:,jp_sal) - ztrds(:,:,:) 151 !$OMP END PARALLEL WORKSHARE 156 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 157 DO jk = 1, jpk 158 DO jj = 1, jpj 159 DO ji = 1, jpi 160 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) - ztrdt(ji,jj,jk) 161 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) - ztrds(ji,jj,jk) 162 END DO 163 END DO 164 END DO 152 165 CALL trd_tra( kt, 'TRA', jp_tem, jptra_bbl, ztrdt ) 153 166 CALL trd_tra( kt, 'TRA', jp_sal, jptra_bbl, ztrds ) … … 574 587 575 588 ! !* masked diffusive flux coefficients 576 !$OMP PARALLEL WORKSHARE 577 ahu_bbl_0(:,:) = rn_ahtbbl * e2_e1u(:,:) * e3u_bbl_0(:,:) * umask(:,:,1) 578 ahv_bbl_0(:,:) = rn_ahtbbl * e1_e2v(:,:) * e3v_bbl_0(:,:) * vmask(:,:,1) 579 !$OMP END PARALLEL WORKSHARE 589 !$OMP PARALLEL DO schedule(static) private(jj,ji) 590 DO jj = 1, jpj 591 DO ji = 1, jpi 592 ahu_bbl_0(ji,jj) = rn_ahtbbl * e2_e1u(ji,jj) * e3u_bbl_0(ji,jj) * umask(ji,jj,1) 593 ahv_bbl_0(ji,jj) = rn_ahtbbl * e1_e2v(ji,jj) * e3v_bbl_0(ji,jj) * vmask(ji,jj,1) 594 END DO 595 END DO 580 596 581 597 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/tradmp.F90
r7037 r7508 102 102 IF( l_trdtra ) THEN !* Save ta and sa trends 103 103 CALL wrk_alloc( jpi,jpj,jpk,jpts, ztrdts ) 104 !$OMP PARALLEL WORKSHARE 105 ztrdts(:,:,:,:) = tsa(:,:,:,:) 106 !$OMP END PARALLEL WORKSHARE 104 DO jn = 1, jpts 105 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 106 DO jk = 1, jpk 107 DO jj = 1, jpj 108 DO ji = 1, jpi 109 ztrdts(ji,jj,jk,jn) = tsa(ji,jj,jk,jn) 110 END DO 111 END DO 112 END DO 113 END DO 107 114 ENDIF 108 115 ! !== input T-S data at kt ==! … … 156 163 ! 157 164 IF( l_trdtra ) THEN ! trend diagnostic 158 !$OMP PARALLEL WORKSHARE 159 ztrdts(:,:,:,:) = tsa(:,:,:,:) - ztrdts(:,:,:,:) 160 !$OMP END PARALLEL WORKSHARE 165 DO jn = 1, jpts 166 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 167 DO jk = 1, jpk 168 DO jj = 1, jpj 169 DO ji = 1, jpi 170 ztrdts(ji,jj,jk,jn) = tsa(ji,jj,jk,jn) - ztrdts(ji,jj,jk,jn) 171 END DO 172 END DO 173 END DO 174 END DO 161 175 CALL trd_tra( kt, 'TRA', jp_tem, jptra_dmp, ztrdts(:,:,:,jp_tem) ) 162 176 CALL trd_tra( kt, 'TRA', jp_sal, jptra_dmp, ztrdts(:,:,:,jp_sal) ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traldf.F90
r6748 r7508 57 57 !!---------------------------------------------------------------------- 58 58 INTEGER, INTENT( in ) :: kt ! ocean time-step index 59 INTEGER :: jk, jj, ji 59 60 !! 60 61 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdt, ztrds … … 65 66 IF( l_trdtra ) THEN !* Save ta and sa trends 66 67 CALL wrk_alloc( jpi,jpj,jpk, ztrdt, ztrds ) 67 !$OMP PARALLEL WORKSHARE 68 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 69 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 70 !$OMP END PARALLEL WORKSHARE 68 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 69 DO jk = 1, jpk 70 DO jj = 1, jpj 71 DO ji = 1, jpi 72 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) 73 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) 74 END DO 75 END DO 76 END DO 71 77 ENDIF 72 78 ! … … 83 89 ! 84 90 IF( l_trdtra ) THEN !* save the horizontal diffusive trends for further diagnostics 85 !$OMP PARALLEL WORKSHARE 86 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 87 ztrds(:,:,:) = tsa(:,:,:,jp_sal) - ztrds(:,:,:) 88 !$OMP END PARALLEL WORKSHARE 91 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 92 DO jk = 1, jpk 93 DO jj = 1, jpj 94 DO ji = 1, jpi 95 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) - ztrdt(ji,jj,jk) 96 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) - ztrds(ji,jj,jk) 97 END DO 98 END DO 99 END DO 100 89 101 CALL trd_tra( kt, 'TRA', jp_tem, jptra_ldf, ztrdt ) 90 102 CALL trd_tra( kt, 'TRA', jp_sal, jptra_ldf, ztrds ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traldf_iso.F90
r7037 r7508 124 124 IF(lwp) WRITE(numout,*) '~~~~~~~~~~~' 125 125 ! 126 !$OMP PARALLEL WORKSHARE 127 akz (:,:,:) = 0._wp 128 ah_wslp2(:,:,:) = 0._wp 129 !$OMP END PARALLEL WORKSHARE 126 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 127 DO jk = 1, jpk 128 DO jj = 1, jpj 129 DO ji = 1, jpi 130 akz (ji,jj,jk) = 0._wp 131 ah_wslp2(ji,jj,jk) = 0._wp 132 END DO 133 END DO 134 END DO 130 135 ENDIF 131 136 ! ! set time step size (Euler/Leapfrog) … … 204 209 ! 205 210 ELSE ! 33 flux set to zero with akz=ah_wslp2 ==>> computed in full implicit 206 !$OMP PARALLEL WORKSHARE 207 akz(:,:,:) = ah_wslp2(:,:,:) 208 !$OMP END PARALLEL WORKSHARE 211 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 212 DO jk = 1, jpk 213 DO jj = 1, jpj 214 DO ji = 1, jpi 215 akz (ji,jj,jk) = ah_wslp2(ji,jj,jk) 216 END DO 217 END DO 218 END DO 209 219 ENDIF 210 220 ENDIF … … 219 229 !!gm : bug.... why (x,:,:)? (1,jpj,:) and (jpi,1,:) should be sufficient.... 220 230 !$OMP PARALLEL 221 !$OMP WORKSHARE 222 zdit (1,:,:) = 0._wp ; zdit (jpi,:,:) = 0._wp 223 zdjt (1,:,:) = 0._wp ; zdjt (jpi,:,:) = 0._wp 224 !$OMP END WORKSHARE 231 !$OMP DO schedule(static) private(jk, jj) 232 DO jk = 1, jpk 233 DO jj = 1, jpj 234 zdit (1,jj,jk) = 0._wp ; zdit (jpi,jj,jk) = 0._wp 235 zdjt (1,jj,jk) = 0._wp ; zdjt (jpi,jj,jk) = 0._wp 236 END DO 237 END DO 225 238 !!end 226 239 … … 260 273 !!---------------------------------------------------------------------- 261 274 !$OMP PARALLEL 275 DO jk = 1, jpkm1 ! Horizontal slab 262 276 !$OMP DO schedule(static) private(jj, ji) 263 DO jj = 1 , jpj !== Horizontal fluxes 264 DO ji = 1, jpi ! vector opt. 265 zdk1t(ji,jj) = ( ptb(ji,jj,1,jn) - ptb(ji,jj,2,jn) ) * wmask(ji,jj,2) 266 zdkt(ji,jj) = zdk1t(ji,jj) 267 END DO 268 END DO 269 !$OMP DO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2) 270 DO jj = 1 , jpjm1 !== Horizontal fluxes 271 DO ji = 1, fs_jpim1 ! vector opt. 
272 zabe1 = pahu(ji,jj,1) * e2_e1u(ji,jj) * e3u_n(ji,jj,1) 273 zabe2 = pahv(ji,jj,1) * e1_e2v(ji,jj) * e3v_n(ji,jj,1) 274 ! 275 zmsku = 1. / MAX( wmask(ji+1,jj,1 ) + wmask(ji,jj,2) & 276 & + wmask(ji+1,jj,2) + wmask(ji,jj,1 ), 1.) 277 ! 278 zmskv = 1. / MAX( wmask(ji,jj+1,1 ) + wmask(ji,jj,2) & 279 & + wmask(ji,jj+1,2) + wmask(ji,jj,1 ), 1.) 280 ! 281 zcof1 = - pahu(ji,jj,1) * e2u(ji,jj) * uslp(ji,jj,1) * zmsku 282 zcof2 = - pahv(ji,jj,1) * e1v(ji,jj) * vslp(ji,jj,1) * zmskv 283 ! 284 zftu(ji,jj,1 ) = ( zabe1 * zdit(ji,jj,1) & 285 & + zcof1 * ( zdkt (ji+1,jj) + zdk1t(ji,jj) & 286 & + zdk1t(ji+1,jj) + zdkt (ji,jj)) ) * umask(ji,jj,1) 287 zftv(ji,jj,1 ) = ( zabe2 * zdjt(ji,jj,1) & 288 & + zcof2 * ( zdkt (ji,jj+1) + zdk1t(ji,jj) & 289 & + zdk1t(ji,jj+1) + zdkt (ji,jj)) ) * vmask(ji,jj,1) 290 END DO 291 END DO 292 ! 293 !$OMP DO schedule(static) private(jj, ji) 294 DO jj = 2 , jpjm1 !== horizontal divergence and add to pta 295 DO ji = fs_2, fs_jpim1 ! vector opt. 296 pta(ji,jj,1,jn) = pta(ji,jj,1,jn) + zsign * (zftu(ji,jj,1) - zftu(ji-1,jj,1) & 297 & + zftv(ji,jj,1) - zftv(ji,jj-1,1) ) & 298 & * r1_e1e2t(ji,jj) / e3t_n(ji,jj,1) 299 END DO 300 END DO 301 !$OMP END DO NOWAIT 302 DO jk = 2, jpkm1 303 !$OMP DO schedule(static) private(jj, ji) 304 DO jj = 1 , jpj !== Horizontal fluxes 305 DO ji = 1, jpi ! vector opt. 306 zdk1t(ji,jj) = ( ptb(ji,jj,jk,jn) - ptb(ji,jj,jk+1,jn) ) * wmask(ji,jj,jk+1) 307 zdkt(ji,jj) = ( ptb(ji,jj,jk-1,jn) - ptb(ji,jj,jk,jn) ) * wmask(ji,jj,jk) 277 DO jj = 1 , jpj 278 DO ji = 1, jpi 279 zdk1t(ji,jj) = ( ptb(ji,jj,jk,jn) - ptb(ji,jj,jk+1,jn) ) * wmask(ji,jj,jk+1) 280 IF( jk == 1 ) THEN ; zdkt(ji,jj) = zdk1t(ji,jj) ! surface: zdkt(jk=1)=zdkt(jk=2) 281 ELSE ; zdkt(ji,jj) = ( ptb(ji,jj,jk-1,jn) - ptb(ji,jj,jk,jn) ) * wmask(ji,jj,jk) 282 ENDIF 308 283 END DO 309 284 END DO … … 315 290 ! 316 291 zmsku = 1. 
/ MAX( wmask(ji+1,jj,jk ) + wmask(ji,jj,jk+1) & 317 & + wmask(ji+1,jj,jk+1) + wmask(ji,jj,jk ), 1.)292 & + wmask(ji+1,jj,jk+1) + wmask(ji,jj,jk ), 1.) 318 293 ! 319 294 zmskv = 1. / MAX( wmask(ji,jj+1,jk ) + wmask(ji,jj,jk+1) & 320 & + wmask(ji,jj+1,jk+1) + wmask(ji,jj,jk ), 1.)295 & + wmask(ji,jj+1,jk+1) + wmask(ji,jj,jk ), 1.) 321 296 ! 322 297 zcof1 = - pahu(ji,jj,jk) * e2u(ji,jj) * uslp(ji,jj,jk) * zmsku … … 342 317 !$OMP END DO NOWAIT 343 318 END DO 344 !$OMP END PARALLEL345 319 346 320 … … 349 323 !!---------------------------------------------------------------------- 350 324 ! 351 !$OMP PARALLEL 352 !$OMP WORKSHARE 353 ztfw(1,:,:) = 0._wp ; ztfw(jpi,:,:) = 0._wp 325 !$OMP DO schedule(static) private(jk, jj) 326 DO jk = 1, jpk 327 DO jj = 1, jpj 328 ztfw(1,jj,jk) = 0._wp ; ztfw(jpi,jj,jk) = 0._wp 329 END DO 330 END DO 354 331 ! 355 332 ! Vertical fluxes 356 333 ! --------------- 357 334 ! ! Surface and bottom vertical fluxes set to zero 358 ztfw(:,:, 1 ) = 0._wp ; ztfw(:,:,jpk) = 0._wp 359 !$OMP END WORKSHARE 335 !$OMP DO schedule(static) private(jj, ji) 336 DO jj = 1, jpj 337 DO ji = 1, jpi 338 ztfw(ji,jj, 1 ) = 0._wp ; ztfw(ji,jj,jpk) = 0._wp 339 END DO 340 END DO 360 341 361 342 !$OMP DO schedule(static) private(jk, jj, ji, zmsku, zmskv, zahu_w, zahv_w, zcoef3, zcoef4) … … 449 430 ! 450 431 IF( cdtype == 'TRA' .AND. jn == jp_tem ) THEN 432 z2d(:,:) = zftu(ji,jj,1) 451 433 !$OMP PARALLEL 452 !$OMP WORKSHARE453 z2d(:,:) = zftu(ji,jj,1)454 !$OMP END WORKSHARE455 434 !$OMP DO schedule(static) private(jk, jj, ji) 456 435 DO jk = 2, jpkm1 … … 463 442 !!gm CAUTION I think there is an error of sign when using BLP operator.... 464 443 !!gm a multiplication by zsign is required (to be checked twice !) 465 !$OMP WORKSHARE 466 z2d(:,:) = - rau0_rcp * z2d(:,:) ! 
note sign is reversed to give down-gradient diffusive transports (#1043) 467 !$OMP END WORKSHARE NOWAIT 444 !$OMP DO schedule(static) private(jj, ji) 445 DO jj = 1, jpj 446 DO ji = 1, jpi 447 z2d(ji,jj) = - rau0_rcp * z2d(ji,jj) ! note sign is reversed to give down-gradient diffusive transports (#1043) 448 END DO 449 END DO 468 450 !$OMP END PARALLEL 469 451 CALL lbc_lnk( z2d, 'U', -1. ) 470 452 CALL iom_put( "udiff_heattr", z2d ) ! heat transport in i-direction 471 453 ! 454 z2d(:,:) = zftv(ji,jj,1) 472 455 !$OMP PARALLEL 473 !$OMP WORKSHARE474 z2d(:,:) = zftv(ji,jj,1)475 !$OMP END WORKSHARE476 456 !$OMP DO schedule(static) private(jk, jj, ji) 477 457 DO jk = 2, jpkm1 … … 482 462 END DO 483 463 END DO 484 !$OMP WORKSHARE 485 z2d(:,:) = - rau0_rcp * z2d(:,:) ! note sign is reversed to give down-gradient diffusive transports (#1043) 486 !$OMP END WORKSHARE NOWAIT 464 !$OMP DO schedule(static) private(jj, ji) 465 DO jj = 1, jpj 466 DO ji = 1, jpi 467 z2d(ji,jj) = - rau0_rcp * z2d(ji,jj) ! note sign is reversed to give down-gradient diffusive transports (#1043) 468 END DO 469 END DO 487 470 !$OMP END PARALLEL 488 471 CALL lbc_lnk( z2d, 'V', -1. ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/tranxt.F90
r7037 r7508 123 123 IF( l_trdtra ) THEN ! store now fields before applying the Asselin filter 124 124 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 125 !$OMP PARALLEL WORKSHARE 126 ztrdt(:,:,:) = tsn(:,:,:,jp_tem) 127 ztrds(:,:,:) = tsn(:,:,:,jp_sal) 128 !$OMP END PARALLEL WORKSHARE 125 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 126 DO jk = 1, jpk 127 DO jj = 1, jpj 128 DO ji = 1, jpi 129 ztrdt(ji,jj,jk) = tsn(ji,jj,jk,jp_tem) 130 ztrds(ji,jj,jk) = tsn(ji,jj,jk,jp_sal) 131 END DO 132 END DO 133 END DO 129 134 IF( ln_traldf_iso ) THEN ! diagnose the "pure" Kz diffusive trend 130 135 CALL trd_tra( kt, 'TRA', jp_tem, jptra_zdfp, ztrdt ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trasbc.F90
r6748 r7508 88 88 IF( l_trdtra ) THEN !* Save ta and sa trends 89 89 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 90 !$OMP PARALLEL WORKSHARE 91 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 92 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 93 !$OMP END PARALLEL WORKSHARE 90 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 91 DO jk = 1, jpk 92 DO jj = 1, jpj 93 DO ji = 1, jpi 94 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) 95 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) 96 END DO 97 END DO 98 END DO 94 99 ENDIF 95 100 ! 96 101 !!gm This should be moved into sbcmod.F90 module ? (especially now that ln_traqsr is read in namsbc namelist) 97 102 IF( .NOT.ln_traqsr ) THEN ! no solar radiation penetration 98 !$OMP PARALLEL WORKSHARE 99 qns(:,:) = qns(:,:) + qsr(:,:) ! total heat flux in qns 100 qsr(:,:) = 0._wp ! qsr set to zero 101 !$OMP END PARALLEL WORKSHARE 103 !$OMP PARALLEL DO schedule(static) private(jj, ji) 104 DO jj = 1, jpj 105 DO ji = 1, jpi 106 qns(ji,jj) = qns(ji,jj) + qsr(ji,jj) ! total heat flux in qns 107 qsr(ji,jj) = 0._wp ! qsr set to zero 108 END DO 109 END DO 102 110 ENDIF 103 111 … … 115 123 ELSE ! No restart or restart not found: Euler forward time stepping 116 124 zfact = 1._wp 117 !$OMP PARALLEL WORKSHARE 118 sbc_tsc_b(:,:,:) = 0._wp 119 !$OMP END PARALLEL WORKSHARE 125 DO jn = 1, jpts 126 !$OMP PARALLEL DO schedule(static) private(jj, ji) 127 DO jj = 1, jpj 128 DO ji = 1, jpi 129 sbc_tsc_b(ji,jj,jn) = 0._wp 130 END DO 131 END DO 132 END DO 120 133 ENDIF 121 134 ELSE !* other time-steps: swap of forcing fields 122 135 zfact = 0.5_wp 123 !$OMP PARALLEL WORKSHARE 124 sbc_tsc_b(:,:,:) = sbc_tsc(:,:,:) 125 !$OMP END PARALLEL WORKSHARE 136 DO jn = 1, jpts 137 !$OMP PARALLEL DO schedule(static) private(jj, ji) 138 DO jj = 1, jpj 139 DO ji = 1, jpi 140 sbc_tsc_b(ji,jj,jn) = sbc_tsc(ji,jj,jn) 141 END DO 142 END DO 143 END DO 126 144 ENDIF 127 145 ! !== Now sbc tracer content fields ==! … … 243 261 244 262 IF( l_trdtra ) THEN ! 
save the horizontal diffusive trends for further diagnostics 245 !$OMP PARALLEL WORKSHARE 246 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 247 ztrds(:,:,:) = tsa(:,:,:,jp_sal) - ztrds(:,:,:) 248 !$OMP END PARALLEL WORKSHARE 263 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 264 DO jk = 1, jpk 265 DO jj = 1, jpj 266 DO ji = 1, jpi 267 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) - ztrdt(ji,jj,jk) 268 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) - ztrds(ji,jj,jk) 269 END DO 270 END DO 271 END DO 249 272 CALL trd_tra( kt, 'TRA', jp_tem, jptra_nsr, ztrdt ) 250 273 CALL trd_tra( kt, 'TRA', jp_sal, jptra_nsr, ztrds ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trazdf.F90
r7037 r7508 58 58 INTEGER, INTENT( in ) :: kt ! ocean time-step index 59 59 ! 60 INTEGER :: jk 60 INTEGER :: jk, jj, ji ! Dummy loop indices 61 61 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrdt, ztrds ! 3D workspace 62 62 !!--------------------------------------------------------------------- … … 72 72 IF( l_trdtra ) THEN !* Save ta and sa trends 73 73 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 74 !$OMP PARALLEL WORKSHARE 75 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 76 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 77 !$OMP END PARALLEL WORKSHARE 74 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 75 DO jk = 1, jpk 76 DO jj = 1, jpj 77 DO ji = 1, jpi 78 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) 79 ztrds(ji,jj,jk) = tsa(ji,jj,jk,jp_sal) 80 END DO 81 END DO 82 END DO 78 83 ENDIF 79 84 ! … … 86 91 ! JMM avoid negative salinities near river outlet ! Ugly fix 87 92 ! JMM : restore negative salinities to small salinities: 88 !$OMP PARALLEL WORKSHARE 89 WHERE( tsa(:,:,:,jp_sal) < 0._wp ) tsa(:,:,:,jp_sal) = 0.1_wp 90 !$OMP END PARALLEL WORKSHARE 93 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 94 DO jk = 1, jpk 95 DO jj = 1, jpj 96 DO ji = 1, jpi 97 IF( tsa(ji,jj,jk,jp_sal) < 0._wp ) tsa(ji,jj,jk,jp_sal) = 0.1_wp 98 END DO 99 END DO 100 END DO 91 101 !!gm 92 102 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trazdf_imp.F90
r7037 r7508 107 107 ! vertical mixing coef.: avt for temperature, avs for salinity and passive tracers 108 108 IF( cdtype == 'TRA' .AND. jn == jp_tem ) THEN 109 !$OMP PARALLEL WORKSHARE 110 zwt(:,:,2:jpk) = avt (:,:,2:jpk) 111 !$OMP END PARALLEL WORKSHARE 109 !$OMP PARALLEL DO schedule(static) private(jj, ji) 110 DO jj = 1, jpj 111 DO ji = 1, jpi 112 zwt(ji,jj,2:jpk) = avt (ji,jj,2:jpk) 113 END DO 114 END DO 112 115 ELSE 113 !$OMP PARALLEL WORKSHARE 114 zwt(:,:,2:jpk) = fsavs(:,:,2:jpk) 115 !$OMP END PARALLEL WORKSHARE 116 !$OMP PARALLEL DO schedule(static) private(jj, ji) 117 DO jj = 1, jpj 118 DO ji = 1, jpi 119 zwt(ji,jj,2:jpk) = fsavs(ji,jj,2:jpk) 120 END DO 121 END DO 116 122 ENDIF 117 !$OMP PARALLEL WORKSHARE 118 zwt(:,:,1) = 0._wp 119 !$OMP END PARALLEL WORKSHARE 123 !$OMP PARALLEL DO schedule(static) private(jj, ji) 124 DO jj = 1, jpj 125 DO ji = 1, jpi 126 zwt(ji,jj,1) = 0._wp 127 END DO 128 END DO 120 129 ! 121 130 IF( l_ldfslp ) THEN ! isoneutral diffusion: add the contribution -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/zpshde.F90
r7037 r7508 101 101 IF( nn_timing == 1 ) CALL timing_start( 'zps_hde') 102 102 ! 103 !$OMP PARALLEL 104 !$OMP WORKSHARE 105 pgtu(:,:,:)=0._wp ; zti (:,:,:)=0._wp ; zhi (:,: )=0._wp 106 pgtv(:,:,:)=0._wp ; ztj (:,:,:)=0._wp ; zhj (:,: )=0._wp 107 !$OMP END WORKSHARE 103 DO jn = 1, kjpt 104 !$OMP PARALLEL DO schedule(static) private(jj,ji) 105 DO jj = 1, jpjm1 106 DO ji = 1, jpim1 107 pgtu(ji,jj,jn)=0._wp ; zti (ji,jj,jn)=0._wp 108 pgtv(ji,jj,jn)=0._wp ; ztj (ji,jj,jn)=0._wp 109 END DO 110 END DO 111 END DO 112 !$OMP PARALLEL DO schedule(static) private(jj,ji) 113 DO jj = 1, jpjm1 114 DO ji = 1, jpim1 115 zhi (ji,jj )=0._wp 116 zhj (ji,jj )=0._wp 117 END DO 118 END DO 108 119 ! 109 120 DO jn = 1, kjpt !== Interpolation of tracers at the last ocean level ==! 110 121 ! 111 !$OMP DO schedule(static) private(jj,ji,iku,ikv,ze3wu,ze3wv,zmaxu,zmaxv)122 !$OMP PARALLEL DO schedule(static) private(jj,ji,iku,ikv,ze3wu,ze3wv,zmaxu,zmaxv) 112 123 DO jj = 1, jpjm1 113 124 DO ji = 1, jpim1 … … 149 160 END DO 150 161 END DO 151 !$OMP END DO152 !$OMP SINGLE153 162 CALL lbc_lnk( pgtu(:,:,jn), 'U', -1. ) ; CALL lbc_lnk( pgtv(:,:,jn), 'V', -1. ) ! Lateral boundary cond. 154 !$OMP END SINGLE155 163 ! 156 164 END DO 157 !$OMP END PARALLEL158 165 ! 159 166 IF( PRESENT( prd ) ) THEN !== horizontal derivative of density anomalies (rd) ==! (optional part) 160 167 !$OMP PARALLEL 161 !$OMP WORKSHARE 162 pgru(:,:) = 0._wp 163 pgrv(:,:) = 0._wp ! depth of the partial step level 164 !$OMP END WORKSHARE NOWAIT 168 !$OMP DO schedule(static) private(jj,ji) 169 DO jj = 1, jpjm1 170 DO ji = 1, jpim1 171 pgru(ji,jj) = 0._wp 172 pgrv(ji,jj) = 0._wp ! depth of the partial step level 173 END DO 174 END DO 175 !$OMP END DO NOWAIT 165 176 !$OMP DO schedule(static) private(jj,ji,iku,ikv,ze3wu,ze3wv) 166 177 DO jj = 1, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRD/trdtra.F90
r7037 r7508 99 99 CASE( jptra_yad ) ; CALL trd_tra_adv( ptrd, pun, ptra, 'Y', trdty ) 100 100 CASE( jptra_zad ) ; CALL trd_tra_adv( ptrd, pun, ptra, 'Z', trdt ) 101 CASE( jptra_bbc, jptra_qsr ) ! qsr, bbc: on temperature only, send to trd_tra_mng 102 !$OMP PARALLEL WORKSHARE 103 trdt(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 104 ztrds(:,:,:) = 0._wp 105 !$OMP END PARALLEL WORKSHARE 106 CALL trd_tra_mng( trdt, ztrds, ktrd, kt ) 101 CASE( jptra_bbc, & ! qsr, bbc: on temperature only, send to trd_tra_mng 102 & jptra_qsr ) ; trdt(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 103 ztrds(:,:,:) = 0._wp 104 CALL trd_tra_mng( trdt, ztrds, ktrd, kt ) 107 105 CASE DEFAULT ! other trends: masked trends 108 !$OMP PARALLEL WORKSHARE109 106 trdt(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) ! mask & store 110 !$OMP END PARALLEL WORKSHARE111 107 END SELECT 112 108 ! … … 128 124 CALL wrk_alloc( jpi, jpj, jpk, zwt, zws, ztrdt ) 129 125 ! 130 !$OMP PARALLEL WORKSHARE131 126 zwt(:,:, 1 ) = 0._wp ; zws(:,:, 1 ) = 0._wp ! vertical diffusive fluxes 132 127 zwt(:,:,jpk) = 0._wp ; zws(:,:,jpk) = 0._wp 133 !$OMP END PARALLEL WORKSHARE134 !$OMP PARALLEL DO schedule(static) private(jk)135 128 DO jk = 2, jpk 136 129 zwt(:,:,jk) = avt(:,:,jk) * ( tsa(:,:,jk-1,jp_tem) - tsa(:,:,jk,jp_tem) ) / e3w_n(:,:,jk) * tmask(:,:,jk) … … 138 131 END DO 139 132 ! 140 !$OMP PARALLEL WORKSHARE141 133 ztrdt(:,:,jpk) = 0._wp ; ztrds(:,:,jpk) = 0._wp 142 !$OMP END PARALLEL WORKSHARE143 !$OMP PARALLEL DO schedule(static) private(jk)144 134 DO jk = 1, jpkm1 145 135 ztrdt(:,:,jk) = ( zwt(:,:,jk) - zwt(:,:,jk+1) ) / e3t_n(:,:,jk) … … 151 141 ! 152 142 CASE DEFAULT ! other trends: mask and send T & S trends to trd_tra_mng 153 !$OMP PARALLEL WORKSHARE154 143 ztrds(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 155 !$OMP END PARALLEL WORKSHARE156 144 CALL trd_tra_mng( trdt, ztrds, ktrd, kt ) 157 145 END SELECT … … 166 154 CASE( jptra_zad ) ; CALL trd_tra_adv( ptrd , pun , ptra, 'Z', ztrds ) 167 155 CASE DEFAULT ! 
other trends: just masked 168 !$OMP PARALLEL WORKSHARE169 156 ztrds(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 170 !$OMP END PARALLEL WORKSHARE171 157 END SELECT 172 158 ! ! send trend to trd_trc -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfbfr.F90
r6748 r7508 112 112 IF ( ln_loglayer.AND. .NOT.ln_linssh ) THEN ! "log layer" bottom friction coefficient 113 113 114 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp)114 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbt,ztmp) 115 115 DO jj = 1, jpj 116 116 DO ji = 1, jpi … … 124 124 ! (ISF) 125 125 IF ( ln_isfcav ) THEN 126 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp)126 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbt,ztmp) 127 127 DO jj = 1, jpj 128 128 DO ji = 1, jpi … … 137 137 ! 138 138 ELSE 139 !$OMP PARALLEL WORKSHARE 140 zbfrt(:,:) = bfrcoef2d(:,:) 141 ztfrt(:,:) = tfrcoef2d(:,:) 142 !$OMP END PARALLEL WORKSHARE 143 ENDIF 144 145 !$OMP PARALLEL DO private(jj,ji,ikbu,ikbv,zvu,zuv,zecu,zecv) 139 !$OMP PARALLEL DO schedule(static) private(jj,ji) 140 DO jj = 1, jpj 141 DO ji = 1, jpi 142 zbfrt(ji,jj) = bfrcoef2d(ji,jj) 143 ztfrt(ji,jj) = tfrcoef2d(ji,jj) 144 END DO 145 END DO 146 ENDIF 147 148 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv,zvu,zuv,zecu,zecv) 146 149 DO jj = 2, jpjm1 147 150 DO ji = 2, jpim1 … … 178 181 179 182 IF( ln_isfcav ) THEN 180 !$OMP PARALLEL DO private(jj,ji,ikbu,ikbv,zvu,zuv,zecu,zecv)183 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv,zvu,zuv,zecu,zecv) 181 184 DO jj = 2, jpjm1 182 185 DO ji = 2, jpim1 … … 272 275 CASE( 0 ) 273 276 IF(lwp) WRITE(numout,*) ' free-slip ' 274 !$OMP PARALLEL WORKSHARE 275 bfrua(:,:) = 0.e0 276 bfrva(:,:) = 0.e0 277 tfrua(:,:) = 0.e0 278 tfrva(:,:) = 0.e0 279 !$OMP END PARALLEL WORKSHARE 277 !$OMP PARALLEL DO schedule(static) private(jj,ji) 278 DO jj = 1, jpj 279 DO ji = 1, jpi 280 bfrua(ji,jj) = 0.e0 281 bfrva(ji,jj) = 0.e0 282 tfrua(ji,jj) = 0.e0 283 tfrva(ji,jj) = 0.e0 284 END DO 285 END DO 280 286 ! 281 287 CASE( 1 ) … … 304 310 ENDIF 305 311 ! 
306 !$OMP PARALLEL WORKSHARE 307 bfrua(:,:) = - bfrcoef2d(:,:) 308 bfrva(:,:) = - bfrcoef2d(:,:) 309 !$OMP END PARALLEL WORKSHARE 312 !$OMP PARALLEL DO schedule(static) private(jj,ji) 313 DO jj = 1, jpj 314 DO ji = 1, jpi 315 bfrua(ji,jj) = - bfrcoef2d(ji,jj) 316 bfrva(ji,jj) = - bfrcoef2d(ji,jj) 317 END DO 318 END DO 310 319 ! 311 320 IF ( ln_isfcav ) THEN … … 320 329 ENDIF 321 330 ! 322 !$OMP PARALLEL WORKSHARE 323 tfrua(:,:) = - tfrcoef2d(:,:) 324 tfrva(:,:) = - tfrcoef2d(:,:) 325 !$OMP END PARALLEL WORKSHARE 331 !$OMP PARALLEL DO schedule(static) private(jj,ji) 332 DO jj = 1, jpj 333 DO ji = 1, jpi 334 tfrua(ji,jj) = - tfrcoef2d(ji,jj) 335 tfrva(ji,jj) = - tfrcoef2d(ji,jj) 336 END DO 337 END DO 326 338 END IF 327 339 ! … … 383 395 ! 384 396 IF( ln_loglayer.AND. ln_linssh ) THEN ! set "log layer" bottom friction once for all 385 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp)397 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbt,ztmp) 386 398 DO jj = 1, jpj 387 399 DO ji = 1, jpi … … 393 405 END DO 394 406 IF ( ln_isfcav ) THEN 395 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp)407 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbt,ztmp) 396 408 DO jj = 1, jpj 397 409 DO ji = 1, jpi … … 433 445 zmaxtfr = -1.e10_wp ! initialise tracker for maximum of bottom friction coefficient 434 446 ! 435 !$OMP PARALLEL DO private(jj,ji,ikbu,ikbv,zfru,zfrv,ictu,ictv,zminbfr,zmaxbfr,zmintfr,zmaxtfr)447 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv,zfru,zfrv,ictu,ictv,zminbfr,zmaxbfr,zmintfr,zmaxtfr) 436 448 DO jj = 2, jpjm1 437 449 DO ji = 2, jpim1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfddm.F90
r7037 r7508 245 245 !!---------------------------------------------------------------------- 246 246 INTEGER :: ios ! local integer 247 INTEGER :: ji, jj , jk ! dummy loop indices 247 248 !! 248 249 NAMELIST/namzdf_ddm/ rn_avts, rn_hsbfr … … 270 271 IF( zdf_ddm_alloc() /= 0 ) CALL ctl_stop( 'STOP', 'zdf_ddm_init : unable to allocate arrays' ) 271 272 ! ! initialization to masked Kz 272 !$OMP PARALLEL WORKSHARE 273 avs(:,:,:) = rn_avt0 * wmask(:,:,:) 274 !$OMP END PARALLEL WORKSHARE 273 !$OMP DO schedule(static) private(jk,jj,ji) 274 DO jk = 1, jpk 275 DO jj = 1, jpj 276 DO ji = 1, jpi 277 avs(ji,jj,jk) = rn_avt0 * wmask(ji,jj,jk) 278 END DO 279 END DO 280 END DO 275 281 ! 276 282 END SUBROUTINE zdf_ddm_init -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfevd.F90
r7037 r7508 68 68 CALL wrk_alloc( jpi,jpj,jpk, zavt_evd, zavm_evd ) 69 69 ! 70 !$OMP PARALLEL WORKSHARE 71 zavt_evd(:,:,:) = avt(:,:,:) ! set avt prior to evd application 72 !$OMP END PARALLEL WORKSHARE 70 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 71 DO jk = 1, jpk 72 DO jj = 1, jpj 73 DO ji = 1, jpi 74 zavt_evd(ji,jj,jk) = avt(ji,jj,jk) ! set avt prior to evd application 75 END DO 76 END DO 77 END DO 73 78 ! 74 79 SELECT CASE ( nn_evdm ) … … 77 82 ! 78 83 !$OMP PARALLEL 79 !$OMP WORKSHARE 80 zavm_evd(:,:,:) = avm(:,:,:) ! set avm prior to evd application 81 !$OMP END WORKSHARE 84 !$OMP DO schedule(static) private(jk, jj, ji) 85 DO jk = 1, jpk 86 DO jj = 1, jpj 87 DO ji = 1, jpi 88 zavm_evd(ji,jj,jk) = avm(ji,jj,jk) ! set avm prior to evd application 89 END DO 90 END DO 91 END DO 82 92 ! 83 93 !$OMP DO schedule(static) private(jk, jj, ji) … … 101 111 CALL lbc_lnk( avmu, 'U', 1. ) ; CALL lbc_lnk( avmv, 'V', 1. ) 102 112 ! 103 !$OMP PARALLEL WORKSHARE 104 zavm_evd(:,:,:) = avm(:,:,:) - zavm_evd(:,:,:) ! change in avm due to evd 105 !$OMP END PARALLEL WORKSHARE 113 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 114 DO jk = 1, jpk 115 DO jj = 1, jpj 116 DO ji = 1, jpi 117 zavm_evd(ji,jj,jk) = avm(ji,jj,jk) - zavm_evd(ji,jj,jk) ! change in avm due to evd 118 END DO 119 END DO 120 END DO 106 121 CALL iom_put( "avm_evd", zavm_evd ) ! output this change 107 122 ! … … 120 135 END SELECT 121 136 122 !$OMP PARALLEL WORKSHARE 123 zavt_evd(:,:,:) = avt(:,:,:) - zavt_evd(:,:,:) ! change in avt due to evd 124 !$OMP END PARALLEL WORKSHARE 137 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 138 DO jk = 1, jpk 139 DO jj = 1, jpj 140 DO ji = 1, jpi 141 zavt_evd(ji,jj,jk) = avt(ji,jj,jk) - zavt_evd(ji,jj,jk) ! change in avt due to evd 142 END DO 143 END DO 144 END DO 125 145 CALL iom_put( "avt_evd", zavt_evd ) ! output this change 126 146 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfmxl.F90
r7037 r7508 99 99 ! w-level of the mixing and mixed layers 100 100 !$OMP PARALLEL 101 !$OMP WORKSHARE 102 nmln(:,:) = nlb10 ! Initialization to the number of w ocean point 103 hmlp(:,:) = 0._wp ! here hmlp used as a dummy variable, integrating vertically N^2 104 !$OMP END WORKSHARE 101 !$OMP DO schedule(static) private(jj, ji) 102 DO jj = 1, jpj 103 DO ji = 1, jpi 104 nmln(ji,jj) = nlb10 ! Initialization to the number of w ocean point 105 hmlp(ji,jj) = 0._wp ! here hmlp used as a dummy variable, integrating vertically N^2 106 END DO 107 END DO 105 108 DO jk = nlb10, jpkm1 106 109 !$OMP DO schedule(static) private(jj, ji, ikt) … … 115 118 ! 116 119 ! w-level of the turbocline and mixing layer (iom_use) 117 !$OMP WORKSHARE 118 imld(:,:) = mbkt(:,:) + 1 ! Initialization to the number of w ocean point 119 !$OMP END WORKSHARE 120 !$OMP DO schedule(static) private(jj, ji) 121 DO jj = 1, jpj 122 DO ji = 1, jpi 123 imld(ji,jj) = mbkt(ji,jj) + 1 ! Initialization to the number of w ocean point 124 END DO 125 END DO 120 126 121 127 DO jk = jpkm1, nlb10, -1 ! from the bottom to nlb10 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftke.F90
r7037 r7508 171 171 !!---------------------------------------------------------------------- 172 172 INTEGER, INTENT(in) :: kt ! ocean time step 173 INTEGER :: jk, jj, ji 173 174 !!---------------------------------------------------------------------- 174 175 ! … … 179 180 ! 180 181 IF( kt /= nit000 ) THEN ! restore before value to compute tke 181 !$OMP PARALLEL WORKSHARE 182 avt (:,:,:) = avt_k (:,:,:) 183 avm (:,:,:) = avm_k (:,:,:) 184 avmu(:,:,:) = avmu_k(:,:,:) 185 avmv(:,:,:) = avmv_k(:,:,:) 186 !$OMP END PARALLEL WORKSHARE 182 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 183 DO jk = 1, jpk 184 DO jj = 1, jpj 185 DO ji = 1, jpi 186 avt (ji,jj,jk) = avt_k (ji,jj,jk) 187 avm (ji,jj,jk) = avm_k (ji,jj,jk) 188 avmu(ji,jj,jk) = avmu_k(ji,jj,jk) 189 avmv(ji,jj,jk) = avmv_k(ji,jj,jk) 190 END DO 191 END DO 192 END DO 187 193 ENDIF 188 194 ! … … 191 197 CALL tke_avn ! now avt, avm, avmu, avmv 192 198 ! 193 !$OMP PARALLEL WORKSHARE 194 avt_k (:,:,:) = avt (:,:,:) 195 avm_k (:,:,:) = avm (:,:,:) 196 avmu_k(:,:,:) = avmu(:,:,:) 197 avmv_k(:,:,:) = avmv(:,:,:) 198 !$OMP END PARALLEL WORKSHARE 199 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 200 DO jk = 1, jpk 201 DO jj = 1, jpj 202 DO ji = 1, jpi 203 avt_k (ji,jj,jk) = avt (ji,jj,jk) 204 avm_k (ji,jj,jk) = avm (ji,jj,jk) 205 avmu_k(ji,jj,jk) = avmu(ji,jj,jk) 206 avmv_k(ji,jj,jk) = avmv(ji,jj,jk) 207 END DO 208 END DO 209 END DO 199 210 ! 200 211 #if defined key_agrif … … 316 327 ! !* finite Langmuir Circulation depth 317 328 zcof = 0.5 * 0.016 * 0.016 / ( zrhoa * zcdrag ) 318 !$OMP WORKSHARE 319 imlc(:,:) = mbkt(:,:) + 1 ! Initialization to the number of w ocean point (=2 over land) 320 !$OMP END WORKSHARE 329 !$OMP DO schedule(static) private(jj,ji) 330 DO jj = 1, jpj 331 DO ji = 1, jpi 332 imlc(ji,jj) = mbkt(ji,jj) + 1 ! Initialization to the number of w ocean point (=2 over land) 333 END DO 334 END DO 321 335 DO jk = jpkm1, 2, -1 322 336 !$OMP DO schedule(static) private(jj, ji, zus) … … 584 598 ! 
585 599 ! initialisation of interior minimum value (avoid a 2d loop with mikt) 586 !$OMP PARALLEL WORKSHARE 587 zmxlm(:,:,:) = rmxl_min 588 zmxld(:,:,:) = rmxl_min 589 !$OMP END PARALLEL WORKSHARE 600 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 601 DO jk = 1, jpk 602 DO jj = 1, jpj 603 DO ji = 1, jpi 604 zmxlm(ji,jj,jk) = rmxl_min 605 zmxld(ji,jj,jk) = rmxl_min 606 END DO 607 END DO 608 END DO 590 609 ! 591 610 IF( ln_mxl0 ) THEN ! surface mixing length = F(stress) : l=vkarmn*2.e5*taum/(rau0*g) … … 598 617 END DO 599 618 ELSE 600 !$OMP PARALLEL WORKSHARE 601 zmxlm(:,:,1) = rn_mxl0 602 !$OMP END PARALLEL WORKSHARE 619 !$OMP PARALLEL DO schedule(static) private(jj,ji) 620 DO jj = 1, jpj 621 DO ji = 1, jpi 622 zmxlm(ji,jj,1) = rn_mxl0 623 END DO 624 END DO 603 625 ENDIF 604 626 ! … … 616 638 ! !* Physical limits for the mixing length 617 639 ! 618 !$OMP WORKSHARE 619 zmxld(:,:, 1 ) = zmxlm(:,:,1) ! surface set to the minimum value 620 zmxld(:,:,jpk) = rmxl_min ! last level set to the minimum value 621 !$OMP END WORKSHARE NOWAIT 640 !$OMP DO schedule(static) private(jj,ji) 641 DO jj = 1, jpj 642 DO ji = 1, jpi 643 zmxld(ji,jj, 1 ) = zmxlm(ji,jj,1) ! surface set to the minimum value 644 zmxld(ji,jj,jpk) = rmxl_min ! last level set to the minimum value 645 END DO 646 END DO 622 647 !$OMP END PARALLEL 623 648 ! … … 708 733 ! 709 734 # if defined key_c1d 710 !$OMP PARALLEL WORKSHARE 711 e_dis(:,:,:) = zmxld(:,:,:) ! c1d configuration : save mixing and dissipation turbulent length scales 712 e_mix(:,:,:) = zmxlm(:,:,:) 713 !$OMP END PARALLEL WORKSHARE 735 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 736 DO jk = 1, jpk 737 DO jj = 1, jpj 738 DO ji = 1, jpi 739 e_dis(ji,jj,jk) = zmxld(ji,jj,jk) ! 
c1d configuration : save mixing and dissipation turbulent length scales 740 e_mix(ji,jj,jk) = zmxlm(ji,jj,jk) 741 END DO 742 END DO 743 END DO 714 744 # endif 715 745 … … 878 908 END DO 879 909 !$OMP END DO NOWAIT 880 !$OMP WORKSHARE 881 dissl(:,:,:) = 1.e-12_wp 882 !$OMP END WORKSHARE 910 !$OMP DO schedule(static) private(jk,jj,ji) 911 DO jk = 1, jpk 912 DO jj = 1, jpj 913 DO ji = 1, jpi 914 dissl(ji,jj,jk) = 1.e-12_wp 915 END DO 916 END DO 917 END DO 883 918 !$OMP END PARALLEL 884 919 ! … … 901 936 CHARACTER(len=*), INTENT(in) :: cdrw ! "READ"/"WRITE" flag 902 937 ! 903 INTEGER :: jit, jk ! dummy loop indices938 INTEGER :: jit, jk, jj, ji ! dummy loop indices 904 939 INTEGER :: id1, id2, id3, id4, id5, id6 ! local integers 905 940 !!---------------------------------------------------------------------- … … 928 963 ELSE ! No TKE array found: initialisation 929 964 IF(lwp) WRITE(numout,*) ' ===>>>> : previous run without tke scheme, en computed by iterative loop' 930 !$OMP PARALLEL WORKSHARE 931 en (:,:,:) = rn_emin * tmask(:,:,:) 932 !$OMP END PARALLEL WORKSHARE 965 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 966 DO jk = 1, jpk 967 DO jj = 1, jpj 968 DO ji = 1, jpi 969 en (ji,jj,jk) = rn_emin * tmask(ji,jj,jk) 970 END DO 971 END DO 972 END DO 933 973 CALL tke_avn ! recompute avt, avm, avmu, avmv and dissl (approximation) 934 974 ! 935 !$OMP PARALLEL WORKSHARE 936 avt_k (:,:,:) = avt (:,:,:) 937 avm_k (:,:,:) = avm (:,:,:) 938 avmu_k(:,:,:) = avmu(:,:,:) 939 avmv_k(:,:,:) = avmv(:,:,:) 940 !$OMP END PARALLEL WORKSHARE 975 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 976 DO jk = 1, jpk 977 DO jj = 1, jpj 978 DO ji = 1, jpi 979 avt_k (ji,jj,jk) = avt (ji,jj,jk) 980 avm_k (ji,jj,jk) = avm (ji,jj,jk) 981 avmu_k(ji,jj,jk) = avmu(ji,jj,jk) 982 avmv_k(ji,jj,jk) = avmv(ji,jj,jk) 983 END DO 984 END DO 985 END DO 941 986 ! 
942 987 DO jit = nit000 + 1, nit000 + 10 ; CALL zdf_tke( jit ) ; END DO … … 944 989 ELSE !* Start from rest 945 990 !$OMP PARALLEL 946 !$OMP WORKSHARE 947 en(:,:,:) = rn_emin * tmask(:,:,:) 948 !$OMP END WORKSHARE NOWAIT 991 !$OMP DO schedule(static) private(jk,jj,ji) 992 DO jk = 1, jpk 993 DO jj = 1, jpj 994 DO ji = 1, jpi 995 en(ji,jj,jk) = rn_emin * tmask(ji,jj,jk) 996 END DO 997 END DO 998 END DO 999 !$OMP END DO NOWAIT 949 1000 !$OMP DO schedule(static) private(jk) 950 1001 DO jk = 1, jpk ! set the Kz to the background value -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftmx.F90
r7037 r7508 132 132 !$OMP END DO NOWAIT 133 133 134 !$OMP WORKSHARE 135 zkz(:,:) = 0.e0 !* Associated potential energy consummed over the whole water column 136 !$OMP END WORKSHARE 134 !$OMP DO schedule(static) private(jj, ji) 135 DO jj = 1, jpj 136 DO ji = 1, jpi 137 zkz(ji,jj) = 0.e0 !* Associated potential energy consummed over the whole water column 138 END DO 139 END DO 137 140 DO jk = 2, jpkm1 138 141 !$OMP DO schedule(static) private(jj, ji) … … 256 259 ! ! compute the form function using N2 at each time step 257 260 !$OMP PARALLEL 258 !$OMP WORKSHARE 259 zempba_3d_1(:,:,jpk) = 0.e0 260 zempba_3d_2(:,:,jpk) = 0.e0 261 !$OMP END WORKSHARE 261 !$OMP DO schedule(static) private(jj, ji) 262 DO jj = 1, jpj 263 DO ji = 1, jpi 264 zempba_3d_1(ji,jj,jpk) = 0.e0 265 zempba_3d_2(ji,jj,jpk) = 0.e0 266 END DO 267 END DO 262 268 !$OMP DO schedule(static) private(jk) 263 269 DO jk = 1, jpkm1 … … 268 274 !$OMP END DO NOWAIT 269 275 ! 270 !$OMP WORKSHARE 271 zsum (:,:) = 0.e0 272 zsum1(:,:) = 0.e0 273 zsum2(:,:) = 0.e0 274 !$OMP END WORKSHARE 276 !$OMP DO schedule(static) private(jj, ji) 277 DO jj = 1, jpj 278 DO ji = 1, jpi 279 zsum (ji,jj) = 0.e0 280 zsum1(ji,jj) = 0.e0 281 zsum2(ji,jj) = 0.e0 282 END DO 283 END DO 275 284 DO jk= 2, jpk 276 285 !$OMP DO schedule(static) private(jj,ji) … … 322 331 END DO 323 332 324 !$OMP WORKSHARE 325 zkz(:,:) = 0.e0 ! Associated potential energy consummed over the whole water column 326 !$OMP END WORKSHARE 333 !$OMP DO schedule(static) private(jj, ji) 334 DO jj = 1, jpj 335 DO ji = 1, jpi 336 zkz(ji,jj) = 0.e0 ! 
Associated potential energy consummed over the whole water column 337 END DO 338 END DO 327 339 DO jk = 2, jpkm1 328 340 !$OMP DO schedule(static) private(jj,ji) … … 478 490 !$OMP PARALLEL 479 491 480 !$OMP WORKSHARE 481 en_tmx(:,:) = - rn_tfe * rn_me * ( zem2(:,:) * 1.25 + zek1(:,:) ) * ssmask(:,:) 482 !$OMP END WORKSHARE 492 !$OMP DO schedule(static) private(jj, ji) 493 DO jj = 1, jpj 494 DO ji = 1, jpi 495 en_tmx(ji,jj) = - rn_tfe * rn_me * ( zem2(ji,jj) * 1.25 + zek1(ji,jj) ) * ssmask(ji,jj) 496 END DO 497 END DO 483 498 484 499 !============ … … 513 528 ztpc = 0._wp 514 529 !$OMP PARALLEL 515 !$OMP WORKSHARE 516 zav_tide(:,:,:) = 0.e0 517 !$OMP END WORKSHARE 530 !$OMP DO schedule(static) private(jk, jj, ji) 531 DO jk = 1, jpk 532 DO jj = 1, jpj 533 DO ji = 1, jpi 534 zav_tide(ji,jj,jk) = 0.e0 535 END DO 536 END DO 537 END DO 518 538 !$OMP DO schedule(static) private(jk) 519 539 DO jk = 2, jpkm1 … … 583 603 END DO 584 604 END DO 585 !$OMP SINGLE605 !$OMP END PARALLEL 586 606 WRITE(numout,*) ' Min de zkz ', ztpc, ' Max = ', maxval(zkz(:,:) ) 587 !$OMP END SINGLE607 !$OMP PARALLEL 588 608 ! 589 609 !$OMP DO schedule(static) private(jk) … … 845 865 846 866 !$OMP PARALLEL 847 !$OMP WORKSHARE 848 zfact(:,:) = 0._wp 849 !$OMP END WORKSHARE 867 !$OMP DO schedule(static) private(jj, ji) 868 DO jj = 1, jpj 869 DO ji = 1, jpi 870 zfact(ji,jj) = 0._wp 871 END DO 872 END DO 850 873 DO jk = 2, jpkm1 ! 
part independent of the level 851 874 !$OMP DO schedule(static) private(jj,ji) … … 874 897 875 898 !$OMP PARALLEL 876 !$OMP WORKSHARE 877 zfact(:,:) = 0._wp 878 !$OMP END WORKSHARE 899 !$OMP DO schedule(static) private(jj, ji) 900 DO jj = 1, jpj 901 DO ji = 1, jpi 902 zfact(ji,jj) = 0._wp 903 END DO 904 END DO 905 879 906 DO jk = 2, jpkm1 880 907 !$OMP DO schedule(static) private(jj,ji) … … 906 933 907 934 !$OMP PARALLEL 908 !$OMP WORKSHARE 909 zwkb(:,:,:) = 0._wp 910 zfact(:,:) = 0._wp 911 !$OMP END WORKSHARE 935 !$OMP DO schedule(static) private(jk,jj,ji) 936 DO jk = 1, jpk 937 DO jj = 1, jpj 938 DO ji = 1, jpi 939 zwkb(ji,jj,jk) = 0._wp 940 END DO 941 END DO 942 END DO 943 !$OMP DO schedule(static) private(jj,ji) 944 DO jj = 1, jpj 945 DO ji = 1, jpi 946 zfact(ji,jj) = 0._wp 947 END DO 948 END DO 912 949 DO jk = 2, jpkm1 913 950 !$OMP DO schedule(static) private(jj,ji) … … 929 966 END DO 930 967 END DO 931 !$OMP WORKSHARE 932 zwkb(:,:,1) = zhdep(:,:) * tmask(:,:,1) 933 934 zweight(:,:,:) = 0._wp 935 !$OMP END WORKSHARE 968 969 !$OMP DO schedule(static) private(jj, ji) 970 DO jj = 1, jpj 971 DO ji = 1, jpi 972 zwkb(ji,jj,1) = zhdep(ji,jj) * tmask(ji,jj,1) 973 END DO 974 END DO 975 !$OMP END DO NOWAIT 976 !$OMP DO schedule(static) private(jk,jj,ji) 977 DO jk = 1, jpk 978 DO jj = 1, jpj 979 DO ji = 1, jpi 980 zweight(ji,jj,jk) = 0._wp 981 END DO 982 END DO 983 END DO 984 936 985 !$OMP DO schedule(static) private(jk) 937 986 DO jk = 2, jpkm1 … … 940 989 END DO 941 990 942 !$OMP WORKSHARE 943 zfact(:,:) = 0._wp 944 !$OMP END WORKSHARE 991 !$OMP DO schedule(static) private(jj, ji) 992 DO jj = 1, jpj 993 DO ji = 1, jpi 994 zfact(ji,jj) = 0._wp 995 END DO 996 END DO 997 945 998 DO jk = 2, jpkm1 ! part independent of the level 946 999 !$OMP DO schedule(static) private(jj,ji) … … 968 1021 969 1022 ! 
Calculate molecular kinematic viscosity 970 !$OMP WORKSHARE 971 znu_t(:,:,:) = 1.e-4_wp * ( 17.91_wp - 0.53810_wp * tsn(:,:,:,jp_tem) + 0.00694_wp * tsn(:,:,:,jp_tem) * tsn(:,:,:,jp_tem) & 972 & + 0.02305_wp * tsn(:,:,:,jp_sal) ) * tmask(:,:,:) * r1_rau0 973 !$OMP END WORKSHARE 1023 !$OMP DO schedule(static) private(jj, ji) 1024 DO jj = 1, jpj 1025 DO ji = 1, jpi 1026 znu_t(ji,jj,jk) = 1.e-4_wp * ( 17.91_wp - 0.53810_wp * tsn(ji,jj,jk,jp_tem) + 0.00694_wp * tsn(ji,jj,jk,jp_tem) * tsn(ji,jj,jk,jp_tem) & 1027 & + 0.02305_wp * tsn(ji,jj,jk,jp_sal) ) * tmask(ji,jj,jk) * r1_rau0 1028 END DO 1029 END DO 974 1030 !$OMP DO schedule(static) private(jk) 975 1031 DO jk = 2, jpkm1 … … 1094 1150 END DO 1095 1151 END DO 1096 !$OMP END DO NOWAII 1097 !$OMP WORKSHARE 1098 pcmap_tmx(:,:) = 0._wp 1099 !$OMP END WORKSHARE 1100 !$OMP PARALLEL DO schedule(static) private(jk) 1101 DO jk = 2, jpkm1 1102 pcmap_tmx(:,:) = pcmap_tmx(:,:) + e3w_n(:,:,jk) * bflx_tmx(:,:,jk) * wmask(:,:,jk) 1103 END DO 1104 !$OMP WORKSHARE 1105 pcmap_tmx(:,:) = rau0 * pcmap_tmx(:,:) 1106 !$OMP END WORKSHARE NOWAIT 1152 !$OMP END DO NOWAIT 1153 !$OMP DO schedule(static) private(jj, ji) 1154 DO jj = 1, jpj 1155 DO ji = 1, jpi 1156 pcmap_tmx(ji,jj) = 0._wp 1157 END DO 1158 END DO 1159 DO jk = 2, jpkm1 1160 !$OMP DO schedule(static) private(jj, ji) 1161 DO jj = 1, jpj 1162 DO ji = 1, jpi 1163 pcmap_tmx(ji,jj) = pcmap_tmx(ji,jj) + e3w_n(ji,jj,jk) * bflx_tmx(ji,jj,jk) * wmask(ji,jj,jk) 1164 END DO 1165 END DO 1166 END DO 1167 !$OMP DO schedule(static) private(jj, ji) 1168 DO jj = 1, jpj 1169 DO ji = 1, jpi 1170 pcmap_tmx(ji,jj) = rau0 * pcmap_tmx(ji,jj) 1171 END DO 1172 END DO 1107 1173 !$OMP END PARALLEL 1108 1174 CALL iom_put( "bflx_tmx", bflx_tmx ) … … 1180 1246 ! ensures that avt remains larger than its molecular value (=1.4e-7). Therefore, avtb should 1181 1247 ! be set here to a very small value, and avmb to its (uniform) molecular value (=1.4e-6). 1182 !$OMP PARALLEL WORKSHARE1183 1248 avmb(:) = 1.4e-6_wp ! 
viscous molecular value 1184 1249 avtb(:) = 1.e-10_wp ! very small diffusive minimum (background avt is specified in zdf_tmx) 1185 avtb_2d(:,:) = 1.e0_wp ! uniform 1186 !$OMP END PARALLEL WORKSHARE 1250 1251 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1252 DO jj = 1, jpj 1253 DO ji = 1, jpi 1254 avtb_2d(ji,jj) = 1.e0_wp ! uniform 1255 END DO 1256 END DO 1257 1187 1258 IF(lwp) THEN ! Control print 1188 1259 WRITE(numout,*) … … 1217 1288 CALL iom_close(inum) 1218 1289 1219 !$OMP PARALLEL WORKSHARE 1220 ebot_tmx(:,:) = ebot_tmx(:,:) * ssmask(:,:) 1221 epyc_tmx(:,:) = epyc_tmx(:,:) * ssmask(:,:) 1222 ecri_tmx(:,:) = ecri_tmx(:,:) * ssmask(:,:) 1223 1224 ! Set once for all to zero the first and last vertical levels of appropriate variables 1225 emix_tmx (:,:, 1 ) = 0._wp 1226 emix_tmx (:,:,jpk) = 0._wp 1227 zav_ratio(:,:, 1 ) = 0._wp 1228 zav_ratio(:,:,jpk) = 0._wp 1229 zav_wave (:,:, 1 ) = 0._wp 1230 zav_wave (:,:,jpk) = 0._wp 1231 !$OMP END PARALLEL WORKSHARE 1290 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1291 DO jj = 1, jpj 1292 DO ji = 1, jpi 1293 ebot_tmx(ji,jj) = ebot_tmx(ji,jj) * ssmask(ji,jj) 1294 epyc_tmx(ji,jj) = epyc_tmx(ji,jj) * ssmask(ji,jj) 1295 ecri_tmx(ji,jj) = ecri_tmx(ji,jj) * ssmask(ji,jj) 1296 1297 ! Set once for all to zero the first and last vertical levels of appropriate variables 1298 emix_tmx (ji,jj, 1 ) = 0._wp 1299 emix_tmx (ji,jj,jpk) = 0._wp 1300 zav_ratio(ji,jj, 1 ) = 0._wp 1301 zav_ratio(ji,jj,jpk) = 0._wp 1302 zav_wave (ji,jj, 1 ) = 0._wp 1303 zav_wave (ji,jj,jpk) = 0._wp 1304 END DO 1305 END DO 1232 1306 1233 1307 zbot = glob_sum( e1e2t(:,:) * ebot_tmx(:,:) ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/lib_fortran.F90
r6140 r7508 103 103 !!----------------------------------------------------------------------- 104 104 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab ! input 3D array 105 REAL(wp) :: glob_sum_3d ! global masked sum106 !! 107 INTEGER :: jk 105 REAL(wp) :: glob_sum_3d, a, b ! global masked sum 106 !! 107 INTEGER :: jk, jj, ji 108 108 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 109 109 !!----------------------------------------------------------------------- … … 112 112 ! 113 113 glob_sum_3d = 0.e0 114 !$OMP PARALLEL 115 !$OMP DO schedule(static) private(jk,jj,ji,b) REDUCTION(+:a,glob_sum_3d) 114 116 DO jk = 1, ijpk 115 glob_sum_3d = glob_sum_3d + SUM( ptab(:,:,jk)*tmask_i(:,:) ) 116 END DO 117 a = 0.e0 118 b = 0.e0 119 DO jj = 1, jpj 120 DO ji = 1, jpi 121 b = ptab(ji,jj,jk)*tmask_i(ji,jj) 122 a = a + b 123 END DO 124 END DO 125 glob_sum_3d = glob_sum_3d + a 126 END DO 127 !$OMP END PARALLEL 128 117 129 IF( lk_mpp ) CALL mpp_sum( glob_sum_3d ) 118 130 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/step.F90
r6748 r7508 73 73 !! -8- Outputs and diagnostics 74 74 !!---------------------------------------------------------------------- 75 INTEGER :: jk ! dummy loop indice75 INTEGER :: jk, jj, ji, jt ! dummy loop indice 76 76 INTEGER :: indic ! error indicator if < 0 77 77 INTEGER :: kcall ! optional integer argument (dom_vvl_sf_nxt) … … 132 132 IF( lk_zdfgls ) CALL zdf_gls( kstp ) ! GLS closure scheme for Kz 133 133 IF( lk_zdfcst ) THEN ! Constant Kz (reset avt, avm[uv] to the background value) 134 !$OMP PARALLEL WORKSHARE 135 avt (:,:,:) = rn_avt0 * wmask (:,:,:) 136 avmu(:,:,:) = rn_avm0 * wumask(:,:,:) 137 avmv(:,:,:) = rn_avm0 * wvmask(:,:,:) 138 !$OMP END PARALLEL WORKSHARE 134 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 135 DO jk = 1, jpk 136 DO jj = 1, jpj 137 DO ji = 1, jpi 138 avt (ji,jj,jk) = rn_avt0 * wmask (ji,jj,jk) 139 avmu(ji,jj,jk) = rn_avm0 * wumask(ji,jj,jk) 140 avmv(ji,jj,jk) = rn_avm0 * wvmask(ji,jj,jk) 141 END DO 142 END DO 143 END DO 139 144 ENDIF 140 145 … … 197 202 & rhd, gru , grv , grui, grvi ) ! of t, s, rd at the first ocean level 198 203 !!jc: fs simplification 199 !$OMP PARALLEL WORKSHARE 200 ua(:,:,:) = 0._wp ! set dynamics trends to zero 201 va(:,:,:) = 0._wp 202 !$OMP END PARALLEL WORKSHARE 204 !$OMP PARALLEL 205 !$OMP DO schedule(static) private(jk, jj, ji) 206 DO jk = 1, jpk 207 DO jj = 1, jpj 208 DO ji = 1, jpi 209 ua(ji,jj,jk) = 0._wp ! set dynamics trends to zero 210 va(ji,jj,jk) = 0._wp 211 END DO 212 END DO 213 END DO 214 !$OMP END DO NOWAIT 215 !$OMP DO schedule(static) private(jt, jk, jj, ji) 216 DO jt = 1, jpts 217 DO jk = 1, jpk 218 DO jj = 1, jpj 219 DO ji = 1, jpi 220 tsa(ji,jj,jk,jt) = 0._wp ! set tracer trends to zero 221 END DO 222 END DO 223 END DO 224 END DO 225 !$OMP END PARALLEL 203 226 IF( lk_asminc .AND. ln_asmiau .AND. ln_dyninc ) & 204 227 & CALL dyn_asm_inc ( kstp ) ! apply dynamics assimilation increment … … 253 276 ! 
Active tracers 254 277 !<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 255 !$OMP PARALLEL WORKSHARE256 tsa(:,:,:,:) = 0._wp ! set tracer trends to zero257 !$OMP END PARALLEL WORKSHARE258 278 IF( lk_asminc .AND. ln_asmiau .AND. & 259 279 & ln_trainc ) CALL tra_asm_inc ( kstp ) ! apply tracer assimilation increment -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zbio.F90
r7037 r7508 63 63 64 64 !$OMP PARALLEL 65 !$OMP WORKSHARE 66 xdiss(:,:,:) = 1. 67 !$OMP END WORKSHARE 65 !$OMP DO schedule(static) private(jk,jj,ji) 66 DO jk = 1, jpk 67 DO jj = 1, jpj 68 DO ji = 1, jpi 69 xdiss(ji,jj,jk) = 1. 70 END DO 71 END DO 72 END DO 68 73 !!gm the use of nmld should be better here? 69 74 !$OMP DO schedule(static) private(jk,jj,ji) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zfechem.F90
r7037 r7508 83 83 CALL wrk_alloc( jpi,jpj,jpk, zFe3, zFeL1, zTL1, ztotlig ) 84 84 85 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 86 DO jk = 1, jpk 87 DO jj = 1, jpj 88 DO ji = 1, jpi 89 zFe3 (ji,jj,jk) = 0. 90 zFeL1(ji,jj,jk) = 0. 91 zTL1 (ji,jj,jk) = 0. 92 END DO 93 END DO 94 END DO 85 95 ! Total ligand concentration : Ligands can be chosen to be constant or variable 86 96 ! Parameterization from Tagliabue and Voelker (2011) … … 91 101 DO jj = 1, jpj 92 102 DO ji = 1, jpi 93 zFe3 (ji,jj,jk) = 0.94 zFeL1(ji,jj,jk) = 0.95 zTL1 (ji,jj,jk) = 0.96 103 ztotlig(ji,jj,jk) = 0.09 * trb(ji,jj,jk,jpdoc) * 1E6 + ligand * 1E9 97 104 ztotlig(ji,jj,jk) = MIN( ztotlig(ji,jj,jk), 10. ) … … 100 107 END DO 101 108 ELSE 102 !$OMP PARALLEL WORKSHARE 103 ztotlig(:,:,:) = ligand * 1E9 104 zFe3 (:,:,:) = 0. 105 zFeL1(:,:,:) = 0. 106 zTL1 (:,:,:) = 0. 107 !$OMP END PARALLEL WORKSHARE 109 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 110 DO jk = 1, jpk 111 DO jj = 1, jpj 112 DO ji = 1, jpi 113 ztotlig(ji,jj,jk) = ligand * 1E9 114 END DO 115 END DO 116 END DO 108 117 ENDIF 109 118 … … 116 125 CALL wrk_alloc( jpi,jpj,jpk, zFe2, zFeL2, zTL2, zFeP ) 117 126 !$OMP PARALLEL 118 !$OMP WORKSHARE 119 zFe2 (:,:,:) = 0. 120 zFeL2(:,:,:) = 0. 121 zTL2 (:,:,:) = 0. 122 zFeP (:,:,:) = 0. 123 !$OMP END WORKSHARE 127 !$OMP DO schedule(static) private(jk,jj,ji) 128 DO jk = 1, jpk 129 DO jj = 1, jpj 130 DO ji = 1, jpi 131 zFe2 (ji,jj,jk) = 0. 132 zFeL2(ji,jj,jk) = 0. 133 zTL2 (ji,jj,jk) = 0. 134 zFeP (ji,jj,jk) = 0. 135 END DO 136 END DO 137 END DO 124 138 !$OMP DO schedule(static) private(jk,jj,ji,ztligand,zionic,zph,zoxy,zkox,zkph2,zkph1,ztfe,za,zb,zc,zkappa1,zkappa2,za2,za1,za0,zp,zq,zp3,zq2,zd,zr,zphi,zxs,zfff,zfunc) 125 139 DO jk = 1, jpkm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zint.F90
r7037 r7508 71 71 ! 72 72 IF( nday_year == nyear_len(1) ) THEN 73 !$OMP WORKSHARE 74 xksi (:,:) = xksimax(:,:) 75 xksimax(:,:) = 0._wp 76 !$OMP END WORKSHARE NOWAIT 73 !$OMP DO schedule(static) private(jj,ji) 74 DO jj = 1, jpj 75 DO ji = 1, jpi 76 xksi (ji,jj) = xksimax(ji,jj) 77 xksimax(ji,jj) = 0._wp 78 END DO 79 END DO 77 80 ENDIF 78 81 !$OMP END PARALLEL -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zlys.F90
r7037 r7508 74 74 75 75 !$OMP PARALLEL 76 !$OMP WORKSHARE 77 zco3 (:,:,:) = 0. 78 zcaldiss(:,:,:) = 0. 79 !$OMP END WORKSHARE 76 !$OMP DO schedule(static) private(jk, jj, ji) 77 DO jk = 1, jpk 78 DO jj = 1, jpj 79 DO ji = 1, jpi 80 zco3 (ji,jj,jk) = 0. 81 zcaldiss(ji,jj,jk) = 0. 82 END DO 83 END DO 84 END DO 80 85 ! ------------------------------------------- 81 86 ! COMPUTE [CO3--] and [H+] CONCENTRATIONS -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zmeso.F90
r7037 r7508 89 89 IF( lk_iomput ) THEN 90 90 CALL wrk_alloc( jpi, jpj, jpk, zgrazing ) 91 !$OMP PARALLEL WORKSHARE 92 zgrazing(:,:,:) = 0._wp 93 !$OMP END PARALLEL WORKSHARE 91 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 92 DO jk = 1, jpk 93 DO jj = 1, jpj 94 DO ji = 1, jpi 95 zgrazing(ji,jj,jk) = 0._wp 96 END DO 97 END DO 98 END DO 94 99 ENDIF 95 100 … … 252 257 CALL wrk_alloc( jpi, jpj, jpk, zw3d ) 253 258 IF( iom_use( "GRAZ2" ) ) THEN 254 !$OMP PARALLEL WORKSHARE 255 zw3d(:,:,:) = zgrazing(:,:,:) * 1.e+3 * rfact2r * tmask(:,:,:) ! Total grazing of phyto by zooplankton 256 !$OMP END PARALLEL WORKSHARE 259 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 260 DO jk = 1, jpk 261 DO jj = 1, jpj 262 DO ji = 1, jpi 263 zw3d(ji,jj,jk) = zgrazing(ji,jj,jk) * 1.e+3 * rfact2r * tmask(ji,jj,jk) ! Total grazing of phyto by zooplankton 264 END DO 265 END DO 266 END DO 257 267 CALL iom_put( "GRAZ2", zw3d ) 258 268 ENDIF 259 269 IF( iom_use( "PCAL" ) ) THEN 260 !$OMP PARALLEL WORKSHARE 261 zw3d(:,:,:) = prodcal(:,:,:) * 1.e+3 * rfact2r * tmask(:,:,:) ! Calcite production 262 !$OMP END PARALLEL WORKSHARE 270 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 271 DO jk = 1, jpk 272 DO jj = 1, jpj 273 DO ji = 1, jpi 274 zw3d(ji,jj,jk) = prodcal(ji,jj,jk) * 1.e+3 * rfact2r * tmask(ji,jj,jk) ! Calcite production 275 END DO 276 END DO 277 END DO 263 278 CALL iom_put( "PCAL", zw3d ) 264 279 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zmicro.F90
r7037 r7508 192 192 CALL wrk_alloc( jpi, jpj, jpk, zw3d ) 193 193 IF( iom_use( "GRAZ1" ) ) THEN 194 !$OMP PARALLEL WORKSHARE 195 zw3d(:,:,:) = zgrazing(:,:,:) * 1.e+3 * rfact2r * tmask(:,:,:) ! Total grazing of phyto by zooplankton 196 !$OMP END PARALLEL WORKSHARE 194 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 195 DO jk = 1, jpk 196 DO jj = 1, jpj 197 DO ji = 1, jpi 198 zw3d(ji,jj,jk) = zgrazing(ji,jj,jk) * 1.e+3 * rfact2r * tmask(ji,jj,jk) ! Total grazing of phyto by zooplankton 199 END DO 200 END DO 201 END DO 197 202 CALL iom_put( "GRAZ1", zw3d ) 198 203 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zmort.F90
r7037 r7508 80 80 ! 81 81 !$OMP PARALLEL 82 !$OMP WORKSHARE 83 prodcal(:,:,:) = 0. !: calcite production variable set to zero 84 !$OMP END WORKSHARE 82 !$OMP DO schedule(static) private(jk,jj,ji) 83 DO jk = 1, jpk 84 DO jj = 1, jpj 85 DO ji = 1, jpi 86 prodcal(ji,jj,jk) = 0. !: calcite production variable set to zero 87 END DO 88 END DO 89 END DO 85 90 !$OMP DO schedule(static) private(jk,jj,ji,zcompaph,zstep,zsizerat,zrespp,ztortp,zmortp,zfactfe,zfactch,zprcaca,zfracal) 86 91 DO jk = 1, jpkm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zopt.F90
r7037 r7508 89 89 ! ----------------------------------------------- 90 90 !$OMP PARALLEL 91 !$OMP WORKSHARE 92 ze1(:,:,:) = 0._wp 93 ze2(:,:,:) = 0._wp 94 ze3(:,:,:) = 0._wp 95 !$OMP END WORKSHARE NOWAIT 91 !$OMP DO schedule(static) private(jk,jj,ji) 92 DO jk = 1, jpk 93 DO jj = 1, jpj 94 DO ji = 1, jpi 95 ze1(ji,jj,jk) = 0._wp 96 ze2(ji,jj,jk) = 0._wp 97 ze3(ji,jj,jk) = 0._wp 98 END DO 99 END DO 100 END DO 101 !$OMP END DO NOWAIT 96 102 ! !* attenuation coef. function of Chlorophyll and wavelength (Red-Green-Blue) 97 103 !$OMP DO schedule(static) private(jk,jj,ji,zchl,irgb) … … 115 121 IF( l_trcdm2dc ) THEN ! diurnal cycle 116 122 ! 1% of qsr to compute euphotic layer 117 !$OMP PARALLEL WORKSHARE 118 zqsr100(:,:) = 0.01 * qsr_mean(:,:) ! daily mean qsr 119 !$OMP END PARALLEL WORKSHARE 123 !$OMP PARALLEL DO schedule(static) private(jj,ji) 124 DO jj = 1, jpj 125 DO ji = 1, jpi 126 zqsr100(ji,jj) = 0.01 * qsr_mean(ji,jj) ! daily mean qsr 127 END DO 128 END DO 120 129 ! 121 130 CALL p4z_opt_par( kt, qsr_mean, ze1, ze2, ze3 ) … … 137 146 ELSE 138 147 ! 1% of qsr to compute euphotic layer 139 !$OMP PARALLEL WORKSHARE 140 zqsr100(:,:) = 0.01 * qsr(:,:) 141 !$OMP END PARALLEL WORKSHARE 148 !$OMP PARALLEL DO schedule(static) private(jj,ji) 149 DO jj = 1, jpj 150 DO ji = 1, jpi 151 zqsr100(ji,jj) = 0.01 * qsr(ji,jj) 152 END DO 153 END DO 142 154 ! 143 155 CALL p4z_opt_par( kt, qsr, ze1, ze2, ze3 ) … … 150 162 ediat(:,:,jk) = 1.6 * ze1(:,:,jk) + 0.69 * ze2(:,:,jk) + 0.7 * ze3(:,:,jk) 151 163 END DO 152 !$OMP WORKSHARE 153 etot_ndcy(:,:,:) = etot(:,:,:) 154 !$OMP END WORKSHARE NOWAIT 155 !$OMP END PARALLEL 156 ENDIF 157 164 !$OMP DO schedule(static) private(jk,jj,ji) 165 DO jk = 1, jpk 166 DO jj = 1, jpj 167 DO ji = 1, jpi 168 etot_ndcy(ji,jj,jk) = etot(ji,jj,jk) 169 END DO 170 END DO 171 END DO 172 !$OMP END PARALLEL 173 ENDIF 158 174 159 175 IF( ln_qsr_bio ) THEN !* heat flux accros w-level (used in the dynamics) … … 162 178 ! 
163 179 !$OMP PARALLEL 164 !$OMP WORKSHARE 165 etot3(:,:,1) = qsr(:,:) * tmask(:,:,1) 166 !$OMP END WORKSHARE 180 !$OMP DO schedule(static) private(jj,ji) 181 DO jj = 1, jpj 182 DO ji = 1, jpi 183 etot3(ji,jj,1) = qsr(ji,jj) * tmask(ji,jj,1) 184 END DO 185 END DO 167 186 !$OMP DO schedule(static) private(jk) 168 187 DO jk = 2, nksrp + 1 169 188 etot3(:,:,jk) = ( ze0(:,:,jk) + ze1(:,:,jk) + ze2(:,:,jk) + ze3(:,:,jk) ) * tmask(:,:,jk) 170 189 END DO 171 !$OMP END DO NOWAIT172 190 !$OMP END PARALLEL 173 191 ! ! ------------------------ … … 175 193 ! !* Euphotic depth and level 176 194 !$OMP PARALLEL 177 !$OMP WORKSHARE 178 neln(:,:) = 1 ! ------------------------ 179 heup(:,:) = 300. 180 !$OMP END WORKSHARE 195 !$OMP DO schedule(static) private(jj,ji) 196 DO jj = 1, jpj 197 DO ji = 1, jpi 198 neln(ji,jj) = 1 ! ------------------------ 199 heup(ji,jj) = 300. 200 END DO 201 END DO 181 202 182 203 DO jk = 2, nksrp … … 193 214 END DO 194 215 ! 195 !$OMP PARALLELDO schedule(static) private(jj,ji)216 !$OMP DO schedule(static) private(jj,ji) 196 217 DO jj = 1, jpj 197 218 DO ji = 1, jpi … … 200 221 END DO 201 222 ! !* mean light over the mixed layer 202 !$OMP WORKSHARE 203 zdepmoy(:,:) = 0.e0 ! ------------------------------- 204 zetmp1 (:,:) = 0.e0 205 zetmp2 (:,:) = 0.e0 206 zetmp3 (:,:) = 0.e0 207 zetmp4 (:,:) = 0.e0 208 !$OMP END WORKSHARE 223 !$OMP DO schedule(static) private(jj,ji) 224 DO jj = 1, jpj 225 DO ji = 1, jpi 226 zdepmoy(ji,jj) = 0.e0 ! ------------------------------- 227 zetmp1 (ji,jj) = 0.e0 228 zetmp2 (ji,jj) = 0.e0 229 zetmp3 (ji,jj) = 0.e0 230 zetmp4 (ji,jj) = 0.e0 231 END DO 232 END DO 209 233 210 234 DO jk = 1, nksrp … … 224 248 END DO 225 249 ! 226 !$OMP WORKSHARE 227 emoy(:,:,:) = etot(:,:,:) ! remineralisation 228 zpar(:,:,:) = etot_ndcy(:,:,:) ! diagnostic : PAR with no diurnal cycle 229 !$OMP END WORKSHARE 250 !$OMP DO schedule(static) private(jk,jj,ji) 251 DO jk = 1, jpk 252 DO jj = 1, jpj 253 DO ji = 1, jpi 254 emoy(ji,jj,jk) = etot(ji,jj,jk) ! 
remineralisation 255 zpar(ji,jj,jk) = etot_ndcy(ji,jj,jk) ! diagnostic : PAR with no diurnal cycle 256 END DO 257 END DO 258 END DO 230 259 ! 231 260 !$OMP DO schedule(static) private(jk,jj,ji,z1_dep) … … 254 283 ELSE 255 284 IF( ln_diatrc ) THEN ! save output diagnostics 256 !$OMP PARALLEL WORKSHARE 257 trc2d(:,:, jp_pcs0_2d + 10) = heup(:,: ) * tmask(:,:,1) 258 trc3d(:,:,:,jp_pcs0_3d + 3) = etot(:,:,:) * tmask(:,:,:) 259 !$OMP END PARALLEL WORKSHARE 285 !$OMP PARALLEL 286 !$OMP DO schedule(static) private(jj,ji) 287 DO jj = 1, jpj 288 DO ji = 1, jpi 289 trc2d(ji,jj, jp_pcs0_2d + 10) = heup(ji,jj ) * tmask(ji,jj,1) 290 END DO 291 END DO 292 !$OMP END DO NOWAIT 293 !$OMP DO schedule(static) private(jk,jj,ji) 294 DO jk = 1, jpk 295 DO jj = 1, jpj 296 DO ji = 1, jpi 297 trc3d(ji,jj,jk,jp_pcs0_3d + 3) = etot(ji,jj,jk) * tmask(ji,jj,jk) 298 END DO 299 END DO 300 END DO 301 !$OMP END PARALLEL 260 302 ENDIF 261 303 ENDIF … … 288 330 ! Real shortwave 289 331 IF( ln_varpar ) THEN 290 !$OMP PARALLEL WORKSHARE 291 zqsr(:,:) = par_varsw(:,:) * pqsr(:,:) 292 !$OMP END PARALLEL WORKSHARE 332 !$OMP PARALLEL DO schedule(static) private(jj,ji) 333 DO jj = 1, jpj 334 DO ji = 1, jpi 335 zqsr(ji,jj) = par_varsw(ji,jj) * pqsr(ji,jj) 336 END DO 337 END DO 293 338 ELSE 294 !$OMP PARALLEL WORKSHARE 295 zqsr(:,:) = xparsw * pqsr(:,:) 296 !$OMP END PARALLEL WORKSHARE 339 !$OMP PARALLEL DO schedule(static) private(jj,ji) 340 DO jj = 1, jpj 341 DO ji = 1, jpi 342 zqsr(ji,jj) = xparsw * pqsr(ji,jj) 343 END DO 344 END DO 297 345 ENDIF 298 346 ! … … 300 348 ! 301 349 !$OMP PARALLEL 302 !$OMP WORKSHARE 303 pe0(:,:,1) = pqsr(:,:) - 3. * zqsr(:,:) ! ( 1 - 3 * alpha ) * q 304 pe1(:,:,1) = zqsr(:,:) 305 pe2(:,:,1) = zqsr(:,:) 306 pe3(:,:,1) = zqsr(:,:) 307 !$OMP END WORKSHARE 350 !$OMP DO schedule(static) private(jj,ji) 351 DO jj = 1, jpj 352 DO ji = 1, jpi 353 pe0(ji,jj,1) = pqsr(ji,jj) - 3. * zqsr(ji,jj) ! 
( 1 - 3 * alpha ) * q 354 pe1(ji,jj,1) = zqsr(ji,jj) 355 pe2(ji,jj,1) = zqsr(ji,jj) 356 pe3(ji,jj,1) = zqsr(ji,jj) 357 END DO 358 END DO 308 359 ! 309 360 DO jk = 2, nksrp + 1 … … 379 430 IF( kt == nit000 .OR. ( kt /= nit000 .AND. ntimes_par > 1 ) ) THEN 380 431 CALL fld_read( kt, 1, sf_par ) 381 !$OMP PARALLEL WORKSHARE 382 par_varsw(:,:) = ( sf_par(1)%fnow(:,:,1) ) / 3.0 383 !$OMP END PARALLEL WORKSHARE 432 !$OMP PARALLEL DO schedule(static) private(jj,ji) 433 DO jj = 1, jpj 434 DO ji = 1, jpi 435 par_varsw(ji,jj) = ( sf_par(1)%fnow(ji,jj,1) ) / 3.0 436 END DO 437 END DO 384 438 ENDIF 385 439 ENDIF … … 402 456 INTEGER :: ierr 403 457 INTEGER :: ios ! Local integer output status for namelist read 458 INTEGER :: ji, jj, jk ! dummy loop indices 404 459 REAL(wp), DIMENSION(nbtimes) :: zsteps ! times records 405 460 ! … … 457 512 IF(lwp) WRITE(numout,*) ' level of light extinction = ', nksrp, ' ref depth = ', gdepw_1d(nksrp+1), ' m' 458 513 ! 459 !$OMP PARALLEL WORKSHARE 460 ekr (:,:,:) = 0._wp 461 ekb (:,:,:) = 0._wp 462 ekg (:,:,:) = 0._wp 463 etot (:,:,:) = 0._wp 464 etot_ndcy(:,:,:) = 0._wp 465 enano (:,:,:) = 0._wp 466 ediat (:,:,:) = 0._wp 467 !$OMP END PARALLEL WORKSHARE 514 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 515 DO jk = 1, jpk 516 DO jj = 1, jpj 517 DO ji = 1, jpi 518 ekr (ji,jj,jk) = 0._wp 519 ekb (ji,jj,jk) = 0._wp 520 ekg (ji,jj,jk) = 0._wp 521 etot (ji,jj,jk) = 0._wp 522 etot_ndcy(ji,jj,jk) = 0._wp 523 enano (ji,jj,jk) = 0._wp 524 ediat (ji,jj,jk) = 0._wp 525 END DO 526 END DO 527 END DO 468 528 IF( ln_qsr_bio ) THEN 469 !$OMP PARALLEL WORKSHARE 470 etot3 (:,:,:) = 0._wp 471 !$OMP END PARALLEL WORKSHARE 529 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 530 DO jk = 1, jpk 531 DO jj = 1, jpj 532 DO ji = 1, jpi 533 etot3 (ji,jj,jk) = 0._wp 534 END DO 535 END DO 536 END DO 472 537 END IF 473 538 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zprod.F90
r7037 r7508 94 94 CALL wrk_alloc( jpi, jpj, jpk, zprorca, zprorcad, zprofed, zprofen, zprochln, zprochld, zpronew, zpronewd ) 95 95 ! 96 !$OMP PARALLEL WORKSHARE 97 zprorca (:,:,:) = 0._wp 98 zprorcad(:,:,:) = 0._wp 99 zprofed (:,:,:) = 0._wp 100 zprofen (:,:,:) = 0._wp 101 zprochln(:,:,:) = 0._wp 102 zprochld(:,:,:) = 0._wp 103 zpronew (:,:,:) = 0._wp 104 zpronewd(:,:,:) = 0._wp 105 zprdia (:,:,:) = 0._wp 106 zprbio (:,:,:) = 0._wp 107 zprdch (:,:,:) = 0._wp 108 zprnch (:,:,:) = 0._wp 109 zysopt (:,:,:) = 0._wp 96 !$OMP PARALLEL 97 !$OMP DO schedule(static) private(jk,jj,ji) 98 DO jk = 1, jpk 99 DO jj = 1, jpj 100 DO ji = 1, jpi 101 zprorca (ji,jj,jk) = 0._wp 102 zprorcad(ji,jj,jk) = 0._wp 103 zprofed (ji,jj,jk) = 0._wp 104 zprofen (ji,jj,jk) = 0._wp 105 zprochln(ji,jj,jk) = 0._wp 106 zprochld(ji,jj,jk) = 0._wp 107 zpronew (ji,jj,jk) = 0._wp 108 zpronewd(ji,jj,jk) = 0._wp 109 zprdia (ji,jj,jk) = 0._wp 110 zprbio (ji,jj,jk) = 0._wp 111 zprdch (ji,jj,jk) = 0._wp 112 zprnch (ji,jj,jk) = 0._wp 113 zysopt (ji,jj,jk) = 0._wp 110 114 111 115 ! Computation of the optimal production 112 prmax(:,:,:) = 0.6_wp * r1_rday * tgfunc(:,:,:) 116 prmax(ji,jj,jk) = 0.6_wp * r1_rday * tgfunc(ji,jj,jk) 117 END DO 118 END DO 119 END DO 113 120 114 121 ! day length in hours 115 zstrn(:,:) = 0. 116 !$OMP END PARALLEL WORKSHARE 122 !$OMP DO schedule(static) private(jj,ji) 123 DO jj = 1, jpj 124 DO ji = 1, jpi 125 zstrn(ji,jj) = 0. 126 END DO 127 END DO 128 !$OMP END PARALLEL 117 129 118 130 IF( lk_degrad ) THEN 119 !$OMP PARALLEL WORKSHARE 120 prmax(:,:,:) = prmax(:,:,:) * facvol(:,:,:) 121 !$OMP END PARALLEL WORKSHARE 131 !$OMP DO schedule(static) private(jk,jj,ji) 132 DO jk = 1, jpk 133 DO jj = 1, jpj 134 DO ji = 1, jpi 135 prmax(ji,jj,jk) = prmax(ji,jj,jk) * facvol(ji,jj,jk) 136 END DO 137 END DO 138 END DO 122 139 END IF 123 140 … … 153 170 154 171 ! Maximum light intensity 155 !$OMP WORKSHARE 156 WHERE( zstrn(:,:) < 1.e0 ) zstrn(:,:) = 24. 157 zstrn(:,:) = 24. 
/ zstrn(:,:) 158 !$OMP END WORKSHARE NOWAIT 172 !$OMP DO schedule(static) private(jj,ji) 173 DO jj = 1 ,jpj 174 DO ji = 1, jpi 175 IF( zstrn(ji,jj) < 1.e0 ) zstrn(ji,jj) = 24. 176 zstrn(ji,jj) = 24. / zstrn(ji,jj) 177 END DO 178 END DO 159 179 !$OMP END PARALLEL 160 180 … … 433 453 ! 434 454 IF( iom_use( "PPPHY" ) .OR. iom_use( "PPPHY2" ) ) THEN 435 !$OMP PARALLEL WORKSHARE 436 zw3d(:,:,:) = zprorca (:,:,:) * zfact * tmask(:,:,:) ! primary production by nanophyto 437 !$OMP END PARALLEL WORKSHARE 455 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 456 DO jk = 1, jpk 457 DO jj = 1, jpj 458 DO ji = 1, jpi 459 zw3d(ji,jj,jk) = zprorca (ji,jj,jk) * zfact * tmask(ji,jj,jk) ! primary production by nanophyto 460 END DO 461 END DO 462 END DO 438 463 CALL iom_put( "PPPHY" , zw3d ) 464 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 465 DO jk = 1, jpk 466 DO jj = 1, jpj 467 DO ji = 1, jpi 468 zw3d(ji,jj,jk) = zprorcad (ji,jj,jk) * zfact * tmask(ji,jj,jk) ! primary production by nanophyto 469 END DO 470 END DO 471 END DO 439 472 ! 440 !$OMP PARALLEL WORKSHARE441 zw3d(:,:,:) = zprorcad(:,:,:) * zfact * tmask(:,:,:) ! primary production by diatomes442 !$OMP END PARALLEL WORKSHARE443 473 CALL iom_put( "PPPHY2" , zw3d ) 444 474 ENDIF 445 475 IF( iom_use( "PPNEWN" ) .OR. iom_use( "PPNEWD" ) ) THEN 446 !$OMP PARALLEL WORKSHARE 447 zw3d(:,:,:) = zpronew (:,:,:) * zfact * tmask(:,:,:) ! new primary production by nanophyto 448 !$OMP END PARALLEL WORKSHARE 476 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 477 DO jk = 1, jpk 478 DO jj = 1, jpj 479 DO ji = 1, jpi 480 zw3d(ji,jj,jk) = zpronew (ji,jj,jk) * zfact * tmask(ji,jj,jk) ! new primary production by nanophyto 481 END DO 482 END DO 483 END DO 449 484 CALL iom_put( "PPNEWN" , zw3d ) 450 485 ! 451 !$OMP PARALLEL WORKSHARE 452 zw3d(:,:,:) = zpronewd(:,:,:) * zfact * tmask(:,:,:) ! 
new primary production by diatomes 453 !$OMP END PARALLEL WORKSHARE 486 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 487 DO jk = 1, jpk 488 DO jj = 1, jpj 489 DO ji = 1, jpi 490 zw3d(ji,jj,jk) = zpronewd (ji,jj,jk) * zfact * tmask(ji,jj,jk) ! new primary production by nanophyto 491 END DO 492 END DO 493 END DO 454 494 CALL iom_put( "PPNEWD" , zw3d ) 455 495 ENDIF 456 496 IF( iom_use( "PBSi" ) ) THEN 457 !$OMP PARALLEL WORKSHARE 458 zw3d(:,:,:) = zprorcad(:,:,:) * zfact * tmask(:,:,:) * zysopt(:,:,:) ! biogenic silica production 459 !$OMP END PARALLEL WORKSHARE 497 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 498 DO jk = 1, jpk 499 DO jj = 1, jpj 500 DO ji = 1, jpi 501 zw3d(ji,jj,jk) = zprorcad(ji,jj,jk) * zfact * tmask(ji,jj,jk) * zysopt(ji,jj,jk) ! biogenic silica production 502 END DO 503 END DO 504 END DO 460 505 CALL iom_put( "PBSi" , zw3d ) 461 506 ENDIF 462 507 IF( iom_use( "PFeN" ) .OR. iom_use( "PFeD" ) ) THEN 463 !$OMP PARALLEL WORKSHARE 464 zw3d(:,:,:) = zprofen(:,:,:) * zfact * tmask(:,:,:) ! biogenic iron production by nanophyto 465 !$OMP END PARALLEL WORKSHARE 508 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 509 DO jk = 1, jpk 510 DO jj = 1, jpj 511 DO ji = 1, jpi 512 zw3d(ji,jj,jk) = zprofen(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! biogenic iron production by nanophyto 513 END DO 514 END DO 515 END DO 466 516 CALL iom_put( "PFeN" , zw3d ) 467 517 ! 468 !$OMP PARALLEL WORKSHARE 469 zw3d(:,:,:) = zprofed(:,:,:) * zfact * tmask(:,:,:) ! biogenic iron production by diatomes 470 !$OMP END PARALLEL WORKSHARE 518 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 519 DO jk = 1, jpk 520 DO jj = 1, jpj 521 DO ji = 1, jpi 522 zw3d(ji,jj,jk) = zprofed(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! biogenic iron production by nanophyto 523 END DO 524 END DO 525 END DO 471 526 CALL iom_put( "PFeD" , zw3d ) 472 527 ENDIF 473 528 IF( iom_use( "Mumax" ) ) THEN 474 !$OMP PARALLEL WORKSHARE 475 zw3d(:,:,:) = prmax(:,:,:) * tmask(:,:,:) ! 
Maximum growth rate 476 !$OMP END PARALLEL WORKSHARE 529 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 530 DO jk = 1, jpk 531 DO jj = 1, jpj 532 DO ji = 1, jpi 533 zw3d(ji,jj,jk) = prmax(ji,jj,jk) * tmask(ji,jj,jk) ! Maximum growth rate 534 END DO 535 END DO 536 END DO 477 537 CALL iom_put( "Mumax" , zw3d ) 478 538 ENDIF 479 539 IF( iom_use( "MuN" ) .OR. iom_use( "MuD" ) ) THEN 480 !$OMP PARALLEL WORKSHARE 481 zw3d(:,:,:) = zprbio(:,:,:) * xlimphy(:,:,:) * tmask(:,:,:) ! Realized growth rate for nanophyto 482 !$OMP END PARALLEL WORKSHARE 540 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 541 DO jk = 1, jpk 542 DO jj = 1, jpj 543 DO ji = 1, jpi 544 zw3d(ji,jj,jk) = zprbio(ji,jj,jk) * xlimphy(ji,jj,jk) * tmask(ji,jj,jk) ! Realized growth rate for nanophyto 545 END DO 546 END DO 547 END DO 483 548 CALL iom_put( "MuN" , zw3d ) 484 549 ! 485 !$OMP PARALLEL WORKSHARE 486 zw3d(:,:,:) = zprdia(:,:,:) * xlimdia(:,:,:) * tmask(:,:,:) ! Realized growth rate for diatoms 487 !$OMP END PARALLEL WORKSHARE 550 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 551 DO jk = 1, jpk 552 DO jj = 1, jpj 553 DO ji = 1, jpi 554 zw3d(ji,jj,jk) = zprdia(ji,jj,jk) * xlimdia(ji,jj,jk) * tmask(ji,jj,jk) ! Realized growth rate for diatoms 555 END DO 556 END DO 557 END DO 488 558 CALL iom_put( "MuD" , zw3d ) 489 559 ENDIF 490 560 IF( iom_use( "LNlight" ) .OR. iom_use( "LDlight" ) ) THEN 491 !$OMP PARALLEL WORKSHARE 492 zw3d(:,:,:) = zprbio (:,:,:) / (prmax(:,:,:) + rtrn) * tmask(:,:,:) ! light limitation term 493 !$OMP END PARALLEL WORKSHARE 561 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 562 DO jk = 1, jpk 563 DO jj = 1, jpj 564 DO ji = 1, jpi 565 zw3d(ji,jj,jk) = zprbio (ji,jj,jk) / (prmax(ji,jj,jk) + rtrn) * tmask(ji,jj,jk) ! light limitation term 566 END DO 567 END DO 568 END DO 494 569 CALL iom_put( "LNlight" , zw3d ) 495 570 ! 496 !$OMP PARALLEL WORKSHARE 497 zw3d(:,:,:) = zprdia (:,:,:) / (prmax(:,:,:) + rtrn) * tmask(:,:,:) ! 
light limitation term 498 !$OMP END PARALLEL WORKSHARE 571 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 572 DO jk = 1, jpk 573 DO jj = 1, jpj 574 DO ji = 1, jpi 575 zw3d(ji,jj,jk) = zprdia (ji,jj,jk) / (prmax(ji,jj,jk) + rtrn) * tmask(ji,jj,jk) ! light limitation term 576 END DO 577 END DO 578 END DO 499 579 CALL iom_put( "LDlight" , zw3d ) 500 580 ENDIF 501 581 IF( iom_use( "TPP" ) ) THEN 502 !$OMP PARALLEL WORKSHARE 503 zw3d(:,:,:) = ( zprorca(:,:,:) + zprorcad(:,:,:) ) * zfact * tmask(:,:,:) ! total primary production 504 !$OMP END PARALLEL WORKSHARE 582 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 583 DO jk = 1, jpk 584 DO jj = 1, jpj 585 DO ji = 1, jpi 586 zw3d(ji,jj,jk) = ( zprorca(ji,jj,jk) + zprorcad(ji,jj,jk) ) * zfact * tmask(ji,jj,jk) ! total primary production 587 END DO 588 END DO 589 END DO 505 590 CALL iom_put( "TPP" , zw3d ) 506 591 ENDIF 507 592 IF( iom_use( "TPNEW" ) ) THEN 508 !$OMP PARALLEL WORKSHARE 509 zw3d(:,:,:) = ( zpronew(:,:,:) + zpronewd(:,:,:) ) * zfact * tmask(:,:,:) ! total new production 510 !$OMP END PARALLEL WORKSHARE 593 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 594 DO jk = 1, jpk 595 DO jj = 1, jpj 596 DO ji = 1, jpi 597 zw3d(ji,jj,jk) = ( zpronew(ji,jj,jk) + zpronewd(ji,jj,jk) ) * zfact * tmask(ji,jj,jk) ! total new production 598 END DO 599 END DO 600 END DO 511 601 CALL iom_put( "TPNEW" , zw3d ) 512 602 ENDIF 513 603 IF( iom_use( "TPBFE" ) ) THEN 514 !$OMP PARALLEL WORKSHARE 515 zw3d(:,:,:) = ( zprofen(:,:,:) + zprofed(:,:,:) ) * zfact * tmask(:,:,:) ! total biogenic iron production 516 !$OMP END PARALLEL WORKSHARE 604 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 605 DO jk = 1, jpk 606 DO jj = 1, jpj 607 DO ji = 1, jpi 608 zw3d(ji,jj,jk) = ( zprofen(ji,jj,jk) + zprofed(ji,jj,jk) ) * zfact * tmask(ji,jj,jk) ! total biogenic iron production 609 END DO 610 END DO 611 END DO 517 612 CALL iom_put( "TPBFE" , zw3d ) 518 613 ENDIF 519 614 IF( iom_use( "INTPPPHY" ) .OR. 
iom_use( "INTPPPHY2" ) ) THEN 520 615 !$OMP PARALLEL 521 !$OMP WORKSHARE 522 zw2d(:,:) = 0. 523 !$OMP END WORKSHARE 616 !$OMP DO schedule(static) private(jj,ji) 617 DO jj = 1, jpj 618 DO ji =1 ,jpi 619 zw2d(ji,jj) = 0. 620 END DO 621 END DO 524 622 DO jk = 1, jpkm1 525 623 !$OMP DO schedule(static) private(jj,ji) … … 535 633 ! 536 634 !$OMP PARALLEL 537 !$OMP WORKSHARE 538 zw2d(:,:) = 0. 539 !$OMP END WORKSHARE 635 !$OMP DO schedule(static) private(jj,ji) 636 DO jj = 1, jpj 637 DO ji =1 ,jpi 638 zw2d(ji,jj) = 0. 639 END DO 640 END DO 540 641 DO jk = 1, jpkm1 541 642 !$OMP DO schedule(static) private(jj,ji) … … 552 653 IF( iom_use( "INTPP" ) ) THEN 553 654 !$OMP PARALLEL 554 !$OMP WORKSHARE 555 zw2d(:,:) = 0. 556 !$OMP END WORKSHARE 655 !$OMP DO schedule(static) private(jj,ji) 656 DO jj = 1, jpj 657 DO ji =1 ,jpi 658 zw2d(ji,jj) = 0. 659 END DO 660 END DO 557 661 DO jk = 1, jpkm1 558 662 !$OMP DO schedule(static) private(jj,ji) … … 569 673 IF( iom_use( "INTPNEW" ) ) THEN 570 674 !$OMP PARALLEL 571 !$OMP WORKSHARE 572 zw2d(:,:) = 0. 573 !$OMP END WORKSHARE 675 !$OMP DO schedule(static) private(jj,ji) 676 DO jj = 1, jpj 677 DO ji =1 ,jpi 678 zw2d(ji,jj) = 0. 679 END DO 680 END DO 574 681 DO jk = 1, jpkm1 575 682 !$OMP DO schedule(static) private(jj,ji) … … 586 693 IF( iom_use( "INTPBFE" ) ) THEN ! total biogenic iron production ( vertically integrated ) 587 694 !$OMP PARALLEL 588 !$OMP WORKSHARE 589 zw2d(:,:) = 0. 590 !$OMP END WORKSHARE 695 !$OMP DO schedule(static) private(jj,ji) 696 DO jj = 1, jpj 697 DO ji =1 ,jpi 698 zw2d(ji,jj) = 0. 699 END DO 700 END DO 591 701 DO jk = 1, jpkm1 592 702 !$OMP DO schedule(static) private(jj,ji) … … 603 713 IF( iom_use( "INTPBSI" ) ) THEN ! total biogenic silica production ( vertically integrated ) 604 714 !$OMP PARALLEL 605 !$OMP WORKSHARE 606 zw2d(:,:) = 0. 607 !$OMP END WORKSHARE 715 !$OMP DO schedule(static) private(jj,ji) 716 DO jj = 1, jpj 717 DO ji =1 ,jpi 718 zw2d(ji,jj) = 0. 
719 END DO 720 END DO 608 721 DO jk = 1, jpkm1 609 722 !$OMP DO schedule(static) private(jj,ji) … … 626 739 IF( ln_diatrc ) THEN 627 740 zfact = 1.e+3 * rfact2r 628 !$OMP PARALLEL WORKSHARE 629 trc3d(:,:,:,jp_pcs0_3d + 4) = zprorca (:,:,:) * zfact * tmask(:,:,:) 630 trc3d(:,:,:,jp_pcs0_3d + 5) = zprorcad(:,:,:) * zfact * tmask(:,:,:) 631 trc3d(:,:,:,jp_pcs0_3d + 6) = zpronew (:,:,:) * zfact * tmask(:,:,:) 632 trc3d(:,:,:,jp_pcs0_3d + 7) = zpronewd(:,:,:) * zfact * tmask(:,:,:) 633 trc3d(:,:,:,jp_pcs0_3d + 8) = zprorcad(:,:,:) * zfact * tmask(:,:,:) * zysopt(:,:,:) 634 trc3d(:,:,:,jp_pcs0_3d + 9) = zprofed (:,:,:) * zfact * tmask(:,:,:) 741 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 742 DO jk = 1, jpk 743 DO jj = 1, jpj 744 DO ji = 1, jpi 745 trc3d(ji,jj,jk,jp_pcs0_3d + 4) = zprorca (ji,jj,jk) * zfact * tmask(ji,jj,jk) 746 trc3d(ji,jj,jk,jp_pcs0_3d + 5) = zprorcad(ji,jj,jk) * zfact * tmask(ji,jj,jk) 747 trc3d(ji,jj,jk,jp_pcs0_3d + 6) = zpronew (ji,jj,jk) * zfact * tmask(ji,jj,jk) 748 trc3d(ji,jj,jk,jp_pcs0_3d + 7) = zpronewd(ji,jj,jk) * zfact * tmask(ji,jj,jk) 749 trc3d(ji,jj,jk,jp_pcs0_3d + 8) = zprorcad(ji,jj,jk) * zfact * tmask(ji,jj,jk) * zysopt(ji,jj,jk) 750 trc3d(ji,jj,jk,jp_pcs0_3d + 9) = zprofed (ji,jj,jk) * zfact * tmask(ji,jj,jk) 635 751 # if ! defined key_kriest 636 trc3d(:,:,:,jp_pcs0_3d + 10) = zprofen (:,:,:) * zfact * tmask(:,:,:)752 trc3d(ji,jj,jk,jp_pcs0_3d + 10) = zprofen (ji,jj,jk) * zfact * tmask(ji,jj,jk) 637 753 # endif 638 !$OMP END PARALLEL WORKSHARE 754 END DO 755 END DO 756 END DO 639 757 ENDIF 640 758 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zrem.F90
r7037 r7508 90 90 ! Initialisation of temprary arrys 91 91 !$OMP PARALLEL 92 !$OMP WORKSHARE 93 zdepprod(:,:,:) = 1._wp 94 ztempbac(:,:) = 0._wp 95 !$OMP END WORKSHARE 96 92 !$OMP DO schedule(static) private(jk,jj,ji) 93 DO jk = 1, jpk 94 DO jj = 1, jpj 95 DO ji = 1, jpi 96 zdepprod(ji,jj,jk) = 1._wp 97 END DO 98 END DO 99 END DO 100 !$OMP DO schedule(static) private(jj,ji) 101 DO jj = 1, jpj 102 DO ji = 1, jpi 103 ztempbac(ji,jj) = 0._wp 104 END DO 105 END DO 97 106 ! Computation of the mean phytoplankton concentration as 98 107 ! a crude estimate of the bacterial biomass … … 332 341 ! 333 342 IF( iom_use( "REMIN" ) ) THEN 334 !$OMP PARALLEL WORKSHARE 335 zw3d(:,:,:) = zolimi(:,:,:) * tmask(:,:,:) * zfact ! Remineralisation rate 336 !$OMP END PARALLEL WORKSHARE 343 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 344 DO jk = 1, jpk 345 DO jj = 1, jpj 346 DO ji = 1, jpi 347 zw3d(ji,jj,jk) = zolimi(ji,jj,jk) * tmask(ji,jj,jk) * zfact ! Remineralisation rate 348 END DO 349 END DO 350 END DO 337 351 CALL iom_put( "REMIN" , zw3d ) 338 352 ENDIF 339 353 IF( iom_use( "DENIT" ) ) THEN 340 !$OMP PARALLEL WORKSHARE 341 zw3d(:,:,:) = denitr(:,:,:) * rdenit * rno3 * tmask(:,:,:) * zfact ! Denitrification 342 !$OMP END PARALLEL WORKSHARE 354 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 355 DO jk = 1, jpk 356 DO jj = 1, jpj 357 DO ji = 1, jpi 358 zw3d(ji,jj,jk) = denitr(ji,jj,jk) * rdenit * rno3 * tmask(ji,jj,jk) * zfact ! Denitrification 359 END DO 360 END DO 361 END DO 343 362 CALL iom_put( "DENIT" , zw3d ) 344 363 ENDIF … … 376 395 & oxymin 377 396 INTEGER :: ios ! Local integer output status for namelist read 397 INTEGER :: ji, jj, jk ! dummy loop indices 378 398 379 399 REWIND( numnatp_ref ) ! Namelist nampisrem in reference namelist : Pisces remineralization … … 399 419 ENDIF 400 420 ! 
401 !$OMP PARALLEL WORKSHARE 402 nitrfac (:,:,:) = 0._wp 403 denitr (:,:,:) = 0._wp 404 denitnh4(:,:,:) = 0._wp 405 !$OMP END PARALLEL WORKSHARE 421 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 422 DO jk = 1, jpk 423 DO jj = 1, jpj 424 DO ji = 1, jpi 425 nitrfac (ji,jj,jk) = 0._wp 426 denitr (ji,jj,jk) = 0._wp 427 denitnh4(ji,jj,jk) = 0._wp 428 END DO 429 END DO 430 END DO 406 431 ! 407 432 END SUBROUTINE p4z_rem_init -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsbc.F90
r7037 r7508 278 278 ! online configuration : computed in sbcrnf 279 279 IF( lk_offline ) THEN 280 !$OMP PARALLEL WORKSHARE 281 nk_rnf(:,:) = 1 282 h_rnf (:,:) = gdept_n(:,:,1) 283 !$OMP END PARALLEL WORKSHARE 280 !$OMP PARALLEL DO schedule(static) private(jj, ji) 281 DO jj = 1, jpj 282 DO ji = 1, jpi 283 nk_rnf(ji,jj) = 1 284 h_rnf (ji,jj) = gdept_n(ji,jj,1) 285 END DO 286 END DO 284 287 ENDIF 285 288 … … 482 485 ! Coastal supply of iron 483 486 ! ------------------------- 484 !$OMP WORKSHARE 485 ironsed(:,:,jpk) = 0._wp 486 !$OMP END WORKSHARE 487 !$OMP PARALLEL DO schedule(static) private(jj, ji) 488 DO jj = 1, jpj 489 DO ji = 1, jpi 490 ironsed(ji,jj,jpk) = 0._wp 491 END DO 492 END DO 487 493 !$OMP DO schedule(static) private(jk) 488 494 DO jk = 1, jpkm1 … … 507 513 CALL iom_close( numhydro ) 508 514 ! 509 !$OMP PARALLEL WORKSHARE 510 hydrofe(:,:,:) = ( hydrofe(:,:,:) * hratio ) / ( cvol(:,:,:) * ryyss + rtrn ) / 1000._wp 511 !$OMP END PARALLEL WORKSHARE 515 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 516 DO jk = 1, jpk 517 DO jj = 1, jpj 518 DO ji = 1, jpi 519 hydrofe(ji,jj,jk) = ( hydrofe(ji,jj,jk) * hratio ) / ( cvol(ji,jj,jk) * ryyss + rtrn ) / 1000._wp 520 END DO 521 END DO 522 END DO 512 523 ! 513 524 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsed.F90
r7037 r7508 82 82 CALL wrk_alloc( jpi, jpj, jpk, zsoufer ) 83 83 84 !$OMP PARALLEL WORKSHARE 85 zdenit2d(:,:) = 0.e0 86 zbureff (:,:) = 0.e0 87 zwork1 (:,:) = 0.e0 88 zwork2 (:,:) = 0.e0 89 zwork3 (:,:) = 0.e0 90 !$OMP END PARALLEL WORKSHARE 84 !$OMP PARALLEL DO schedule(static) private(jj,ji) 85 DO jj = 1, jpj 86 DO ji = 1, jpi 87 zdenit2d(ji,jj) = 0.e0 88 zbureff (ji,jj) = 0.e0 89 zwork1 (ji,jj) = 0.e0 90 zwork2 (ji,jj) = 0.e0 91 zwork3 (ji,jj) = 0.e0 92 END DO 93 END DO 91 94 92 95 ! Iron input/uptake due to sea ice : Crude parameterization based on Lancelot et al. … … 107 110 END DO 108 111 ! 109 !$OMP WORKSHARE 110 tra(:,:,1,jpfer) = tra(:,:,1,jpfer) + zironice(:,:) 111 !$OMP END WORKSHARE NOWAIT 112 !$OMP DO schedule(static) private(jj,ji) 113 DO jj = 1, jpj 114 DO ji = 1, jpi 115 tra(ji,jj,1,jpfer) = tra(ji,jj,1,jpfer) + zironice(ji,jj) 116 END DO 117 END DO 112 118 !$OMP END PARALLEL 113 119 ! … … 127 133 ! ! Iron and Si deposition at the surface 128 134 IF( ln_solub ) THEN 129 !$OMP PARALLEL WORKSHARE 130 zirondep(:,:,1) = solub(:,:) * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 55.85 + 3.e-10 * r1_ryyss 131 !$OMP END PARALLEL WORKSHARE 135 !$OMP PARALLEL DO schedule(static) private(jj,ji) 136 DO jj = 1, jpj 137 DO ji = 1, jpi 138 zirondep(ji,jj,1) = solub(ji,jj) * dust(ji,jj) * mfrac * rfact2 / e3t_n(ji,jj,1) / 55.85 + 3.e-10 * r1_ryyss 139 END DO 140 END DO 132 141 ELSE 133 !$OMP PARALLEL WORKSHARE 134 zirondep(:,:,1) = dustsolub * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 55.85 + 3.e-10 * r1_ryyss 135 !$OMP END PARALLEL WORKSHARE 142 !$OMP PARALLEL DO schedule(static) private(jj,ji) 143 DO jj = 1, jpj 144 DO ji = 1, jpi 145 zirondep(ji,jj,1) = dustsolub * dust(ji,jj) * mfrac * rfact2 / e3t_n(ji,jj,1) / 55.85 + 3.e-10 * r1_ryyss 146 END DO 147 END DO 136 148 ENDIF 137 !$OMP PARALLEL WORKSHARE 138 zsidep(:,:) = 8.8 * 0.075 * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 28.1 139 zpdep (:,:) = 0.1 * 0.021 * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 31. 
/ po4r 140 !$OMP END PARALLEL WORKSHARE 149 !$OMP PARALLEL DO schedule(static) private(jj,ji) 150 DO jj = 1, jpj 151 DO ji = 1, jpi 152 zsidep(ji,jj) = 8.8 * 0.075 * dust(ji,jj) * mfrac * rfact2 / e3t_n(ji,jj,1) / 28.1 153 zpdep (ji,jj) = 0.1 * 0.021 * dust(ji,jj) * mfrac * rfact2 / e3t_n(ji,jj,1) / 31. / po4r 154 END DO 155 END DO 141 156 ! ! Iron solubilization of particles in the water column 142 157 ! ! dust in kg/m2/s ---> 1/55.85 to put in mol/Fe ; wdust in m/j 143 158 zwdust = 0.03 * rday / ( wdust * 55.85 ) / ( 270. * rday ) 159 !$OMP PARALLEL 144 160 !$OMP DO schedule(static) private(jk) 145 161 DO jk = 2, jpkm1 … … 147 163 END DO 148 164 ! ! Iron solubilization of particles in the water column 149 !$OMP PARALLEL 150 !$OMP WORKSHARE 151 tra(:,:,1,jppo4) = tra(:,:,1,jppo4) + zpdep (:,:) 152 tra(:,:,1,jpsil) = tra(:,:,1,jpsil) + zsidep (:,:) 153 tra(:,:,:,jpfer) = tra(:,:,:,jpfer) + zirondep(:,:,:) 154 !$OMP END WORKSHARE 165 !$OMP DO schedule(static) private(jj,ji) 166 DO jj = 1, jpj 167 DO ji = 1, jpi 168 tra(ji,jj,1,jppo4) = tra(ji,jj,1,jppo4) + zpdep (ji,jj) 169 tra(ji,jj,1,jpsil) = tra(ji,jj,1,jpsil) + zsidep (ji,jj) 170 END DO 171 END DO 172 !$OMP DO schedule(static) private(jk,jj,ji) 173 DO jk = 1, jpk 174 DO jj = 1, jpj 175 DO ji = 1, jpi 176 tra(ji,jj,jk,jpfer) = tra(ji,jj,jk,jpfer) + zirondep(ji,jj,jk) 177 END DO 178 END DO 179 END DO 155 180 !$OMP END PARALLEL 156 181 ! 
… … 164 189 ELSE 165 190 IF( ln_diatrc ) THEN 166 !$OMP PARALLEL WORKSHARE 167 trc2d(:,:,jp_pcs0_2d + 11) = zirondep(:,:,1) * 1.e+3 * rfact2r * e3t_n(:,:,1) * tmask(:,:,1) 168 !$OMP END PARALLEL WORKSHARE 191 !$OMP PARALLEL DO schedule(static) private(jj,ji) 192 DO jj = 1, jpj 193 DO ji = 1, jpi 194 trc2d(ji,jj,jp_pcs0_2d + 11) = zirondep(ji,jj,1) * 1.e+3 * rfact2r * e3t_n(ji,jj,1) * tmask(ji,jj,1) 195 END DO 196 END DO 169 197 ENDIF 170 198 ENDIF … … 187 215 tra(ji,jj,jk,jpdic) = tra(ji,jj,jk,jpdic) + rivdic(ji,jj) * rfact2 188 216 tra(ji,jj,jk,jptal) = tra(ji,jj,jk,jptal) + ( rivalk(ji,jj) - rno3 * rivdin(ji,jj) ) * rfact2 189 END DO190 END DO191 END DO217 END DO 218 END DO 219 END DO 192 220 ENDIF 193 221 … … 195 223 ! ---------------------------------------------------------- 196 224 IF( ln_ndepo ) THEN 197 !$OMP PARALLEL WORKSHARE 198 tra(:,:,1,jpno3) = tra(:,:,1,jpno3) + nitdep(:,:) * rfact2 199 tra(:,:,1,jptal) = tra(:,:,1,jptal) - rno3 * nitdep(:,:) * rfact2 200 !$OMP END PARALLEL WORKSHARE 225 !$OMP PARALLEL DO schedule(static) private(jj,ji) 226 DO jj = 1, jpj 227 DO ji = 1, jpi 228 tra(ji,jj,1,jpno3) = tra(ji,jj,1,jpno3) + nitdep(ji,jj) * rfact2 229 tra(ji,jj,1,jptal) = tra(ji,jj,1,jptal) - rno3 * nitdep(ji,jj) * rfact2 230 ENDDO 231 ENDDO 201 232 ENDIF 202 233 … … 204 235 ! ------------------------------------------------------ 205 236 IF( ln_ironsed ) THEN 206 !$OMP PARALLEL WORKSHARE 207 tra(:,:,:,jpfer) = tra(:,:,:,jpfer) + ironsed(:,:,:) * rfact2 208 !$OMP END PARALLEL WORKSHARE 237 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 238 DO jk = 1, jpk 239 DO jj = 1, jpj 240 DO ji = 1, jpi 241 tra(ji,jj,jk,jpfer) = tra(ji,jj,jk,jpfer) + ironsed(ji,jj,jk) * rfact2 242 END DO 243 END DO 244 END DO 209 245 ! 210 246 IF( lk_iomput .AND. knt == nrdttrc .AND. iom_use( "Ironsed" ) ) & … … 215 251 ! 
------------------------------------------------------ 216 252 IF( ln_hydrofe ) THEN 217 !$OMP PARALLEL WORKSHARE 218 tra(:,:,:,jpfer) = tra(:,:,:,jpfer) + hydrofe(:,:,:) * rfact2 219 !$OMP END PARALLEL WORKSHARE 253 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 254 DO jk = 1, jpk 255 DO jj = 1, jpj 256 DO ji = 1, jpi 257 tra(ji,jj,jk,jpfer) = tra(ji,jj,jk,jpfer) + hydrofe(ji,jj,jk) * rfact2 258 END DO 259 END DO 260 END DO 220 261 ! 221 262 IF( lk_iomput .AND. knt == nrdttrc .AND. iom_use( "HYDR" ) ) & … … 291 332 END DO 292 333 END DO 293 294 !$OMP SINGLE 334 !$OMP END PARALLEL 335 295 336 zsumsedsi = glob_sum( zwork1(:,:) * e1e2t(:,:) ) * r1_rday 296 337 zsumsedpo4 = glob_sum( zwork2(:,:) * e1e2t(:,:) ) * r1_rday … … 304 345 zrivsil = 1._wp - ( sumdepsi + rivdsiinput * r1_ryyss ) / ( zsumsedsi + rtrn ) 305 346 #endif 306 !$OMP END SINGLE347 !$OMP PARALLEL 307 348 308 349 !$OMP DO schedule(static) private(jj,ji,ikt,zdep,zws4,zwsc,zsiloss,zcaloss,zfactcal,zrivalk) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsink.F90
r7037 r7508 115 115 116 116 ! limit the values of the sinking speeds to avoid numerical instabilities 117 !$OMP WORKSHARE 118 wsbio3(:,:,:) = wsbio 119 wscal (:,:,:) = wsbio4(:,:,:) 120 !$OMP END WORKSHARE NOWAIT 117 !$OMP DO schedule(static) private(jk, jj, ji) 118 DO jk = 1, jpk 119 DO jj = 1, jpj 120 DO ji = 1, jpi 121 wsbio3(ji,jj,jk) = wsbio 122 wscal (ji,jj,jk) = wsbio4(ji,jj,jk) 123 END DO 124 END DO 125 END DO 121 126 !$OMP END PARALLEL 122 127 ! … … 173 178 ! Initializa to zero all the sinking arrays 174 179 ! ----------------------------------------- 175 !$OMP WORKSHARE 176 sinking (:,:,:) = 0.e0 177 sinking2(:,:,:) = 0.e0 178 sinkcal (:,:,:) = 0.e0 179 sinkfer (:,:,:) = 0.e0 180 sinksil (:,:,:) = 0.e0 181 sinkfer2(:,:,:) = 0.e0 182 !$OMP END WORKSHARE NOWAIT 180 !$OMP DO schedule(static) private(jk, jj, ji) 181 DO jk = 1, jpk 182 DO jj = 1, jpj 183 DO ji = 1, jpi 184 sinking (ji,jj,jk) = 0.e0 185 sinking2(ji,jj,jk) = 0.e0 186 sinkcal (ji,jj,jk) = 0.e0 187 sinkfer (ji,jj,jk) = 0.e0 188 sinksil (ji,jj,jk) = 0.e0 189 sinkfer2(ji,jj,jk) = 0.e0 190 END DO 191 END DO 192 END DO 183 193 !$OMP END PARALLEL 184 194 … … 258 268 ! 259 269 IF( iom_use( "EPC100" ) ) THEN 260 !$OMP PARALLEL WORKSHARE 261 zw2d(:,:) = ( sinking(:,:,ik100) + sinking2(:,:,ik100) ) * zfact * tmask(:,:,1) ! Export of carbon at 100m 262 !$OMP END PARALLEL WORKSHARE 270 !$OMP DO schedule(static) private(jj, ji) 271 DO jj = 1, jpj 272 DO ji = 1, jpi 273 zw2d(ji,jj) = ( sinking(ji,jj,ik100) + sinking2(ji,jj,ik100) ) * zfact * tmask(ji,jj,1) ! Export of carbon at 100m 274 END DO 275 END DO 263 276 CALL iom_put( "EPC100" , zw2d ) 264 277 ENDIF 265 278 IF( iom_use( "EPFE100" ) ) THEN 266 !$OMP PARALLEL WORKSHARE 267 zw2d(:,:) = ( sinkfer(:,:,ik100) + sinkfer2(:,:,ik100) ) * zfact * tmask(:,:,1) ! 
Export of iron at 100m 268 !$OMP END PARALLEL WORKSHARE 279 !$OMP DO schedule(static) private(jj, ji) 280 DO jj = 1, jpj 281 DO ji = 1, jpi 282 zw2d(ji,jj) = ( sinkfer(ji,jj,ik100) + sinkfer2(ji,jj,ik100) ) * zfact * tmask(ji,jj,1) ! Export of iron at 100m 283 END DO 284 END DO 269 285 CALL iom_put( "EPFE100" , zw2d ) 270 286 ENDIF 271 287 IF( iom_use( "EPCAL100" ) ) THEN 272 !$OMP PARALLEL WORKSHARE 273 zw2d(:,:) = sinkcal(:,:,ik100) * zfact * tmask(:,:,1) ! Export of calcite at 100m 274 !$OMP END PARALLEL WORKSHARE 288 !$OMP DO schedule(static) private(jj, ji) 289 DO jj = 1, jpj 290 DO ji = 1, jpi 291 zw2d(ji,jj) = sinkcal(ji,jj,ik100) * zfact * tmask(ji,jj,1) ! Export of calcite at 100m 292 END DO 293 END DO 275 294 CALL iom_put( "EPCAL100" , zw2d ) 276 295 ENDIF 277 296 IF( iom_use( "EPSI100" ) ) THEN 278 !$OMP PARALLEL WORKSHARE 279 zw2d(:,:) = sinksil(:,:,ik100) * zfact * tmask(:,:,1) ! Export of bigenic silica at 100m 280 !$OMP END PARALLEL WORKSHARE 297 !$OMP DO schedule(static) private(jj, ji) 298 DO jj = 1, jpj 299 DO ji = 1, jpi 300 zw2d(ji,jj) = sinksil(ji,jj,ik100) * zfact * tmask(ji,jj,1) ! Export of bigenic silica at 100m 301 END DO 302 END DO 281 303 CALL iom_put( "EPSI100" , zw2d ) 282 304 ENDIF 283 305 IF( iom_use( "EXPC" ) ) THEN 284 !$OMP PARALLEL WORKSHARE 285 zw3d(:,:,:) = ( sinking(:,:,:) + sinking2(:,:,:) ) * zfact * tmask(:,:,:) ! Export of carbon in the water column 286 !$OMP END PARALLEL WORKSHARE 306 !$OMP DO schedule(static) private(jk, jj, ji) 307 DO jk = 1, jpk 308 DO jj = 1, jpj 309 DO ji = 1, jpi 310 zw3d(ji,jj,jk) = ( sinking(ji,jj,jk) + sinking2(ji,jj,jk) ) * zfact * tmask(ji,jj,jk) ! Export of carbon in the water column 311 END DO 312 END DO 313 END DO 287 314 CALL iom_put( "EXPC" , zw3d ) 288 315 ENDIF 289 316 IF( iom_use( "EXPFE" ) ) THEN 290 !$OMP PARALLEL WORKSHARE 291 zw3d(:,:,:) = ( sinkfer(:,:,:) + sinkfer2(:,:,:) ) * zfact * tmask(:,:,:) ! 
Export of iron 292 !$OMP END PARALLEL WORKSHARE 317 !$OMP DO schedule(static) private(jk, jj, ji) 318 DO jk = 1, jpk 319 DO jj = 1, jpj 320 DO ji = 1, jpi 321 zw3d(ji,jj,jk) = ( sinkfer(ji,jj,jk) + sinkfer2(ji,jj,jk) ) * zfact * tmask(ji,jj,jk) ! Export of iron 322 END DO 323 END DO 324 END DO 293 325 CALL iom_put( "EXPFE" , zw3d ) 294 326 ENDIF 295 327 IF( iom_use( "EXPCAL" ) ) THEN 296 !$OMP PARALLEL WORKSHARE 297 zw3d(:,:,:) = sinkcal(:,:,:) * zfact * tmask(:,:,:) ! Export of calcite 298 !$OMP END PARALLEL WORKSHARE 328 !$OMP DO schedule(static) private(jk, jj, ji) 329 DO jk = 1, jpk 330 DO jj = 1, jpj 331 DO ji = 1, jpi 332 zw3d(ji,jj,jk) = sinkcal(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! Export of calcite 333 END DO 334 END DO 335 END DO 299 336 CALL iom_put( "EXPCAL" , zw3d ) 300 337 ENDIF 301 338 IF( iom_use( "EXPSI" ) ) THEN 302 !$OMP PARALLEL WORKSHARE 303 zw3d(:,:,:) = sinksil(:,:,:) * zfact * tmask(:,:,:) ! Export of bigenic silica 304 !$OMP END PARALLEL WORKSHARE 339 !$OMP DO schedule(static) private(jk, jj, ji) 340 DO jk = 1, jpk 341 DO jj = 1, jpj 342 DO ji = 1, jpi 343 zw3d(ji,jj,jk) = sinksil(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! 
Export of bigenic silica 344 END DO 345 END DO 346 END DO 305 347 CALL iom_put( "EXPSI" , zw3d ) 306 348 ENDIF … … 313 355 IF( ln_diatrc ) THEN 314 356 zfact = 1.e3 * rfact2r 315 !$OMP PARALLEL WORKSHARE 316 trc2d(:,:,jp_pcs0_2d + 4) = sinking (:,:,ik100) * zfact * tmask(:,:,1) 317 trc2d(:,:,jp_pcs0_2d + 5) = sinking2(:,:,ik100) * zfact * tmask(:,:,1) 318 trc2d(:,:,jp_pcs0_2d + 6) = sinkfer (:,:,ik100) * zfact * tmask(:,:,1) 319 trc2d(:,:,jp_pcs0_2d + 7) = sinkfer2(:,:,ik100) * zfact * tmask(:,:,1) 320 trc2d(:,:,jp_pcs0_2d + 8) = sinksil (:,:,ik100) * zfact * tmask(:,:,1) 321 trc2d(:,:,jp_pcs0_2d + 9) = sinkcal (:,:,ik100) * zfact * tmask(:,:,1) 322 !$OMP END PARALLEL WORKSHARE 357 !$OMP DO schedule(static) private(jj, ji) 358 DO jj = 1, jpj 359 DO ji = 1, jpi 360 trc2d(ji,jj,jp_pcs0_2d + 4) = sinking (ji,jj,ik100) * zfact * tmask(ji,jj,1) 361 trc2d(ji,jj,jp_pcs0_2d + 5) = sinking2(ji,jj,ik100) * zfact * tmask(ji,jj,1) 362 trc2d(ji,jj,jp_pcs0_2d + 6) = sinkfer (ji,jj,ik100) * zfact * tmask(ji,jj,1) 363 trc2d(ji,jj,jp_pcs0_2d + 7) = sinkfer2(ji,jj,ik100) * zfact * tmask(ji,jj,1) 364 trc2d(ji,jj,jp_pcs0_2d + 8) = sinksil (ji,jj,ik100) * zfact * tmask(ji,jj,1) 365 trc2d(ji,jj,jp_pcs0_2d + 9) = sinkcal (ji,jj,ik100) * zfact * tmask(ji,jj,1) 366 END DO 367 END DO 323 368 ENDIF 324 369 ENDIF … … 394 439 zval3 = 1. + xkr_eta 395 440 !$OMP PARALLEL 396 !$OMP WORKSHARE 397 znum3d(:,:,:) = 0.e0 398 !$OMP END WORKSHARE 441 !$OMP DO schedule(static) private(jk, jj, ji) 442 DO jk = 1, jpk 443 DO jj = 1, jpj 444 DO ji = 1, jpi 445 znum3d(ji,jj,jk) = 0.e0 446 END DO 447 END DO 448 END DO 399 449 ! Computation of the vertical sinking speed : Kriest et Evans, 2000 400 450 ! ----------------------------------------------------------------- … … 438 488 ! INITIALIZE TO ZERO ALL THE SINKING ARRAYS 439 489 ! 
----------------------------------------- 440 !$OMP WORKSHARE 441 sinking (:,:,:) = 0.e0 442 sinking2(:,:,:) = 0.e0 443 sinkcal (:,:,:) = 0.e0 444 sinkfer (:,:,:) = 0.e0 445 sinksil (:,:,:) = 0.e0 446 !$OMP END WORKSHARE NOWAIT 490 !$OMP DO schedule(static) private(jk, jj, ji) 491 DO jk = 1, jpk 492 DO jj = 1, jpj 493 DO ji = 1, jpi 494 sinking (ji,jj,jk) = 0.e0 495 sinking2(ji,jj,jk) = 0.e0 496 sinkcal (ji,jj,jk) = 0.e0 497 sinkfer (ji,jj,jk) = 0.e0 498 sinksil (ji,jj,jk) = 0.e0 499 END DO 500 END DO 501 END DO 447 502 !$OMP END PARALLEL 448 503 ! Compute the sedimentation term using p4zsink2 for all the sinking particles … … 576 631 ! 577 632 IF( iom_use( "EPC100" ) ) THEN 578 !$OMP PARALLEL WORKSHARE 579 zw2d(:,:) = sinking(:,:,ik100) * zfact * tmask(:,:,1) ! Export of carbon at 100m 580 !$OMP END PARALLEL WORKSHARE 633 !$OMP DO schedule(static) private(jj, ji) 634 DO jj = 1, jpj 635 DO ji = 1, jpi 636 zw2d(ji,jj) = sinking(ji,jj,ik100) * zfact * tmask(ji,jj,1) ! Export of carbon at 100m 637 END DO 638 END DO 581 639 CALL iom_put( "EPC100" , zw2d ) 582 640 ENDIF 583 641 IF( iom_use( "EPN100" ) ) THEN 584 !$OMP PARALLEL WORKSHARE 585 zw2d(:,:) = sinking2(:,:,ik100) * zfact * tmask(:,:,1) ! Export of number of aggregates ? 586 !$OMP END PARALLEL WORKSHARE 642 !$OMP DO schedule(static) private(jj, ji) 643 DO jj = 1, jpj 644 DO ji = 1, jpi 645 zw2d(ji,jj) = sinking2(ji,jj,ik100) * zfact * tmask(ji,jj,1) ! Export of number of aggregates ? 646 END DO 647 END DO 587 648 CALL iom_put( "EPN100" , zw2d ) 588 649 ENDIF 589 650 IF( iom_use( "EPCAL100" ) ) THEN 590 !$OMP PARALLEL WORKSHARE 591 zw2d(:,:) = sinkcal(:,:,ik100) * zfact * tmask(:,:,1) ! 
Export of calcite at 100m 592 !$OMP END PARALLEL WORKSHARE 651 !$OMP DO schedule(static) private(jj, ji) 652 DO jj = 1, jpj 653 DO ji = 1, jpi 654 zw2d(ji,jj) = sinkcal(ji,jj,ik100) * zfact * tmask(ji,jj,1) !Export of calcite at 100m 655 END DO 656 END DO 593 657 CALL iom_put( "EPCAL100" , zw2d ) 594 658 ENDIF 595 659 IF( iom_use( "EPSI100" ) ) THEN 596 !$OMP PARALLEL WORKSHARE 597 zw2d(:,:) = sinksil(:,:,ik100) * zfact * tmask(:,:,1) ! Export of bigenic silica at 100m 598 !$OMP END PARALLEL WORKSHARE 660 !$OMP DO schedule(static) private(jj, ji) 661 DO jj = 1, jpj 662 DO ji = 1, jpi 663 zw2d(ji,jj) = sinksil(ji,jj,ik100) * zfact * tmask(ji,jj,1) ! Export of bigenic silica at 100m 664 END DO 665 END DO 599 666 CALL iom_put( "EPSI100" , zw2d ) 600 667 ENDIF 601 668 IF( iom_use( "EXPC" ) ) THEN 602 !$OMP PARALLEL WORKSHARE 603 zw3d(:,:,:) = sinking(:,:,:) * zfact * tmask(:,:,:) ! Export of carbon in the water column 604 !$OMP END PARALLEL WORKSHARE 669 !$OMP DO schedule(static) private(jk, jj, ji) 670 DO jk = 1, jpk 671 DO jj = 1, jpj 672 DO ji = 1, jpi 673 zw3d(ji,jj,jk) = sinking(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! Export of carbon in the water column 674 END DO 675 END DO 676 END DO 605 677 CALL iom_put( "EXPC" , zw3d ) 606 678 ENDIF 607 679 IF( iom_use( "EXPN" ) ) THEN 608 !$OMP PARALLEL WORKSHARE 609 zw3d(:,:,:) = sinking(:,:,:) * zfact * tmask(:,:,:) ! Export of carbon in the water column 610 !$OMP END PARALLEL WORKSHARE 680 !$OMP DO schedule(static) private(jk, jj, ji) 681 DO jk = 1, jpk 682 DO jj = 1, jpj 683 DO ji = 1, jpi 684 zw3d(ji,jj,jk) = sinking(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! Export of carbon in the water column 685 END DO 686 END DO 687 END DO 611 688 CALL iom_put( "EXPN" , zw3d ) 612 689 ENDIF 613 690 IF( iom_use( "EXPCAL" ) ) THEN 614 !$OMP PARALLEL WORKSHARE 615 zw3d(:,:,:) = sinkcal(:,:,:) * zfact * tmask(:,:,:) ! 
Export of calcite 616 !$OMP END PARALLEL WORKSHARE 691 !$OMP DO schedule(static) private(jk, jj, ji) 692 DO jk = 1, jpk 693 DO jj = 1, jpj 694 DO ji = 1, jpi 695 zw3d(ji,jj,jk) = sinkcal(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! Export of calcite 696 END DO 697 END DO 698 END DO 617 699 CALL iom_put( "EXPCAL" , zw3d ) 618 700 ENDIF 619 701 IF( iom_use( "EXPSI" ) ) THEN 620 !$OMP PARALLEL WORKSHARE 621 zw3d(:,:,:) = sinksil(:,:,:) * zfact * tmask(:,:,:) ! Export of bigenic silica 622 !$OMP END PARALLEL WORKSHARE 702 !$OMP DO schedule(static) private(jk, jj, ji) 703 DO jk = 1, jpk 704 DO jj = 1, jpj 705 DO ji = 1, jpi 706 zw3d(ji,jj,jk) = sinksil(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! Export of bigenic silica 707 END DO 708 END DO 709 END DO 623 710 CALL iom_put( "EXPSI" , zw3d ) 624 711 ENDIF 625 712 IF( iom_use( "XNUM" ) ) THEN 626 !$OMP PARALLEL WORKSHARE 627 zw3d(:,:,:) = znum3d(:,:,:) * tmask(:,:,:) ! Number of particles on aggregats 628 !$OMP END PARALLEL WORKSHARE 713 !$OMP DO schedule(static) private(jk, jj, ji) 714 DO jk = 1, jpk 715 DO jj = 1, jpj 716 DO ji = 1, jpi 717 zw3d(ji,jj,jk) = znum3d(ji,jj,jk) * tmask(ji,jj,jk) ! Number of particles on aggregats 718 END DO 719 END DO 720 END DO 629 721 CALL iom_put( "XNUM" , zw3d ) 630 722 ENDIF 631 723 IF( iom_use( "WSC" ) ) THEN 632 !$OMP PARALLEL WORKSHARE 633 zw3d(:,:,:) = wsbio3(:,:,:) * tmask(:,:,:) ! Sinking speed of carbon particles 634 !$OMP END PARALLEL WORKSHARE 724 !$OMP DO schedule(static) private(jk, jj, ji) 725 DO jk = 1, jpk 726 DO jj = 1, jpj 727 DO ji = 1, jpi 728 zw3d(ji,jj,jk) = wsbio3(ji,jj,jk) * tmask(ji,jj,jk) ! Sinking speed of carbon particles 729 END DO 730 END DO 731 END DO 635 732 CALL iom_put( "WSC" , zw3d ) 636 733 ENDIF 637 734 IF( iom_use( "WSN" ) ) THEN 638 !$OMP PARALLEL WORKSHARE 639 zw3d(:,:,:) = wsbio4(:,:,:) * tmask(:,:,:) ! 
Sinking speed of particles number 640 !$OMP END PARALLEL WORKSHARE 735 !$OMP DO schedule(static) private(jk, jj, ji) 736 DO jk = 1, jpk 737 DO jj = 1, jpj 738 DO ji = 1, jpi 739 zw3d(ji,jj,jk) = wsbio4(ji,jj,jk) * tmask(ji,jj,jk) ! Sinking speed of particles number 740 END DO 741 END DO 742 END DO 641 743 CALL iom_put( "WSN" , zw3d ) 642 744 ENDIF … … 647 749 IF( ln_diatrc ) THEN 648 750 zfact = 1.e3 * rfact2r 649 !$OMP PARALLEL WORKSHARE 650 trc2d(:,: ,jp_pcs0_2d + 4) = sinking (:,:,ik100) * zfact * tmask(:,:,1) 651 trc2d(:,: ,jp_pcs0_2d + 5) = sinking2(:,:,ik100) * zfact * tmask(:,:,1) 652 trc2d(:,: ,jp_pcs0_2d + 6) = sinkfer (:,:,ik100) * zfact * tmask(:,:,1) 653 trc2d(:,: ,jp_pcs0_2d + 7) = sinksil (:,:,ik100) * zfact * tmask(:,:,1) 654 trc2d(:,: ,jp_pcs0_2d + 8) = sinkcal (:,:,ik100) * zfact * tmask(:,:,1) 655 trc3d(:,:,:,jp_pcs0_3d + 11) = sinking (:,:,:) * zfact * tmask(:,:,:) 656 trc3d(:,:,:,jp_pcs0_3d + 12) = sinking2(:,:,:) * zfact * tmask(:,:,:) 657 trc3d(:,:,:,jp_pcs0_3d + 13) = sinksil (:,:,:) * zfact * tmask(:,:,:) 658 trc3d(:,:,:,jp_pcs0_3d + 14) = sinkcal (:,:,:) * zfact * tmask(:,:,:) 659 trc3d(:,:,:,jp_pcs0_3d + 15) = znum3d (:,:,:) * tmask(:,:,:) 660 trc3d(:,:,:,jp_pcs0_3d + 16) = wsbio3 (:,:,:) * tmask(:,:,:) 661 trc3d(:,:,:,jp_pcs0_3d + 17) = wsbio4 (:,:,:) * tmask(:,:,:) 662 !$OMP END PARALLEL WORKSHARE 751 !$OMP PARALLEL 752 !$OMP DO schedule(static) private(jj, ji) 753 DO jj = 1, jpj 754 DO ji = 1, jpi 755 trc2d(ji,jj ,jp_pcs0_2d + 4) = sinking (ji,jj,ik100) * zfact * tmask(ji,jj,1) 756 trc2d(ji,jj ,jp_pcs0_2d + 5) = sinking2(ji,jj,ik100) * zfact * tmask(ji,jj,1) 757 trc2d(ji,jj ,jp_pcs0_2d + 6) = sinkfer (ji,jj,ik100) * zfact * tmask(ji,jj,1) 758 trc2d(ji,jj ,jp_pcs0_2d + 7) = sinksil (ji,jj,ik100) * zfact * tmask(ji,jj,1) 759 trc2d(ji,jj ,jp_pcs0_2d + 8) = sinkcal (ji,jj,ik100) * zfact * tmask(ji,jj,1) 760 END DO 761 END DO 762 !$OMP DO schedule(static) private(jk, jj, ji) 763 DO jk = 1, jpk 764 DO jj = 1, jpj 765 DO ji = 1, jpi 766 
trc3d(ji,jj,jk,jp_pcs0_3d + 11) = sinking (ji,jj,jk) * zfact * tmask(ji,jj,jk) 767 trc3d(ji,jj,jk,jp_pcs0_3d + 12) = sinking2(ji,jj,jk) * zfact * tmask(ji,jj,jk) 768 trc3d(ji,jj,jk,jp_pcs0_3d + 13) = sinksil (ji,jj,jk) * zfact * tmask(ji,jj,jk) 769 trc3d(ji,jj,jk,jp_pcs0_3d + 14) = sinkcal (ji,jj,jk) * zfact * tmask(ji,jj,jk) 770 trc3d(ji,jj,jk,jp_pcs0_3d + 15) = znum3d (ji,jj,jk) * tmask(ji,jj,jk) 771 trc3d(ji,jj,jk,jp_pcs0_3d + 16) = wsbio3 (ji,jj,jk) * tmask(ji,jj,jk) 772 trc3d(ji,jj,jk,jp_pcs0_3d + 17) = wsbio4 (ji,jj,jk) * tmask(ji,jj,jk) 773 END DO 774 END DO 775 END DO 776 !$OMP END PARALLEL 663 777 ENDIF 664 778 ENDIF … … 890 1004 891 1005 IF( lk_degrad ) THEN 892 !$OMP PARALLEL WORKSHARE 893 zwsink2(:,:,:) = zwsink2(:,:,:) * facvol(:,:,:) 894 !$OMP END PARALLEL WORKSHARE 1006 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 1007 DO jk = 1, jpk 1008 DO jj = 1, jpj 1009 DO ji = 1, jpi 1010 zwsink2(ji,jj,jk) = zwsink2(ji,jj,jk) * facvol(ji,jj,jk) 1011 END DO 1012 END DO 1013 END DO 895 1014 ENDIF 896 1015 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsms.F90
r7037 r7508 143 143 ! 144 144 !$OMP PARALLEL 145 !$OMP WORKSHARE 146 xnegtr(:,:,:) = 1.e0 147 !$OMP END WORKSHARE 145 !$OMP DO schedule(static) private(jk, jj, ji) 146 DO jk = 1, jpk 147 DO jj = 1, jpj 148 DO ji = 1, jpi 149 xnegtr(ji,jj,jk) = 1.e0 150 END DO 151 END DO 152 END DO 148 153 DO jn = jp_pcs0, jp_pcs1 149 154 !$OMP DO schedule(static) private(jk, jj, ji, ztra) … … 406 411 CALL iom_get( numrtr, jpdom_autoglo, 'Silicamax' , xksimax(:,:) ) 407 412 ELSE 408 !$OMP PARALLEL WORKSHARE 409 xksimax(:,:) = xksi(:,:) 410 !$OMP END PARALLEL WORKSHARE 413 !$OMP PARALLEL DO schedule(static) private(jj, ji) 414 DO jj = 1, jpj 415 DO ji = 1, jpi 416 xksimax(ji,jj) = xksi(ji,jj) 417 END DO 418 END DO 411 419 ENDIF 412 420 ! … … 462 470 ! set total alkalinity, phosphate, nitrate & silicate 463 471 zarea = 1._wp / glob_sum( cvol(:,:,:) ) * 1e6 464 !$OMP PARALLEL 465 !$OMP DO schedule(static) private(jk,jj,ji)472 473 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 466 474 DO jk = 1, jpk 467 475 DO jj = 1, jpj … … 474 482 END DO 475 483 END DO 476 !$OMP SINGLE 484 477 485 zalksumn = glob_sum( zctrn_jptal(:,:,:) ) * zarea 478 486 zpo4sumn = glob_sum( zctrn_jppo4(:,:,:) ) * zarea * po4r 479 487 zno3sumn = glob_sum( zctrn_jppo3(:,:,:) ) * zarea * rno3 480 488 zsilsumn = glob_sum( zctrn_jpsil(:,:,:) ) * zarea 481 !$OMP END SINGLE 482 483 !$OMP DO schedule(static) private(jk,jj,ji) 489 490 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 484 491 DO jk = 1, jpk 485 492 DO jj = 1, jpj … … 492 499 END DO 493 500 END DO 494 !$OMP END DO NOWAIT495 !$OMP END PARALLEL496 501 497 502 IF(lwp) THEN … … 503 508 ! 504 509 IF( .NOT. 
ln_top_euler ) THEN 505 !$OMP PARALLEL 506 !$OMP DO schedule(static) private(jk,jj,ji) 510 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 507 511 DO jk = 1, jpk 508 512 DO jj = 1, jpj … … 515 519 END DO 516 520 END DO 517 !$OMP SINGLE 521 518 522 zalksumb = glob_sum( zctrb_jptal(:,:,:) ) * zarea 519 523 zpo4sumb = glob_sum( zctrb_jppo4(:,:,:) ) * zarea * po4r 520 524 zno3sumb = glob_sum( zctrb_jppo3(:,:,:) ) * zarea * rno3 521 zsilsumb = glob_sum( zctrb_jpsil(:,:,:) ) * zarea 522 !$OMP END SINGLE 523 524 !$OMP DO schedule(static) private(jk,jj,ji) 525 zsilsumb = glob_sum( zctrb_jpsil(:,:,:) ) * zarea 526 527 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 525 528 DO jk = 1, jpk 526 529 DO jj = 1, jpj … … 533 536 END DO 534 537 END DO 535 !$OMP END DO NOWAIT536 !$OMP END PARALLEL537 538 538 539 IF(lwp) THEN -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcadv.F90
r7037 r7508 79 79 INTEGER, INTENT(in) :: kt ! ocean time-step index 80 80 ! 81 INTEGER :: jk ! dummy loop index81 INTEGER :: jk, jj, ji ! dummy loop index 82 82 CHARACTER (len=22) :: charout 83 83 REAL(wp), POINTER, DIMENSION(:,:,:) :: zun, zvn, zwn ! effective velocity … … 102 102 ! 103 103 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! add z-tilde and/or vvl corrections 104 !$OMP PARALLEL WORKSHARE 105 zun(:,:,:) = zun(:,:,:) + un_td(:,:,:) 106 zvn(:,:,:) = zvn(:,:,:) + vn_td(:,:,:) 107 !$OMP END PARALLEL WORKSHARE 104 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 105 DO jk = 1, jpk 106 DO jj = 1, jpj 107 DO ji = 1, jpi 108 zun(ji,jj,jk) = zun(ji,jj,jk) + un_td(ji,jj,jk) 109 zvn(ji,jj,jk) = zvn(ji,jj,jk) + vn_td(ji,jj,jk) 110 END DO 111 END DO 112 END DO 108 113 ENDIF 109 114 ! … … 113 118 IF( ln_mle ) CALL tra_adv_mle( kt, nittrc000, zun, zvn, zwn, 'TRC' ) ! add the mle transport 114 119 ! 115 !$OMP PARALLEL WORKSHARE 116 zun(:,:,jpk) = 0._wp ! no transport trough the bottom 117 zvn(:,:,jpk) = 0._wp 118 zwn(:,:,jpk) = 0._wp 119 !$OMP END PARALLEL WORKSHARE 120 !$OMP PARALLEL DO schedule(static) private(jj,ji) 121 DO jj = 1, jpj 122 DO ji = 1, jpi 123 zun(ji,jj,jpk) = 0._wp ! no transport trough the bottom 124 zvn(ji,jj,jpk) = 0._wp 125 zwn(ji,jj,jpk) = 0._wp 126 END DO 127 END DO 120 128 ! 121 129 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcbbl.F90
r7037 r7508 61 61 IF( l_trdtrc ) THEN 62 62 CALL wrk_alloc( jpi, jpj, jpk, jptra, ztrtrd ) ! temporary save of trends 63 !$OMP PARALLEL WORKSHARE 64 ztrtrd(:,:,:,:) = tra(:,:,:,:) 65 !$OMP END PARALLEL WORKSHARE 63 DO jn = 1, jptra 64 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 65 DO jk = 1, jpk 66 DO jj = 1, jpj 67 DO ji = 1, jpi 68 ztrtrd(ji,jj,jk,jn) = tra(ji,jj,jk,jn) 69 END DO 70 END DO 71 END DO 72 END DO 66 73 ENDIF 67 74 … … 90 97 IF( l_trdtrc ) THEN ! save the horizontal diffusive trends for further diagnostics 91 98 DO jn = 1, jptra 92 !$OMP PARALLEL WORKSHARE 93 ztrtrd(:,:,:,jn) = tra(:,:,:,jn) - ztrtrd(:,:,:,jn) 94 !$OMP END PARALLEL WORKSHARE 99 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 100 DO jk = 1, jpk 101 DO jj = 1, jpj 102 DO ji = 1, jpi 103 ztrtrd(ji,jj,jk,jn) = tra(ji,jj,jk,jn) - ztrtrd(ji,jj,jk,jn) 104 END DO 105 END DO 106 END DO 95 107 CALL trd_tra( kt, 'TRC', jn, jptra_bbl, ztrtrd(:,:,:,jn) ) 96 108 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcldf.F90
r7037 r7508 76 76 IF( l_trdtrc ) THEN 77 77 CALL wrk_alloc( jpi,jpj,jpk,jptra, ztrtrd ) 78 !$OMP PARALLEL WORKSHARE 79 ztrtrd(:,:,:,:) = tra(:,:,:,:) 80 !$OMP END PARALLEL WORKSHARE 78 DO jn = 1, jptra 79 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 80 DO jk = 1, jpk 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 ztrtrd(ji,jj,jk,jn) = tra(ji,jj,jk,jn) 84 END DO 85 END DO 86 END DO 87 END DO 81 88 ENDIF 82 89 ! !* set the lateral diffusivity coef. for passive tracer 83 90 CALL wrk_alloc( jpi,jpj,jpk, zahu, zahv ) 84 91 !$OMP PARALLEL 85 !$OMP WORKSHARE 86 zahu(:,:,:) = rldf * ahtu(:,:,:) 87 zahv(:,:,:) = rldf * ahtv(:,:,:) 88 !$OMP END WORKSHARE 92 !$OMP DO schedule(static) private(jk,jj,ji) 93 DO jk = 1, jpk 94 DO jj = 1, jpj 95 DO ji = 1, jpi 96 zahu(ji,jj,jk) = rldf * ahtu(ji,jj,jk) 97 zahv(ji,jj,jk) = rldf * ahtv(ji,jj,jk) 98 END DO 99 END DO 100 END DO 89 101 ! !* Enhanced zonal diffusivity coefficent in the equatorial domain 90 102 !$OMP DO schedule(static) private(jk,jj,ji,zdep) … … 120 132 IF( l_trdtrc ) THEN ! send the trends for further diagnostics 121 133 DO jn = 1, jptra 122 !$OMP PARALLEL WORKSHARE 123 ztrtrd(:,:,:,jn) = tra(:,:,:,jn) - ztrtrd(:,:,:,jn) 124 !$OMP END PARALLEL WORKSHARE 134 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 135 DO jk = 1, jpk 136 DO jj = 1, jpj 137 DO ji = 1, jpi 138 ztrtrd(ji,jj,jk,jn) = tra(ji,jj,jk,jn) - ztrtrd(ji,jj,jk,jn) 139 END DO 140 END DO 141 END DO 125 142 CALL trd_tra( kt, 'TRC', jn, jptra_ldf, ztrtrd(:,:,:,jn) ) 126 143 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcnxt.F90
r7037 r7508 108 108 IF( l_trdtrc ) THEN ! trends: store now fields before the Asselin filter application 109 109 CALL wrk_alloc( jpi, jpj, jpk, jptra, ztrdt ) 110 !$OMP PARALLEL WORKSHARE 111 ztrdt(:,:,:,:) = trn(:,:,:,:) 112 !$OMP END PARALLEL WORKSHARE 110 DO jn = 1, jptra 111 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 112 DO jk = 1, jpk 113 DO jj = 1, jpj 114 DO ji = 1, jpi 115 ztrdt(ji,jj,jk,jn) = trn(ji,jj,jk,jn) 116 END DO 117 END DO 118 END DO 119 END DO 113 120 ENDIF 114 121 ! ! Leap-Frog + Asselin filter time stepping -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcrad.F90
r7037 r7508 155 155 156 156 IF( l_trdtrc ) THEN 157 !$OMP PARALLEL WORKSHARE 158 ztrtrdb(:,:,:) = ptrb(:,:,:,jn) ! save input trb for trend computation 159 ztrtrdn(:,:,:) = ptrn(:,:,:,jn) 160 !$OMP END PARALLEL WORKSHARE 157 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 158 DO jk = 1, jpk 159 DO jj = 1, jpj 160 DO ji = 1, jpi 161 ztrtrdb(ji,jj,jk) = ptrb(ji,jj,jk,jn) ! save input trb for trend computation 162 ztrtrdn(ji,jj,jk) = ptrn(ji,jj,jk,jn) 163 END DO 164 END DO 165 END DO 161 166 ENDIF 162 167 ! ! sum over the global domain … … 198 203 ! 199 204 zs2rdt = 1. / ( 2. * rdt ) 200 !$OMP PARALLEL WORKSHARE 201 ztrtrdb(:,:,:) = ( ptrb(:,:,:,jn) - ztrtrdb(:,:,:) ) * zs2rdt 202 ztrtrdn(:,:,:) = ( ptrn(:,:,:,jn) - ztrtrdn(:,:,:) ) * zs2rdt 203 !$OMP END PARALLEL WORKSHARE 205 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 206 DO jk = 1, jpk 207 DO jj = 1, jpj 208 DO ji = 1, jpi 209 ztrtrdb(ji,jj,jk) = ( ptrb(ji,jj,jk,jn) - ztrtrdb(ji,jj,jk) ) * zs2rdt 210 ztrtrdn(ji,jj,jk) = ( ptrn(ji,jj,jk,jn) - ztrtrdn(ji,jj,jk) ) * zs2rdt 211 END DO 212 END DO 213 END DO 204 214 205 215 CALL trd_tra( kt, 'TRC', jn, jptra_radb, ztrtrdb ) ! Asselin-like trend handling … … 216 226 217 227 IF( l_trdtrc ) THEN 218 !$OMP PARALLEL 219 !$OMP WORKSHARE 220 ztrtrdb(:,:,:) = ptrb(:,:,:,jn) ! save input trb for trend computation 221 ztrtrdn(:,:,:) = ptrn(:,:,:,jn) ! save input trn for trend computation 222 !$OMP END WORKSHARE NOWAIT 223 224 !$OMP DO schedule(static) private(jk,jj,ji) 225 DO jk = 1, jpkm1 226 DO jj = 1, jpj 227 DO ji = 1, jpi 228 ptrn(ji,jj,jk,jn) = MAX( 0. , ptrn(ji,jj,jk,jn) ) 229 ptrb(ji,jj,jk,jn) = MAX( 0. , ptrb(ji,jj,jk,jn) ) 230 END DO 231 END DO 232 END DO 228 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 229 DO jk = 1, jpk 230 DO jj = 1, jpj 231 DO ji = 1, jpi 232 ztrtrdb(ji,jj,jk) = ptrb(ji,jj,jk,jn) ! 
save input trb for trend computation 233 ztrtrdn(ji,jj,jk) = ptrn(ji,jj,jk,jn) 234 END DO 235 END DO 236 END DO 237 END IF 238 239 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 240 DO jk = 1, jpkm1 241 DO jj = 1, jpj 242 DO ji = 1, jpi 243 ptrn(ji,jj,jk,jn) = MAX( 0. , ptrn(ji,jj,jk,jn) ) 244 ptrb(ji,jj,jk,jn) = MAX( 0. , ptrb(ji,jj,jk,jn) ) 245 END DO 246 END DO 247 END DO 248 249 IF( l_trdtrc ) THEN 233 250 ! 234 251 zs2rdt = 1. / ( 2. * rdt * FLOAT( nn_dttrc ) ) 235 !$OMP WORKSHARE 236 ztrtrdb(:,:,:) = ( ptrb(:,:,:,jn) - ztrtrdb(:,:,:) ) * zs2rdt 237 ztrtrdn(:,:,:) = ( ptrn(:,:,:,jn) - ztrtrdn(:,:,:) ) * zs2rdt 238 !$OMP END WORKSHARE NOWAIT 239 !$OMP END PARALLEL 252 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 253 DO jk = 1, jpk 254 DO jj = 1, jpj 255 DO ji = 1, jpi 256 ztrtrdb(ji,jj,jk) = ( ptrb(ji,jj,jk,jn) - ztrtrdb(ji,jj,jk) ) * zs2rdt 257 ztrtrdn(ji,jj,jk) = ( ptrn(ji,jj,jk,jn) - ztrtrdn(ji,jj,jk) ) * zs2rdt 258 END DO 259 END DO 260 END DO 240 261 CALL trd_tra( kt, 'TRC', jn, jptra_radb, ztrtrdb ) ! Asselin-like trend handling 241 262 CALL trd_tra( kt, 'TRC', jn, jptra_radn, ztrtrdn ) ! standard trend handling 242 263 ! 243 ELSE 244 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 245 DO jk = 1, jpkm1 246 DO jj = 1, jpj 247 DO ji = 1, jpi 248 ptrn(ji,jj,jk,jn) = MAX( 0. , ptrn(ji,jj,jk,jn) ) 249 ptrb(ji,jj,jk,jn) = MAX( 0. , ptrb(ji,jj,jk,jn) ) 250 END DO 251 END DO 252 END DO 253 ENDIF 254 ! 264 END IF 255 265 ENDDO 256 266 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcsbc.F90
r7037 r7508 61 61 INTEGER, INTENT( in ) :: kt ! ocean time-step index 62 62 ! 63 INTEGER :: ji, jj, j n ! dummy loop indices63 INTEGER :: ji, jj, jk, jn ! dummy loop indices 64 64 REAL(wp) :: zse3t, zrtrn, zratio, zfact ! temporary scalars 65 65 REAL(wp) :: zswitch, zftra, zcd, zdtra, ztfx, ztra ! temporary scalars … … 109 109 ELSE ! No restart or restart not found: Euler forward time stepping 110 110 zfact = 1._wp 111 !$OMP PARALLEL WORKSHARE 112 sbc_trc_b(:,:,:) = 0._wp 113 !$OMP END PARALLEL WORKSHARE 111 DO jn = 1, jptra 112 !$OMP PARALLEL DO schedule(static) private(jj,ji) 113 DO jj = 1, jpj 114 DO ji = 1, jpi 115 sbc_trc_b(ji,jj,jn) = 0._wp 116 END DO 117 END DO 118 END DO 114 119 ENDIF 115 120 ELSE ! Swap of forcing fields 116 121 IF( ln_top_euler ) THEN 117 122 zfact = 1._wp 118 !$OMP PARALLEL WORKSHARE 119 sbc_trc_b(:,:,:) = 0._wp 120 !$OMP END PARALLEL WORKSHARE 123 DO jn = 1, jptra 124 !$OMP PARALLEL DO schedule(static) private(jj,ji) 125 DO jj = 1, jpj 126 DO ji = 1, jpi 127 sbc_trc_b(ji,jj,jn) = 0._wp 128 END DO 129 END DO 130 END DO 121 131 ELSE 122 132 zfact = 0.5_wp 123 !$OMP PARALLEL WORKSHARE 124 sbc_trc_b(:,:,:) = sbc_trc(:,:,:) 125 !$OMP END PARALLEL WORKSHARE 133 DO jn = 1, jptra 134 !$OMP PARALLEL DO schedule(static) private(jj,ji) 135 DO jj = 1, jpj 136 DO ji = 1, jpi 137 sbc_trc_b(ji,jj,jn) = sbc_trc(ji,jj,jn) 138 END DO 139 END DO 140 END DO 126 141 ENDIF 127 142 ! … … 133 148 ! 134 149 IF( .NOT. lk_offline .AND. .NOT.ln_linssh ) THEN ! online coupling with vvl 135 !$OMP PARALLEL WORKSHARE 136 zsfx(:,:) = 0._wp 137 !$OMP END PARALLEL WORKSHARE 150 !$OMP PARALLEL DO schedule(static) private(jj,ji) 151 DO jj = 1, jpj 152 DO ji = 1, jpi 153 zsfx(ji,jj) = 0._wp 154 END DO 155 END DO 138 156 ELSE ! 
online coupling free surface or offline with free surface 139 !$OMP PARALLEL WORKSHARE 140 zsfx(:,:) = emp(:,:) 141 !$OMP END PARALLEL WORKSHARE 157 !$OMP PARALLEL DO schedule(static) private(jj,ji) 158 DO jj = 1, jpj 159 DO ji = 1, jpi 160 zsfx(ji,jj) = emp(ji,jj) 161 END DO 162 END DO 142 163 ENDIF 143 164 … … 146 167 ! 147 168 IF( l_trdtrc ) THEN 148 !$OMP PARALLEL WORKSHARE 149 ztrtrd(:,:,:) = tra(:,:,:,jn) ! save trends 150 !$OMP END PARALLEL WORKSHARE 169 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 170 DO jk = 1, jpk 171 DO jj = 1, jpj 172 DO ji = 1, jpi 173 ztrtrd(ji,jj,jk) = tra(ji,jj,jk,jn) ! save trends 174 END DO 175 END DO 176 END DO ! online coupling free surface or offline with free surface 151 177 END IF 152 178 IF ( nn_ice_tr == -1 ) THEN ! No tracers in sea ice (null concentration in sea ice) … … 193 219 ! 194 220 IF( l_trdtrc ) THEN 195 !$OMP PARALLEL WORKSHARE 196 ztrtrd(:,:,:) = tra(:,:,:,jn) - ztrtrd(:,:,:) 197 !$OMP END PARALLEL WORKSHARE 221 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 222 DO jk = 1, jpk 223 DO jj = 1, jpj 224 DO ji = 1, jpi 225 ztrtrd(ji,jj,jk) = tra(ji,jj,jk,jn) - ztrtrd(ji,jj,jk) 226 END DO 227 END DO 228 END DO ! online coupling free surface or offline with free surface 198 229 CALL trd_tra( kt, 'TRC', jn, jptra_nsr, ztrtrd ) 199 230 END IF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trczdf.F90
r7037 r7508 56 56 INTEGER, INTENT( in ) :: kt ! ocean time-step index 57 57 ! 58 INTEGER :: jk, jn 58 INTEGER :: jk, jn, jj, ji 59 59 CHARACTER (len=22) :: charout 60 60 REAL(wp), POINTER, DIMENSION(:,:,:,:) :: ztrtrd ! 4D workspace … … 71 71 IF( l_trdtrc ) THEN 72 72 CALL wrk_alloc( jpi, jpj, jpk, jptra, ztrtrd ) 73 !$OMP PARALLEL WORKSHARE 74 ztrtrd(:,:,:,:) = tra(:,:,:,:) 75 !$OMP END PARALLEL WORKSHARE 73 DO jn = 1, jptra 74 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 75 DO jk = 1, jpk 76 DO jj = 1, jpj 77 DO ji = 1, jpi 78 ztrtrd(ji,jj,jk,jn) = tra(ji,jj,jk,jn) 79 END DO 80 END DO 81 END DO 82 END DO 76 83 ENDIF 77 84 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/trcini.F90
r7037 r7508 113 113 !! ** Purpose : passive tracers inventories at initialsation phase 114 114 !!---------------------------------------------------------------------- 115 INTEGER :: jk, jn ! dummy loop indices115 INTEGER :: jk, jn, jj, ji ! dummy loop indices 116 116 CHARACTER (len=25) :: charout 117 117 !!---------------------------------------------------------------------- … … 122 122 END DO 123 123 IF( lk_degrad ) THEN 124 !$OMP PARALLEL WORKSHARE 125 cvol(:,:,:) = cvol(:,:,:) * facvol(:,:,:) ! degrad option: reduction by facvol 126 !$OMP END PARALLEL WORKSHARE 124 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 125 DO jk = 1, jpk 126 DO jj = 1, jpj 127 DO ji = 1, jpi 128 cvol(ji,jj,jk) = cvol(ji,jj,jk) * facvol(ji,jj,jk) ! degrad option: reduction by facvol 129 END DO 130 END DO 131 END DO 127 132 END IF 128 133 ! ! total volume of the ocean … … 208 213 USE trcdta ! initialisation from files 209 214 ! 210 INTEGER :: jk, jn, jl ! dummy loop indices215 INTEGER :: jk, jn, jl, jj, ji ! dummy loop indices 211 216 !!---------------------------------------------------------------------- 212 217 ! … … 229 234 jl = n_trc_index(jn) 230 235 CALL trc_dta( nit000, sf_trcdta(jl) ) ! read tracer data at nit000 231 !$OMP PARALLEL WORKSHARE 232 trn(:,:,:,jn) = sf_trcdta(jl)%fnow(:,:,:) * tmask(:,:,:) * rf_trfac(jl) 233 !$OMP END PARALLEL WORKSHARE 236 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 237 DO jk = 1, jpk 238 DO jj = 1, jpj 239 DO ji = 1, jpi 240 trn(ji,jj,jk,jn) = sf_trcdta(jl)%fnow(ji,jj,jk) * tmask(ji,jj,jk) * rf_trfac(jl) 241 END DO 242 END DO 243 END DO 234 244 ! 235 245 IF( .NOT.ln_trcdmp .AND. .NOT.ln_trcdmp_clo ) THEN !== deallocate data structure ==! … … 245 255 ENDIF 246 256 ! 
247 !$OMP PARALLEL WORKSHARE 248 trb(:,:,:,:) = trn(:,:,:,:) 249 !$OMP END PARALLEL WORKSHARE 257 DO jn = 1, jptra 258 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 259 DO jk = 1, jpk 260 DO jj = 1, jpj 261 DO ji = 1, jpi 262 trb(ji,jj,jk,jn) = trn(ji,jj,jk,jn) 263 END DO 264 END DO 265 END DO 266 END DO 250 267 ! 251 268 ENDIF 252 269 253 !$OMP PARALLEL WORKSHARE 254 tra(:,:,:,:) = 0._wp 255 !$OMP END PARALLEL WORKSHARE 270 DO jn = 1, jptra 271 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 272 DO jk = 1, jpk 273 DO jj = 1, jpj 274 DO ji = 1, jpi 275 tra(ji,jj,jk,jn) = 0._wp 276 END DO 277 END DO 278 END DO 279 END DO 256 280 ! ! Partial top/bottom cell: GRADh(trn) 257 281 END SUBROUTINE trc_ini_state -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/trcstp.F90
r7037 r7508 73 73 END DO 74 74 IF( lk_degrad ) THEN 75 !$OMP WORKSHARE 76 cvol(:,:,:) = cvol(:,:,:) * facvol(:,:,:) ! degrad option: reduction by facvol 77 !$OMP END WORKSHARE NOWAIT 75 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 76 DO jk = 1, jpk 77 DO jj = 1, jpj 78 DO ji = 1, jpi 79 cvol(ji,jj,jk) = cvol(ji,jj,jk) * facvol(ji,jj,jk) ! degrad option: reduction by facvol 80 END DO 81 END DO 82 END DO 78 83 END IF 79 84 !$OMP END PARALLEL … … 92 97 ENDIF 93 98 ! 94 !$OMP PARALLEL WORKSHARE 95 tra(:,:,:,:) = 0.e0 96 !$OMP END PARALLEL WORKSHARE 99 DO jn = 1, jptra 100 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 101 DO jk = 1, jpk 102 DO jj = 1, jpj 103 DO ji = 1, jpi 104 tra(ji,jj,jk,jn) = 0._wp 105 END DO 106 END DO 107 END DO 108 END DO 97 109 ! 98 110 CALL trc_rst_opn ( kt ) ! Open tracer restart file … … 172 184 ALLOCATE( qsr_arr(jpi,jpj,nb_rec_per_days ) ) 173 185 !$OMP PARALLEL 174 !$OMP DO schedule(static) private(jn, ji, jj)175 186 DO jn = 1, nb_rec_per_days 187 !$OMP DO schedule(static) private(ji, jj) 176 188 DO jj = 1, jpj 177 189 DO ji = 1, jpi … … 209 221 END DO 210 222 END DO 211 !$OMP WORKSHARE 212 qsr_arr (:,:,nb_rec_per_days) = qsr(:,:) 213 !$OMP END WORKSHARE 214 qsr_mean(:,: ) = SUM( qsr_arr(:,:,:), 3 ) / nb_rec_per_days 223 !$OMP DO schedule(static) private(ji, jj) 224 DO jj = 1, jpj 225 DO ji = 1, jpi 226 qsr_arr (ji,jj,nb_rec_per_days) = qsr(ji,jj) 227 END DO 228 END DO 215 229 !$OMP END PARALLEL 230 qsr_mean(:,: ) = SUM( qsr_arr(:,:,:), 3 ) / nb_rec_per_days 216 231 ENDIF 217 232 !
Note: See TracChangeset
for help on using the changeset viewer.