Changeset 6748 for branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC
- Timestamp: 2016-06-28T11:53:56+02:00 (8 years ago)
- Location: branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC
- Files: 51 edited
Legend:
- unmarked lines: unmodified context
- "+": lines added in r6748
- "-": lines removed
- "…": elided lines between hunks
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/dia25h.F90
r6140 → r6748

      ! ------------------------- !
      cnt_25h = 1                               ! sets the first value of sum at timestep 1 (note - should strictly be at timestep zero so before values used where possible)
+     !$OMP PARALLEL WORKSHARE
      tn_25h(:,:,:) = tsb(:,:,:,jp_tem)
      sn_25h(:,:,:) = tsb(:,:,:,jp_sal)
…
      rmxln_25h(:,:,:) = mxln(:,:,:)
   #endif
+     !$OMP END PARALLEL WORKSHARE
   #if defined key_lim3 || defined key_lim2
      CALL ctl_stop('STOP', 'dia_25h not setup yet to do tidemean ice')
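The directive pair added above is the core pattern of this changeset: `!$OMP PARALLEL WORKSHARE` lets the threads of a parallel region divide Fortran array-syntax assignments among themselves, and the construct inserts whatever synchronization is needed to preserve the statements' sequential semantics. A minimal self-contained sketch of the construct (generic arrays, not the NEMO fields):

    PROGRAM workshare_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 64
       REAL :: a(n,n,n), b(n,n,n)
       b = 1.0
       !$OMP PARALLEL WORKSHARE
       a(:,:,:) = 2.0 * b(:,:,:)      ! each array assignment is split into units of work
       b(:,:,:) = a(:,:,:) + 1.0      ! shared among the threads of the enclosing region
       !$OMP END PARALLEL WORKSHARE
       PRINT *, a(1,1,1), b(1,1,1)    ! expect 2.0 and 3.0
    END PROGRAM workshare_demo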
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diacfl.F90
r6140 → r6748

      ALLOCATE( zcu_cfl(jpi, jpj, jpk), zcv_cfl(jpi, jpj, jpk), zcw_cfl(jpi, jpj, jpk) )
+     !$OMP PARALLEL WORKSHARE
      zcu_cfl(:,:,:)=0.0
      zcv_cfl(:,:,:)=0.0
      zcw_cfl(:,:,:)=0.0
+     !$OMP END PARALLEL WORKSHARE
      IF( lwp ) THEN
         WRITE(numout,*)
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diafwb.F90
r6140 → r6748

      IF( lk_mpp )   CALL mpp_sum( a_sshb )      ! sum over the global domain

+     !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zwei,a_salb)
      DO jk = 1, jpkm1
         DO jj = 2, jpjm1
…
      IF( lk_mpp )   CALL mpp_sum( zarea )       ! sum over the global domain

+     !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zwei,a_saln,zvol)
      DO jk = 1, jpkm1
         DO jj = 2, jpjm1
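One caution on the clauses above, worth flagging because these loops feed global sums: a scalar that accumulates across iterations must not simply be listed `private`, or each thread would sum into its own copy and the partial results would be discarded at the end of the region. Whether `a_salb` and `a_saln` fall into that category can only be judged from the full routine, but if they do, the standard OpenMP idiom is a `reduction` clause. A self-contained sketch with generic names (not the NEMO variables):

    PROGRAM reduction_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 100
       INTEGER :: jk, jj
       REAL :: w(n,n), total
       w = 0.5
       total = 0.0
       ! Each thread keeps a private partial sum; OpenMP combines them on exit.
       !$OMP PARALLEL DO schedule(static) private(jj) reduction(+:total)
       DO jk = 1, n
          DO jj = 1, n
             total = total + w(jj,jk)
          END DO
       END DO
       PRINT *, 'total =', total        ! expect 5000.0
    END PROGRAM reduction_demo

Per-iteration temporaries such as `zwei` and `zvol`, by contrast, are exactly what `private` is meant for.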
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diawri.F90
r6387 → r6748

      CALL iom_put( "sst", tsn(:,:,1,jp_tem) )       ! surface temperature
      IF ( iom_use("sbt") ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj, ji, jkbot)
         DO jj = 1, jpj
            DO ji = 1, jpi
…
      CALL iom_put( "sss", tsn(:,:,1,jp_sal) )       ! surface salinity
      IF ( iom_use("sbs") ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj, ji, jkbot)
         DO jj = 1, jpj
            DO ji = 1, jpi
…
      IF ( iom_use("taubot") ) THEN                  ! bottom stress
         z2d(:,:) = 0._wp
+        !$OMP PARALLEL DO schedule(static) private(jj, ji, zztmpx,zztmpy)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      CALL iom_put( "ssu", un(:,:,1) )               ! surface i-current
      IF ( iom_use("sbu") ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj, ji, jkbot)
         DO jj = 1, jpj
            DO ji = 1, jpi
…
      CALL iom_put( "ssv", vn(:,:,1) )               ! surface j-current
      IF ( iom_use("sbv") ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj, ji,jkbot)
         DO jj = 1, jpj
            DO ji = 1, jpi
…
      ! Caution: in the VVL case, it only correponds to the baroclinic mass transport.
      z2d(:,:) = rau0 * e1e2t(:,:)
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpk
-        z3d(:,:,jk) = wn(:,:,jk) * z2d(:,:)
+        z3d(:,:,jk) = wn(:,:,jk) * z2d(:,:)
      END DO
      CALL iom_put( "w_masstr" , z3d )
…
      IF ( iom_use("sstgrad") .OR. iom_use("sstgrad2") ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj, ji, zztmp, zztmpx, zztmpy)
         DO jj = 2, jpjm1                             ! sst gradient
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      IF( iom_use("heatc") ) THEN
         z2d(:,:)  = 0._wp
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1, jpkm1
            DO jj = 1, jpj
…
      IF( iom_use("saltc") ) THEN
         z2d(:,:)  = 0._wp
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1, jpkm1
            DO jj = 1, jpj
…
      IF ( iom_use("eken") ) THEN
         rke(:,:,jk) = 0._wp                          ! kinetic energy
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zztmp, zztmpx, zztmpy)
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
…
      IF( iom_use("u_masstr") .OR. iom_use("u_heattr") .OR. iom_use("u_salttr") ) THEN
         z3d(:,:,jpk) = 0.e0
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpkm1
            z3d(:,:,jk) = rau0 * un(:,:,jk) * e2u(:,:) * e3u_n(:,:,jk) * umask(:,:,jk)
…
      IF( iom_use("u_heattr") ) THEN
         z2d(:,:) = 0.e0
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
…
      IF( iom_use("u_salttr") ) THEN
         z2d(:,:) = 0.e0
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
…
      IF( iom_use("v_masstr") .OR. iom_use("v_heattr") .OR. iom_use("v_salttr") ) THEN
         z3d(:,:,jpk) = 0.e0
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpkm1
            z3d(:,:,jk) = rau0 * vn(:,:,jk) * e1v(:,:) * e3v_n(:,:,jk) * vmask(:,:,jk)
…
      IF( iom_use("v_heattr") ) THEN
         z2d(:,:) = 0.e0
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
…
      IF( iom_use("v_salttr") ) THEN
         z2d(:,:) = 0.e0
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/closea.F90
r5836 → r6748

      !
      DO jc = 1, jpncs
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = ncsj1(jc), ncsj2(jc)
            DO ji = ncsi1(jc), ncsi2(jc)
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domain.F90
r6140 → r6748

      !!      - 1D configuration, move Coriolis, u and v at T-point
      !!----------------------------------------------------------------------
-     INTEGER ::   jk                ! dummy loop indices
+     INTEGER ::   jk, jj, ji        ! dummy loop indices
      INTEGER ::   iconf = 0         ! local integers
      REAL(wp), POINTER, DIMENSION(:,:) ::   z1_hu_0, z1_hv_0
…
      IF( ln_sco )   CALL dom_stiff    ! Maximum stiffness ratio/hydrostatic consistency
      !
+     !$OMP PARALLEL WORKSHARE
      ht_0(:,:) = e3t_0(:,:,1) * tmask(:,:,1)     ! Reference ocean thickness
      hu_0(:,:) = e3u_0(:,:,1) * umask(:,:,1)
      hv_0(:,:) = e3v_0(:,:,1) * vmask(:,:,1)
+     !$OMP END PARALLEL WORKSHARE
      DO jk = 2, jpk
-        ht_0(:,:) = ht_0(:,:) + e3t_0(:,:,jk) * tmask(:,:,jk)
-        hu_0(:,:) = hu_0(:,:) + e3u_0(:,:,jk) * umask(:,:,jk)
-        hv_0(:,:) = hv_0(:,:) + e3v_0(:,:,jk) * vmask(:,:,jk)
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
+        DO jj = 1, jpj
+           DO ji = 1, jpi
+              ht_0(ji,jj) = ht_0(ji,jj) + e3t_0(ji,jj,jk) * tmask(ji,jj,jk)
+              hu_0(ji,jj) = hu_0(ji,jj) + e3u_0(ji,jj,jk) * umask(ji,jj,jk)
+              hv_0(ji,jj) = hv_0(ji,jj) + e3v_0(ji,jj,jk) * vmask(ji,jj,jk)
+           END DO
+        END DO
      END DO
      !
…
      CALL wrk_alloc( jpi,jpj,   z1_hu_0, z1_hv_0 )
      !
+     !$OMP PARALLEL WORKSHARE
      z1_hu_0(:,:) = ssumask(:,:) / ( hu_0(:,:) + 1._wp - ssumask(:,:) )     ! _i mask due to ISF
      z1_hv_0(:,:) = ssvmask(:,:) / ( hv_0(:,:) + 1._wp - ssvmask(:,:) )
+     !$OMP END PARALLEL WORKSHARE
      !
      !                    ! before       ! now          ! after   !
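The domain.F90 hunk shows the second recurring transformation of this changeset: a whole-array accumulation over vertical levels is rewritten as explicit ji/jj loops so that `!$OMP PARALLEL DO` can split the horizontal indices among threads, while the jk recurrence stays sequential in each column. A compilable sketch of the same shape (sizes and names are invented, not the NEMO arrays):

    PROGRAM explicit_loops_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: ni = 32, nj = 32, nk = 8
       INTEGER :: ji, jj, jk
       REAL :: h(ni,nj), e3(ni,nj,nk), msk(ni,nj,nk)
       e3 = 1.0   ;   msk = 1.0
       h(:,:) = e3(:,:,1) * msk(:,:,1)        ! level 1, array syntax
       DO jk = 2, nk                          ! k accumulation stays sequential
          !$OMP PARALLEL DO schedule(static) private(jj, ji)
          DO jj = 1, nj
             DO ji = 1, ni
                h(ji,jj) = h(ji,jj) + e3(ji,jj,jk) * msk(ji,jj,jk)
             END DO
          END DO
       END DO
       PRINT *, h(1,1)                        ! expect 8.0
    END PROGRAM explicit_loops_demo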
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domhgr.F90
r6140 → r6748

      IF( ie1e2u_v == 0 ) THEN         ! e1e2u and e1e2v have not been read: compute them
         !                             ! e2u and e1v does not include a reduction in some strait: apply reduction
+        !$OMP PARALLEL WORKSHARE
         e1e2u (:,:) = e1u(:,:) * e2u(:,:)
         e1e2v (:,:) = e1v(:,:) * e2v(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
…
      IF(lwp) WRITE(numout,*) '          given by ppe1_deg and ppe2_deg'
      !
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, zti, zui, zvi, zfi, ztj, zuj, zvj, zfj)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      ENDIF
   #endif
+     !$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      ! Horizontal scale factors (in meters)
      ! ======
+     !$OMP PARALLEL WORKSHARE
      e1t(:,:) = ppe1_m   ;   e2t(:,:) = ppe2_m
      e1u(:,:) = ppe1_m   ;   e2u(:,:) = ppe2_m
      e1v(:,:) = ppe1_m   ;   e2v(:,:) = ppe2_m
      e1f(:,:) = ppe1_m   ;   e2f(:,:) = ppe2_m
+     !$OMP END PARALLEL WORKSHARE
      !
   CASE ( 4 )        !== geographical mesh on the sphere, isotropic MERCATOR type ==!
…
      IF(lwp) WRITE(numout,*) '          Index of the equator on the MERCATOR grid:', ijeq
      !
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, zti, zui, zvi, zfi, ztj, zuj, zvj, zfj)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      ENDIF
      !
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, zim1, zjm1)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      ! Horizontal scale factors (in meters)
      ! ======
+     !$OMP PARALLEL WORKSHARE
      e1t(:,:) = ze1   ;   e2t(:,:) = ze1
      e1u(:,:) = ze1   ;   e2u(:,:) = ze1
      e1v(:,:) = ze1   ;   e2v(:,:) = ze1
      e1f(:,:) = ze1   ;   e2f(:,:) = ze1
+     !$OMP END PARALLEL WORKSHARE
      !
   CASE DEFAULT
…
      ! -----------------------------
      !
+     !$OMP PARALLEL WORKSHARE
      r1_e1t(:,:) = 1._wp / e1t(:,:)   ;   r1_e2t (:,:) = 1._wp / e2t(:,:)
      r1_e1u(:,:) = 1._wp / e1u(:,:)   ;   r1_e2u (:,:) = 1._wp / e2u(:,:)
…
      e1e2t (:,:) = e1t(:,:) * e2t(:,:)   ;   r1_e1e2t(:,:) = 1._wp / e1e2t(:,:)
      e1e2f (:,:) = e1f(:,:) * e2f(:,:)   ;   r1_e1e2f(:,:) = 1._wp / e1e2f(:,:)
+     !$OMP END PARALLEL WORKSHARE
      IF( jphgr_msh /= 0 ) THEN        ! e1e2u and e1e2v have not been set: compute them
+        !$OMP PARALLEL WORKSHARE
         e1e2u (:,:) = e1u(:,:) * e2u(:,:)
         e1e2v (:,:) = e1v(:,:) * e2v(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
+     !$OMP PARALLEL WORKSHARE
      r1_e1e2u(:,:) = 1._wp / e1e2u(:,:)     ! compute their invert in both cases
      r1_e1e2v(:,:) = 1._wp / e1e2v(:,:)
…
      e2_e1u(:,:) = e2u(:,:) / e1u(:,:)
      e1_e2v(:,:) = e1v(:,:) / e2v(:,:)
+     !$OMP END PARALLEL WORKSHARE

      IF( lwp .AND. nn_print >=1 .AND. .NOT.ln_rstart ) THEN     ! Control print : Grid informations (if not restart)
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/dommsk.F90
r6140 → r6748

      ! N.B. tmask has already the right boundary conditions since mbathy is ok
      !
-     tmask(:,:,:) = 0._wp
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpk
         DO jj = 1, jpj
            DO ji = 1, jpi
+              tmask(ji,jj,jk) = 0._wp
               IF( REAL( mbathy(ji,jj) - jk, wp ) + 0.1_wp >= 0._wp )   tmask(ji,jj,jk) = 1._wp
            END DO
…

      ! (ISF) define barotropic mask and mask the ice shelf point
+     !$OMP PARALLEL WORKSHARE
      ssmask(:,:)=tmask(:,:,1)             ! at this stage ice shelf is not masked
+     !$OMP END PARALLEL WORKSHARE
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpk
         DO jj = 1, jpj
…
      ! Interior domain mask (used for global sum)
      ! --------------------
+     !$OMP PARALLEL WORKSHARE
      tmask_i(:,:) = ssmask(:,:)           ! (ISH) tmask_i = 1 even on the ice shelf

      tmask_h(:,:) = 1._wp                 ! 0 on the halo and 1 elsewhere
+     !$OMP END PARALLEL WORKSHARE
      iif = jpreci                         ! ???
      iil = nlci - jpreci + 1
…
      ! 2. Ocean/land mask at u-, v-, and z-points (computed from tmask)
      ! -------------------------------------------
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpk
         DO jj = 1, jpjm1
…
      END DO
      ! (ISF) MIN(1,SUM(umask)) is here to check if you have effectively at least 1 wet cell at u point
+     !$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, jpjm1
         DO ji = 1, fs_jpim1   ! vector loop
…
      wumask(:,:,1) = umask(:,:,1)
      wvmask(:,:,1) = vmask(:,:,1)
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 2, jpk                       ! interior values
         wmask (:,:,jk) = tmask(:,:,jk) * tmask(:,:,jk-1)
…
      ! Lateral boundary conditions on velocity (modify fmask)
      ! ---------------------------------------
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpk
         zwf(:,:) = fmask(:,:,jk)
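Two details of the dommsk.F90 hunks deserve a note. First, the separate `tmask(:,:,:) = 0._wp` sweep was fused into the parallel loop as a per-element initialization, which saves one full pass over the array and, on NUMA nodes, can let first-touch page placement match the threads that later use the data. Second, the last hunk assigns a shared scratch array (`zwf(:,:) = fmask(:,:,jk)`) inside a parallelized jk loop without `zwf` in the `private` list; from this fragment alone that looks like a data race that would need a private or per-thread copy, though only the full routine can confirm it. A sketch of the fused-initialization pattern with invented names:

    PROGRAM fuse_init_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 64
       INTEGER :: ji, jj, jk
       REAL    :: mask(n,n,n)
       INTEGER :: bathy(n,n)
       bathy = n/2
       !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
       DO jk = 1, n
          DO jj = 1, n
             DO ji = 1, n
                mask(ji,jj,jk) = 0.0                       ! init fused into the compute loop
                IF( jk <= bathy(ji,jj) )   mask(ji,jj,jk) = 1.0
             END DO
          END DO
       END DO
       PRINT *, SUM(mask)     ! expect 64*64*32 = 131072.0
    END PROGRAM fuse_init_demo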
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domngb.F90
r6140 → r6748

      IF ( PRESENT(kkk) ) ik=kkk
      SELECT CASE( cdgrid )
-     CASE( 'U' )  ; zglam(:,:) = glamu(:,:) ; zgphi(:,:) = gphiu(:,:) ; zmask(nldi:nlei,nldj:nlej) = umask(nldi:nlei,nldj:nlej,ik)
-     CASE( 'V' )  ; zglam(:,:) = glamv(:,:) ; zgphi(:,:) = gphiv(:,:) ; zmask(nldi:nlei,nldj:nlej) = vmask(nldi:nlei,nldj:nlej,ik)
-     CASE( 'F' )  ; zglam(:,:) = glamf(:,:) ; zgphi(:,:) = gphif(:,:) ; zmask(nldi:nlei,nldj:nlej) = fmask(nldi:nlei,nldj:nlej,ik)
-     CASE DEFAULT ; zglam(:,:) = glamt(:,:) ; zgphi(:,:) = gphit(:,:) ; zmask(nldi:nlei,nldj:nlej) = tmask(nldi:nlei,nldj:nlej,ik)
+     CASE( 'U' )
+        !$OMP PARALLEL WORKSHARE
+        zglam(:,:) = glamu(:,:) ; zgphi(:,:) = gphiu(:,:) ; zmask(nldi:nlei,nldj:nlej) = umask(nldi:nlei,nldj:nlej,ik)
+        !$OMP END PARALLEL WORKSHARE
+     CASE( 'V' )
+        !$OMP PARALLEL WORKSHARE
+        zglam(:,:) = glamv(:,:) ; zgphi(:,:) = gphiv(:,:) ; zmask(nldi:nlei,nldj:nlej) = vmask(nldi:nlei,nldj:nlej,ik)
+        !$OMP END PARALLEL WORKSHARE
+     CASE( 'F' )
+        !$OMP PARALLEL WORKSHARE
+        zglam(:,:) = glamf(:,:) ; zgphi(:,:) = gphif(:,:) ; zmask(nldi:nlei,nldj:nlej) = fmask(nldi:nlei,nldj:nlej,ik)
+        !$OMP END PARALLEL WORKSHARE
+     CASE DEFAULT
+        !$OMP PARALLEL WORKSHARE
+        zglam(:,:) = glamt(:,:) ; zgphi(:,:) = gphit(:,:) ; zmask(nldi:nlei,nldj:nlej) = tmask(nldi:nlei,nldj:nlej,ik)
+        !$OMP END PARALLEL WORKSHARE
      END SELECT
…
         zglam(:,:) = zglam(:,:) - plon
      END IF
+     !$OMP PARALLEL WORKSHARE
      zgphi(:,:) = zgphi(:,:) - plat
      zdist(:,:) = zglam(:,:) * zglam(:,:) + zgphi(:,:) * zgphi(:,:)
+     !$OMP END PARALLEL WORKSHARE
      IF( lk_mpp ) THEN
         CALL mpp_minloc( zdist(:,:), zmask, zmini, kii, kjj)
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domwri.F90
r5836 → r6748

      CALL dom_uniq( zprw, 'T' )
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, jk)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      CALL iom_rstput( 0, 0, inum2, 'tmaskutil', zprt, ktype = jp_i1 )
      CALL dom_uniq( zprw, 'U' )
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, jk)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      CALL iom_rstput( 0, 0, inum2, 'umaskutil', zprt, ktype = jp_i1 )
      CALL dom_uniq( zprw, 'V' )
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, jk)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      CALL iom_rstput( 0, 0, inum2, 'vmaskutil', zprt, ktype = jp_i1 )
      CALL dom_uniq( zprw, 'F' )
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, jk)
      DO jj = 1, jpj
         DO ji = 1, jpi
…
      IF( nmsh <= 3 ) THEN                            ! 3D depth
         CALL iom_rstput( 0, 0, inum4, 'gdept_0', gdept_0, ktype = jp_r4 )
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
         DO jk = 1,jpk
            DO jj = 1, jpjm1
…
         CALL iom_rstput( 0, 0, inum4, 'gdepw_0', gdepw_0, ktype = jp_r4 )
      ELSE                                            ! 2D bottom depth
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 1,jpj
            DO ji = 1,jpi
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domzgr.F90
r6492 → r6748

      ! need to be like this to compute the pressure gradient with ISF. If not, level beneath the ISF are not aligned (sum(e3t) /= depth)
      ! define e3t_0 and e3w_0 as the differences between gdept and gdepw respectively
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpkm1
         e3t_1d(jk) = gdepw_1d(jk+1)-gdepw_1d(jk)
      END DO
      e3t_1d(jpk) = e3t_1d(jpk-1)                     ! we don't care because this level is masked in NEMO
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 2, jpk
         e3w_1d(jk) = gdept_1d(jk) - gdept_1d(jk-1)
…
      IF( rn_bathy > 0.01 ) THEN
         IF(lwp) WRITE(numout,*) '   Depth = rn_bathy read in namelist'
+        !$OMP PARALLEL WORKSHARE
         zdta(:,:) = rn_bathy
+        !$OMP END PARALLEL WORKSHARE
         IF( ln_sco ) THEN                            ! s-coordinate (zsc ): idta()=jpk
+           !$OMP PARALLEL WORKSHARE
            idta(:,:) = jpkm1
+           !$OMP END PARALLEL WORKSHARE
         ELSE                                         ! z-coordinate (zco or zps): step-like topography
+           !$OMP PARALLEL WORKSHARE
            idta(:,:) = jpkm1
+           !$OMP END PARALLEL WORKSHARE
            DO jk = 1, jpkm1
               WHERE( gdept_1d(jk) < zdta(:,:) .AND. zdta(:,:) <= gdept_1d(jk+1) )   idta(:,:) = jk
…
      ELSE
         IF(lwp) WRITE(numout,*) '   Depth = depthw(jpkm1)'
+        !$OMP PARALLEL WORKSHARE
         idta(:,:) = jpkm1                            ! before last level
         zdta(:,:) = gdepw_1d(jpk)                    ! last w-point depth
+        !$OMP END PARALLEL WORKSHARE
         h_oce = gdepw_1d(jpk)
      ENDIF
…
      IF(lwp) WRITE(numout,*) '   background ocean depth = ', h_oce , ' meters'
      !
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, zi, zj)
      DO jj = 1, jpjdta                               ! zdta :
         DO ji = 1, jpidta
…
      !                                               ! idta :
      IF( ln_sco ) THEN                               ! s-coordinate (zsc ): idta()=jpk
+        !$OMP PARALLEL WORKSHARE
         idta(:,:) = jpkm1
+        !$OMP END PARALLEL WORKSHARE
      ELSE                                            ! z-coordinate (zco or zps): step-like topography
+        !$OMP PARALLEL WORKSHARE
         idta(:,:) = jpkm1
+        !$OMP END PARALLEL WORKSHARE
         DO jk = 1, jpkm1
            WHERE( gdept_1d(jk) < zdta(:,:) .AND. zdta(:,:) <= gdept_1d(jk+1) )   idta(:,:) = jk
…
      !                                               ! Caution : idta on the global domain: use of jperio, not nperio
      IF( jperio == 1 .OR. jperio == 4 .OR. jperio == 6 ) THEN
+        !$OMP PARALLEL WORKSHARE
         idta( : , 1 ) = -1   ;   zdta( : , 1 ) = -1._wp
         idta( : ,jpjdta) = 0   ;   zdta( : ,jpjdta) = 0._wp
+        !$OMP END PARALLEL WORKSHARE
      ELSEIF( jperio == 2 ) THEN
+        !$OMP PARALLEL WORKSHARE
         idta( : , 1 ) = idta( : , 3 )   ;   zdta( : , 1 ) = zdta( : , 3 )
         idta( : ,jpjdta) = 0   ;   zdta( : ,jpjdta) = 0._wp
         idta( 1 , : ) = 0   ;   zdta( 1 , : ) = 0._wp
         idta(jpidta, : ) = 0   ;   zdta(jpidta, : ) = 0._wp
+        !$OMP END PARALLEL WORKSHARE
      ELSE
         ih = 0   ;   zh = 0._wp
         IF( ln_sco ) ih = jpkm1   ;   IF( ln_sco ) zh = h_oce
+        !$OMP PARALLEL WORKSHARE
         idta( : , 1 ) = ih   ;   zdta( : , 1 ) = zh
         idta( : ,jpjdta) = ih   ;   zdta( : ,jpjdta) = zh
         idta( 1 , : ) = ih   ;   zdta( 1 , : ) = zh
         idta(jpidta, : ) = ih   ;   zdta(jpidta, : ) = zh
+        !$OMP END PARALLEL WORKSHARE
      ENDIF

      !                                               ! local domain level and meter bathymetries (mbathy,bathy)
+     !$OMP PARALLEL WORKSHARE
      mbathy(:,:) = 0                                 ! set to zero extra halo points
      bathy (:,:) = 0._wp                             ! (require for mpp case)
+     !$OMP END PARALLEL WORKSHARE
+     !$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, nlcj                                 ! interior values
         DO ji = 1, nlci
…
         END DO
      END DO
+     !$OMP PARALLEL WORKSHARE
      risfdep(:,:)=0.e0
      misfdep(:,:)=1
+     !$OMP END PARALLEL WORKSHARE
      !
      DEALLOCATE( idta, zdta )
…
      CALL iom_get ( inum, jpdom_data, 'Bathy_level', bathy )
      CALL iom_close( inum )
+     !$OMP PARALLEL WORKSHARE
      mbathy(:,:) = INT( bathy(:,:) )
      ! initialisation isf variables
      risfdep(:,:)=0._wp ; misfdep(:,:)=1
+     !$OMP END PARALLEL WORKSHARE
      !                                               ! =====================
      IF( cp_cfg == "orca" .AND. jp_cfg == 2 ) THEN   ! ORCA R2 configuration
…
      !
      ! initialisation isf variables
+     !$OMP PARALLEL WORKSHARE
      risfdep(:,:)=0._wp ; misfdep(:,:)=1
+     !$OMP END PARALLEL WORKSHARE
      !
      IF ( ln_isfcav ) THEN
…
      mikt(:,:) = MAX( misfdep(:,:) , 1 )             ! top k-index of T-level (=1)
      !                                               ! top k-index of W-level (=mikt)
+     !$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, jpjm1                                ! top k-index of U- (U-) level
         DO ji = 1, jpim1
…
      IF( nn_timing == 1 )  CALL timing_start('zgr_zco')
      !
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpk
         gdept_0(:,:,jk) = gdept_1d(jk)
…

      ! Scale factors and depth at T- and W-points
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpk                                  ! intitialization to the reference z-coordinate
         gdept_0(:,:,jk) = gdept_1d(jk)
…
      !
      ! Scale factors and depth at U-, V-, UW and VW-points
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpk                                  ! initialisation to z-scale factors
         e3u_0 (:,:,jk) = e3t_1d(jk)
…
      END DO

+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1,jpk                                   ! Computed as the minimum of neighbooring scale factors
         DO jj = 1, jpjm1
…

      ! Scale factor at F-point
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpk                                  ! initialisation to z-scale factors
         e3f_0(:,:,jk) = e3t_1d(jk)
      END DO
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpk                                  ! Computed as the minimum of neighbooring V-scale factors
         DO jj = 1, jpjm1
…
   !!gm  bug ? :  must be a do loop with mj0,mj1
      !
+     !$OMP PARALLEL WORKSHARE
      e3t_0(:,mj0(1),:) = e3t_0(:,mj0(2),:)           ! we duplicate factor scales for jj = 1 and jj = 2
      e3w_0(:,mj0(1),:) = e3w_0(:,mj0(2),:)
…
      e3v_0(:,mj0(1),:) = e3v_0(:,mj0(2),:)
      e3f_0(:,mj0(1),:) = e3f_0(:,mj0(2),:)
+     !$OMP END PARALLEL WORKSHARE
      ! Control of the sign
      IF( MINVAL( e3t_0 (:,:,:) ) <= 0._wp )   CALL ctl_stop( '    zgr_zps :   e r r o r   e3t_0 <= 0' )
…
      gde3w_0(:,:,1) = 0.5_wp * e3w_0(:,:,1)
      DO jk = 2, jpk
-        gde3w_0(:,:,jk) = gde3w_0(:,:,jk-1) + e3w_0(:,:,jk)
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
+        DO jj = 1, jpj
+           DO ji = 1, jpi
+              gde3w_0(ji,jj,jk) = gde3w_0(ji,jj,jk-1) + e3w_0(ji,jj,jk)
+           END DO
+        END DO
      END DO
   END IF
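One opportunity domzgr.F90 leaves on the table: the `WHERE( gdept_1d(jk) < zdta(:,:) ... )` statements sit outside any directive even though the surrounding assignments were parallelized. WORKSHARE is an option there too, since the OpenMP Fortran specification allows WHERE statements and constructs inside a `!$OMP PARALLEL WORKSHARE` block. A hedged, self-contained sketch (not the changeset's code):

    PROGRAM workshare_where_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 16
       REAL    :: depth(n,n)
       INTEGER :: lvl(n,n)
       depth = 500.0
       lvl   = 10
       !$OMP PARALLEL WORKSHARE
       WHERE( depth(:,:) < 1000.0 )   lvl(:,:) = 5    ! masked assignment is shareable work
       !$OMP END PARALLEL WORKSHARE
       PRINT *, lvl(1,1)                              ! expect 5
    END PROGRAM workshare_where_demo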
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/istate.F90
r6140 → r6748

      IF( lk_c1d )   CALL dta_uvd_init                ! Initialization of U & V input data

+     !$OMP PARALLEL WORKSHARE
      rhd  (:,:,:  ) = 0._wp   ;   rhop (:,:,:  ) = 0._wp      ! set one for all to 0 at level jpk
      rn2b (:,:,:  ) = 0._wp   ;   rn2  (:,:,:  ) = 0._wp      ! set one for all to 0 at levels 1 and jpk
      tsa  (:,:,:,:) = 0._wp                                   ! set one for all to 0 at level jpk
      rab_b(:,:,:,:) = 0._wp   ;   rab_n(:,:,:,:) = 0._wp      ! set one for all to 0 at level jpk
+     !$OMP END PARALLEL WORKSHARE

      IF( ln_rstart ) THEN                            ! Restart from a file
…
      !                                               ! Initialization of ocean to zero
      !   before fields     !       now fields
+     !$OMP PARALLEL WORKSHARE
      sshb (:,:)   = 0._wp   ;   sshn (:,:)   = 0._wp
      ub   (:,:,:) = 0._wp   ;   un   (:,:,:) = 0._wp
      vb   (:,:,:) = 0._wp   ;   vn   (:,:,:) = 0._wp
+     !$OMP END PARALLEL WORKSHARE
      hdivn(:,:,:) = 0._wp
      !
…
      IF ( ln_tsd_init ) THEN                         ! read 3D T and S data at nit000
         CALL dta_tsd( nit000, tsb )
+        !$OMP PARALLEL WORKSHARE
         tsn(:,:,:,:) = tsb(:,:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         !
      ELSE                                            ! Initial T-S fields defined analytically
…
         CALL wrk_alloc( jpi,jpj,jpk,2,  zuvd )
         CALL dta_uvd( nit000, zuvd )
+        !$OMP PARALLEL WORKSHARE
         ub(:,:,:) = zuvd(:,:,:,1)   ;   un(:,:,:) = ub(:,:,:)
         vb(:,:,:) = zuvd(:,:,:,2)   ;   vn(:,:,:) = vb(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         CALL wrk_dealloc( jpi,jpj,jpk,2,  zuvd )
      ENDIF
…
      ! - ML - sshn could be modified by istate_eel, so that initialization of e3t_b is done here
      IF( .NOT.ln_linssh ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpk
            e3t_b(:,:,jk) = e3t_n(:,:,jk)
…
      ! Do it whatever the free surface method, these arrays being eventually used
      !
+     !$OMP PARALLEL WORKSHARE
      un_b(:,:) = 0._wp   ;   vn_b(:,:) = 0._wp
      ub_b(:,:) = 0._wp   ;   vb_b(:,:) = 0._wp
+     !$OMP END PARALLEL WORKSHARE
      !
   !!gm  the use of umsak & vmask is not necessary belox as un, vn, ub, vb are always masked
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpkm1
         DO jj = 1, jpj
…
      END DO
      !
+     !$OMP PARALLEL WORKSHARE
      un_b(:,:) = un_b(:,:) * r1_hu_n(:,:)
      vn_b(:,:) = vn_b(:,:) * r1_hv_n(:,:)

      ub_b(:,:) = ub_b(:,:) * r1_hu_b(:,:)
      vb_b(:,:) = vb_b(:,:) * r1_hv_b(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      IF( nn_timing == 1 )   CALL timing_stop('istate_init')
…
      IF(lwp) WRITE(numout,*) '~~~~~~~~~~~'
      !
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpk
         DO jj = 1, jpj
…
      CALL iom_close( inum )

+     !$OMP PARALLEL WORKSHARE
      tsn(:,:,:,jp_tem) = tsn(:,:,:,jp_tem) * tmask(:,:,:)
      tsb(:,:,:,jp_tem) = tsn(:,:,:,jp_tem)
+     !$OMP END PARALLEL WORKSHARE

      ! Read salinity field
…
      CALL iom_close( inum )

+     !$OMP PARALLEL WORKSHARE
      tsn(:,:,:,jp_sal) = tsn(:,:,:,jp_sal) * tmask(:,:,:)
      tsb(:,:,:,jp_sal) = tsn(:,:,:,jp_sal)
+     !$OMP END PARALLEL WORKSHARE
      !
   END SELECT
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/divhor.F90
r6140 → r6748

      ENDIF
      !
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpkm1                                !==  Horizontal divergence  ==!
         DO jj = 2, jpjm1
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynbfr.F90
r6140 → r6748

      IF( l_trddyn )   THEN                           ! trends: store the input trends
         CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv )
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:)
         ztrdv(:,:,:) = va(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF

+     !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv)
      DO jj = 2, jpjm1
         DO ji = 2, jpim1
…
      !
      IF( ln_isfcav ) THEN        ! ocean cavities
+        !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1
…
      !
      IF( l_trddyn )   THEN                           ! trends: send trends to trddyn for further diagnostics
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
         ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         CALL trd_dyn( ztrdu(:,:,:), ztrdv(:,:,:), jpdyn_bfr, kt )
         CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv )
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynhpg.F90
r6152 → r6748

      IF( l_trddyn ) THEN                             ! Temporary saving of ua and va trends (l_trddyn)
         CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv )
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:)
         ztrdv(:,:,:) = va(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
…
      !
      IF( l_trddyn ) THEN                             ! save the hydrostatic pressure gradient trends for momentum trend diagnostics
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
         ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         CALL trd_dyn( ztrdu, ztrdv, jpdyn_hpg, kt )
         CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv )
…
         END DO
      END DO
+     !$OMP PARALLEL WORKSHARE
      riceload(:,:)=ziceload(:,:)                     ! need to be saved for diaar5
+     !$OMP END PARALLEL WORKSHARE

      CALL wrk_dealloc( jpi,jpj, 2, ztstop)
…

      ! Surface value
+     !$OMP PARALLEL DO private(ji,jj, zcoef1)
      DO jj = 2, jpjm1
         DO ji = fs_2, fs_jpim1   ! vector opt.
…
      ! interior value (2=<jk=<jpkm1)
      DO jk = 2, jpkm1
+        !$OMP PARALLEL DO private(ji,jj, zcoef1)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynkeg.F90
r5328 → r6748

      IF( l_trddyn ) THEN                             ! Save ua and va trends
         CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv )
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:)
         ztrdv(:,:,:) = va(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF

-     zhke(:,:,jpk) = 0._wp
+     zhke(:,:,jpk) = 0._wp

      SELECT CASE ( kscheme )                         !==  Horizontal kinetic energy at T-point  ==!
      !
      CASE ( nkeg_C2 )                                !--  Standard scheme  --!
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zu, zv)
         DO jk = 1, jpkm1
            DO jj = 2, jpj
…
      !
      CASE ( nkeg_HW )                                !--  Hollingsworth scheme  --!
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zu, zv)
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
…
      END SELECT
      !
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpkm1                                !==  grad( KE ) added to the general momentum trends  ==!
         DO jj = 2, jpjm1
…
      !
      IF( l_trddyn ) THEN                             ! save the Kinetic Energy trends for diagnostic
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
         ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         CALL trd_dyn( ztrdu, ztrdv, jpdyn_keg, kt )
         CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv )
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynldf.F90
r6140 → r6748

      IF( l_trddyn )   THEN                           ! temporary save of momentum trends
         CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv )
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:)
         ztrdv(:,:,:) = va(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
…
      IF( l_trddyn ) THEN                             ! save the horizontal diffusive trends for further diagnostics
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
         ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         CALL trd_dyn( ztrdu, ztrdv, jpdyn_ldf, kt )
         CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv )
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynldf_lap_blp.F90
r6140 → r6748

      DO jk = 1, jpkm1                                ! Horizontal slab
         !                                            ! ===============
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 2, jpj
            DO ji = fs_2, jpi                         ! vector opt.
…
         END DO
         !
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 2, jpjm1                             ! - curl( curl) + grad( div )
            DO ji = fs_2, fs_jpim1                    ! vector opt.
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynnxt.F90
r6140 → r6748

      ! Ensure below that barotropic velocities match time splitting estimate
      ! Compute actual transport and replace it with ts estimate at "after" time step
+     !$OMP PARALLEL WORKSHARE
      zue(:,:) = e3u_a(:,:,1) * ua(:,:,1) * umask(:,:,1)
      zve(:,:) = e3v_a(:,:,1) * va(:,:,1) * vmask(:,:,1)
+     !$OMP END PARALLEL WORKSHARE
      DO jk = 2, jpkm1
         zue(:,:) = zue(:,:) + e3u_a(:,:,jk) * ua(:,:,jk) * umask(:,:,jk)
         zve(:,:) = zve(:,:) + e3v_a(:,:,jk) * va(:,:,jk) * vmask(:,:,jk)
      END DO
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpkm1
         ua(:,:,jk) = ( ua(:,:,jk) - zue(:,:) * r1_hu_a(:,:) + ua_b(:,:) ) * umask(:,:,jk)
…
      ! In the forward case, this is done below after asselin filtering
      ! so that asselin contribution is removed at the same time
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpkm1
         un(:,:,jk) = ( un(:,:,jk) - un_adv(:,:) + un_b(:,:) )*umask(:,:,jk)
…
      !
      IF( ln_dyn_trd ) THEN                           ! 3D output: total momentum trends
+        !$OMP PARALLEL WORKSHARE
         zua(:,:,:) = ( ua(:,:,:) - ub(:,:,:) ) * z1_2dt
         zva(:,:,:) = ( va(:,:,:) - vb(:,:,:) ) * z1_2dt
+        !$OMP END PARALLEL WORKSHARE
         CALL iom_put( "utrd_tot", zua )              ! total momentum trends, except the asselin time filter
         CALL iom_put( "vtrd_tot", zva )
      ENDIF
      !
+     !$OMP PARALLEL WORKSHARE
      zua(:,:,:) = un(:,:,:)                          ! save the now velocity before the asselin filter
      zva(:,:,:) = vn(:,:,:)                          ! (caution: there will be a shift by 1 timestep in the
+     !$OMP END PARALLEL WORKSHARE
      !                                               !  computation of the asselin filter trends)
      ENDIF
…
      ! ------------------------------------------
      IF( neuler == 0 .AND. kt == nit000 ) THEN       !* Euler at first time-step: only swap
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpkm1
            un(:,:,jk) = ua(:,:,jk)                   ! un <-- ua
…
         END DO
         IF(.NOT.ln_linssh ) THEN
+           !$OMP PARALLEL DO schedule(static) private(jk)
            DO jk = 1, jpkm1
               e3t_b(:,:,jk) = e3t_n(:,:,jk)
…
      IF( ln_linssh ) THEN                            ! Fixed volume !
         !                                            ! =============!
+        !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zuf, zvf)
         DO jk = 1, jpkm1
            DO jj = 1, jpj
…
            e3t_b(:,:,1:jpkm1) = e3t_n(:,:,1:jpkm1)
         ELSE
+           !$OMP PARALLEL DO schedule(static) private(jk)
            DO jk = 1, jpkm1
               e3t_b(:,:,jk) = e3t_n(:,:,jk) + atfp * ( e3t_b(:,:,jk) - 2._wp * e3t_n(:,:,jk) + e3t_a(:,:,jk) )
…
            CALL dom_vvl_interpol( e3t_b(:,:,:), e3u_b(:,:,:), 'U' )
            CALL dom_vvl_interpol( e3t_b(:,:,:), e3v_b(:,:,:), 'V' )
+           !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zuf, zvf)
            DO jk = 1, jpkm1
               DO jj = 1, jpj
…
            CALL dom_vvl_interpol( e3t_b(:,:,:), ze3u_f, 'U' )
            CALL dom_vvl_interpol( e3t_b(:,:,:), ze3v_f, 'V' )
+           !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zue3a, zve3a, zue3n, zve3n, zue3b, zve3b, zuf, zvf)
            DO jk = 1, jpkm1
               DO jj = 1, jpj
…
            zve(:,:) = zve(:,:) + e3v_b(:,:,jk) * vb(:,:,jk) * vmask(:,:,jk)
         END DO
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpkm1
            ub(:,:,jk) = ub(:,:,jk) - (zue(:,:) * r1_hu_n(:,:) - un_b(:,:)) * umask(:,:,jk)
…
      ENDIF
      !
+     !$OMP PARALLEL WORKSHARE
      un_b(:,:) = e3u_a(:,:,1) * un(:,:,1) * umask(:,:,1)
      ub_b(:,:) = e3u_b(:,:,1) * ub(:,:,1) * umask(:,:,1)
      vn_b(:,:) = e3v_a(:,:,1) * vn(:,:,1) * vmask(:,:,1)
      vb_b(:,:) = e3v_b(:,:,1) * vb(:,:,1) * vmask(:,:,1)
+     !$OMP END PARALLEL WORKSHARE
      DO jk = 2, jpkm1
         un_b(:,:) = un_b(:,:) + e3u_a(:,:,jk) * un(:,:,jk) * umask(:,:,jk)
…
         vb_b(:,:) = vb_b(:,:) + e3v_b(:,:,jk) * vb(:,:,jk) * vmask(:,:,jk)
      END DO
+     !$OMP PARALLEL WORKSHARE
      un_b(:,:) = un_b(:,:) * r1_hu_a(:,:)
      vn_b(:,:) = vn_b(:,:) * r1_hv_a(:,:)
      ub_b(:,:) = ub_b(:,:) * r1_hu_b(:,:)
      vb_b(:,:) = vb_b(:,:) * r1_hv_b(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      IF( .NOT.ln_dynspg_ts ) THEN                    ! output the barotropic currents
…
      ENDIF
      IF( l_trddyn ) THEN                             ! 3D output: asselin filter trends on momentum
+        !$OMP PARALLEL WORKSHARE
         zua(:,:,:) = ( ub(:,:,:) - zua(:,:,:) ) * z1_2dt
         zva(:,:,:) = ( vb(:,:,:) - zva(:,:,:) ) * z1_2dt
+        !$OMP END PARALLEL WORKSHARE
         CALL trd_dyn( zua, zva, jpdyn_atf, kt )
      ENDIF
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg.F90
r6140 → r6748

      IF( l_trddyn )   THEN                           ! temporary save of ta and sa trends
         CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv )
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:)
         ztrdv(:,:,:) = va(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
…
         .OR.  nn_ice_embd == 2  ) THEN               ! embedded sea-ice
         !
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      IF( ln_apr_dyn .AND. .NOT.ln_dynspg_ts ) THEN   !== Atmospheric pressure gradient (added later in time-split case) ==!
         zg_2 = grav * 0.5
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 2, jpjm1                             ! gradient of Patm using inverse barometer ssh
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
         CALL upd_tide( kt )                          ! update tide potential
         !
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 2, jpjm1                             ! add tide potential forcing
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
         zgrau0r    = - grav * r1_rau0
         zpice(:,:) = (  zintp * snwice_mass(:,:) + ( 1.- zintp ) * snwice_mass_b(:,:)  ) * zgrau0r
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      ENDIF
      !
+     !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
      DO jk = 1, jpkm1                                !== Add all terms to the general trend
         DO jj = 2, jpjm1
…
      !
      IF( l_trddyn )   THEN                           ! save the surface pressure gradient trends for further diagnostics
+        !$OMP PARALLEL WORKSHARE
         ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
         ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
+        !$OMP END PARALLEL WORKSHARE
         CALL trd_dyn( ztrdu, ztrdv, jpdyn_spg, kt )
         CALL wrk_dealloc( jpi,jpj,jpk, ztrdu, ztrdv )
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg_ts.F90
r6152 → r6748

      SELECT CASE( nn_een_e3f )                       !* ff/e3 at F-point
      CASE ( 0 )                                      ! original formulation  (masked averaging of e3t divided by 4)
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 1, jpjm1
            DO ji = 1, jpim1
…
         END DO
      CASE ( 1 )                                      ! new formulation  (masked averaging of e3t divided by the sum of mask)
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 1, jpjm1
            DO ji = 1, jpim1
…
      !
      ftne(1,:) = 0._wp   ;   ftnw(1,:) = 0._wp   ;   ftse(1,:) = 0._wp   ;   ftsw(1,:) = 0._wp
+
+     !$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 2, jpj
         DO ji = 2, jpi
…
      !
      ELSE                                            !== all other schemes (ENE, ENS, MIX)
+        !$OMP PARALLEL WORKSHARE
         zwz(:,:) = 0._wp
         zhf(:,:) = 0._wp
+        !$OMP END PARALLEL WORKSHARE
         IF ( .not. ln_sco ) THEN
…
         END IF

+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 1, jpjm1
-           zhf(:,jj) = zhf(:,jj) * (1._wp- umask(:,jj,1) * umask(:,jj+1,1))
+           DO ji = 1, jpi
+              zhf(ji,jj) = zhf(ji,jj) * (1._wp- umask(ji,jj,1) * umask(ji,jj+1,1))
+           END DO
         END DO

         DO jk = 1, jpkm1
+           !$OMP PARALLEL DO schedule(static) private(jj, ji)
            DO jj = 1, jpjm1
-              zhf(:,jj) = zhf(:,jj) + e3f_n(:,jj,jk) * umask(:,jj,jk) * umask(:,jj+1,jk)
+              DO ji = 1, jpi
+                 zhf(ji,jj) = zhf(ji,jj) + e3f_n(ji,jj,jk) * umask(ji,jj,jk) * umask(ji,jj+1,jk)
+              END DO
            END DO
         END DO
         CALL lbc_lnk( zhf, 'F', 1._wp )
         ! JC: TBC. hf should be greater than 0
+        !$OMP PARALLEL DO schedule(static) private(jj, ji)
         DO jj = 1, jpj
            DO ji = 1, jpi
…
         END DO
+        !$OMP PARALLEL WORKSHARE
         zwz(:,:) = ff(:,:) * zwz(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
   ENDIF
…
      !                                               !* e3*d/dt(Ua) (Vertically integrated)
      !                                               ! --------------------------------------------------
+     !$OMP PARALLEL WORKSHARE
      zu_frc(:,:) = 0._wp
      zv_frc(:,:) = 0._wp
+     !$OMP END PARALLEL WORKSHARE
      !
      DO jk = 1, jpkm1
-        zu_frc(:,:) = zu_frc(:,:) + e3u_n(:,:,jk) * ua(:,:,jk) * umask(:,:,jk)
-        zv_frc(:,:) = zv_frc(:,:) + e3v_n(:,:,jk) * va(:,:,jk) * vmask(:,:,jk)
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
+        DO jj=1,jpj
+           DO ji=1,jpi
+              zu_frc(ji,jj) = zu_frc(ji,jj) + e3u_n(ji,jj,jk) * ua(ji,jj,jk) * umask(ji,jj,jk)
+              zv_frc(ji,jj) = zv_frc(ji,jj) + e3v_n(ji,jj,jk) * va(ji,jj,jk) * vmask(ji,jj,jk)
+           END DO
+        END DO
      END DO
      !
+     !$OMP PARALLEL WORKSHARE
      zu_frc(:,:) = zu_frc(:,:) * r1_hu_n(:,:)
      zv_frc(:,:) = zv_frc(:,:) * r1_hv_n(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      !
      !                                               !* baroclinic momentum trend (remove the vertical mean trend)
+     !$OMP PARALLEL DO schedule(static) private(jk,jj,ji)
      DO jk = 1, jpkm1                                ! -----------------------------------------------------------
         DO jj = 2, jpjm1
…
      !                                               !* barotropic Coriolis trends (vorticity scheme dependent)
      !                                               ! --------------------------------------------------------
+     !$OMP PARALLEL WORKSHARE
      zwx(:,:) = un_b(:,:) * hu_n(:,:) * e2u(:,:)     ! now fluxes
      zwy(:,:) = vn_b(:,:) * hv_n(:,:) * e1v(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      IF( ln_dynvor_ene .OR. ln_dynvor_mix ) THEN     ! energy conserving or mixed scheme
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zy1,zy2,zx1,zx2)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ELSEIF ( ln_dynvor_ens ) THEN                   ! enstrophy conserving scheme
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zy1,zx1)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ELSEIF ( ln_dynvor_een ) THEN                   ! enstrophy and energy conserving scheme
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
         wduflt1(:,:) = 1.0_wp
         wdvflt1(:,:) = 1.0_wp
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1
…
         CALL lbc_lnk( zcpx, 'U', 1._wp )    ;    CALL lbc_lnk( zcpy, 'V', 1._wp )

+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1
…
      ELSE

+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      ENDIF

+     !$OMP PARALLEL DO schedule(static) private(jj,ji)
      DO jj = 2, jpjm1                                ! Remove coriolis term (and possibly spg) from barotropic trend
         DO ji = fs_2, fs_jpim1
…
      !                                               ! Add bottom stress contribution from baroclinic velocities:
      IF (ln_bt_fw) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
         END DO
      ELSE
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !                                               ! Add top stress contribution from baroclinic velocities:
      IF (ln_bt_fw) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
         END DO
      ELSE
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,ikbu,ikbv)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ! Note that the "unclipped" top friction parameter is used even with explicit drag
+     !$OMP PARALLEL WORKSHARE
      zu_frc(:,:) = zu_frc(:,:) + r1_hu_n(:,:) * tfrua(:,:) * zwx(:,:)
      zv_frc(:,:) = zv_frc(:,:) + r1_hv_n(:,:) * tfrva(:,:) * zwy(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      IF (ln_bt_fw) THEN                              ! Add wind forcing
+        !$OMP PARALLEL WORKSHARE
         zu_frc(:,:) = zu_frc(:,:) + zraur * utau(:,:) * r1_hu_n(:,:)
         zv_frc(:,:) = zv_frc(:,:) + zraur * vtau(:,:) * r1_hv_n(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ELSE
+        !$OMP PARALLEL WORKSHARE
         zu_frc(:,:) = zu_frc(:,:) + zraur * z1_2 * ( utau_b(:,:) + utau(:,:) ) * r1_hu_n(:,:)
         zv_frc(:,:) = zv_frc(:,:) + zraur * z1_2 * ( vtau_b(:,:) + vtau(:,:) ) * r1_hv_n(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
      IF ( ln_apr_dyn ) THEN                          ! Add atm pressure forcing
         IF (ln_bt_fw) THEN
+           !$OMP PARALLEL DO schedule(static) private(jj,ji,zu_spg,zv_spg)
            DO jj = 2, jpjm1
               DO ji = fs_2, fs_jpim1   ! vector opt.
…
            END DO
         ELSE
+           !$OMP PARALLEL DO schedule(static) private(jj,ji,zu_spg,zv_spg)
            DO jj = 2, jpjm1
               DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ! Initialize barotropic variables:
      IF( ll_init )THEN
+        !$OMP PARALLEL WORKSHARE
         sshbb_e(:,:) = 0._wp
         ubb_e  (:,:) = 0._wp
…
         ub_e   (:,:) = 0._wp
         vb_e   (:,:) = 0._wp
+        !$OMP END PARALLEL WORKSHARE
      ENDIF

      IF( ln_wd ) THEN      !preserve the positivity of water depth
                            !ssh[b,n,a] should have already been processed for this
+        !$OMP PARALLEL WORKSHARE
         sshbb_e(:,:) = MAX(sshbb_e(:,:), rn_wdmin1 - bathy(:,:))
         sshb_e (:,:) = MAX(sshb_e (:,:), rn_wdmin1 - bathy(:,:))
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
      IF (ln_bt_fw) THEN                              ! FORWARD integration: start from NOW fields
+        !$OMP PARALLEL WORKSHARE
         sshn_e(:,:) = sshn(:,:)
         un_e  (:,:) = un_b(:,:)
…
         hur_e (:,:) = r1_hu_n(:,:)
         hvr_e (:,:) = r1_hv_n(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ELSE                                            ! CENTRED integration: start from BEFORE fields
+        !$OMP PARALLEL WORKSHARE
         sshn_e(:,:) = sshb(:,:)
         un_e  (:,:) = ub_b(:,:)
…
         hur_e (:,:) = r1_hu_b(:,:)
         hvr_e (:,:) = r1_hv_b(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
…
      !
      ! Initialize sums:
+     !$OMP PARALLEL WORKSHARE
      ua_b  (:,:) = 0._wp                             ! After barotropic velocities (or transport if flux form)
      va_b  (:,:) = 0._wp
…
      un_adv(:,:) = 0._wp                             ! Sum for now transport issued from ts loop
      vn_adv(:,:) = 0._wp
+     !$OMP END PARALLEL WORKSHARE
      !                                               ! ==================== !
      DO jn = 1, icycle                               !  sub-time-step loop  !
…

      ! Extrapolate barotropic velocities at step jit+0.5:
+     !$OMP PARALLEL WORKSHARE
      ua_e(:,:) = za1 * un_e(:,:) + za2 * ub_e(:,:) + za3 * ubb_e(:,:)
      va_e(:,:) = za1 * vn_e(:,:) + za2 * vb_e(:,:) + za3 * vbb_e(:,:)
+     !$OMP END PARALLEL WORKSHARE

      IF( .NOT.ln_linssh ) THEN                       !* Update ocean depth (variable volume case only)
…
         zsshp2_e(:,:) = za1 * sshn_e(:,:) + za2 * sshb_e(:,:) + za3 * sshbb_e(:,:)
         !
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1                             ! Sea Surface Height at u- & v-points
            DO ji = 2, fs_jpim1   ! Vector opt.
…
         CALL lbc_lnk_multi( zwx, 'U', 1._wp, zwy, 'V', 1._wp )
         !
+        !$OMP PARALLEL WORKSHARE
         zhup2_e (:,:) = hu_0(:,:) + zwx(:,:)         ! Ocean depth at U- and V-points
         zhvp2_e (:,:) = hv_0(:,:) + zwy(:,:)
+        !$OMP END PARALLEL WORKSHARE
         IF( ln_wd ) THEN
            zhup2_e(:,:) = MAX(zhup2_e (:,:), rn_wdmin1)
            zhvp2_e(:,:) = MAX(zhvp2_e (:,:), rn_wdmin1)
         END IF
      ELSE
+        !$OMP PARALLEL WORKSHARE
         zhup2_e (:,:) = hu_n(:,:)
         zhvp2_e (:,:) = hv_n(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !                                               !* after ssh
…
      ! considering fluxes below:
      !
+     !$OMP PARALLEL WORKSHARE
      zwx(:,:) = e2u(:,:) * ua_e(:,:) * zhup2_e(:,:)  ! fluxes at jn+0.5
      zwy(:,:) = e1v(:,:) * va_e(:,:) * zhvp2_e(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
   #if defined key_agrif
…
      ! Sum over sub-time-steps to compute advective velocities
      za2 = wgtbtp2(jn)
+     !$OMP PARALLEL WORKSHARE
      un_adv(:,:) = un_adv(:,:) + za2 * zwx(:,:) * r1_e2u(:,:)
      vn_adv(:,:) = vn_adv(:,:) + za2 * zwy(:,:) * r1_e1v(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      ! Set next sea level:
+     !$OMP PARALLEL DO schedule(static) private(jj,ji)
      DO jj = 2, jpjm1
         DO ji = fs_2, fs_jpim1   ! vector opt.
…
         END DO
      END DO
+     !$OMP PARALLEL WORKSHARE
      ssha_e(:,:) = (  sshn_e(:,:) - rdtbt * ( zssh_frc(:,:) + zhdiv(:,:) )  ) * ssmask(:,:)
+     !$OMP END PARALLEL WORKSHARE
      IF( ln_wd ) ssha_e(:,:) = MAX(ssha_e(:,:), rn_wdmin1 - bathy(:,:))
      CALL lbc_lnk( ssha_e, 'T',  1._wp )
…
      ! Sea Surface Height at u-,v-points (vvl case only)
      IF( .NOT.ln_linssh ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1      ! NO Vector Opt.
…
         wduflt1(:,:) = 1._wp
         wdvflt1(:,:) = 1._wp
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1
…
      IF( .NOT.ln_linssh .AND. .NOT.ln_dynadv_vec ) THEN     !* Vector form
         !
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zx1,zy1)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1
…
      !
      IF( ln_dynvor_ene .OR. ln_dynvor_mix ) THEN     !== energy conserving or mixed scheme ==!
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zy1,zy2,zx1,zx2)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ELSEIF ( ln_dynvor_ens ) THEN                   !== enstrophy conserving scheme ==!
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zx1,zy1)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ELSEIF ( ln_dynvor_een ) THEN                   !== energy and enstrophy conserving scheme ==!
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      ! Add tidal astronomical forcing if defined
      IF ( lk_tide.AND.ln_tide_pot ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zu_spg,zv_spg)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ! Add bottom stresses:
+     !$OMP PARALLEL WORKSHARE
      zu_trd(:,:) = zu_trd(:,:) + bfrua(:,:) * un_e(:,:) * hur_e(:,:)
      zv_trd(:,:) = zv_trd(:,:) + bfrva(:,:) * vn_e(:,:) * hvr_e(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      ! Add top stresses:
+     !$OMP PARALLEL WORKSHARE
      zu_trd(:,:) = zu_trd(:,:) + tfrua(:,:) * un_e(:,:) * hur_e(:,:)
      zv_trd(:,:) = zv_trd(:,:) + tfrva(:,:) * vn_e(:,:) * hvr_e(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !
      ! Surface pressure trend:

      IF( ln_wd ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zu_spg,zv_spg)
         DO jj = 2, jpjm1
            DO ji = 2, jpim1
…
         END DO
      ELSE
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zu_spg,zv_spg)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ! Set next velocities:
      IF( ln_dynadv_vec .OR. ln_linssh ) THEN         !* Vector form
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
      !
      ELSE                                            !* Flux form
+        !$OMP PARALLEL DO schedule(static) private(jj,ji,zhura,zhvra)
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1   ! vector opt.
…
            hv_e (:,:) = MAX(hv_0(:,:) + zsshv_a(:,:), rn_wdmin1)
         ELSE
+           !$OMP PARALLEL WORKSHARE
            hu_e (:,:) = hu_0(:,:) + zsshu_a(:,:)
            hv_e (:,:) = hv_0(:,:) + zsshv_a(:,:)
+           !$OMP END PARALLEL WORKSHARE
         END IF
+        !$OMP PARALLEL WORKSHARE
         hur_e(:,:) = ssumask(:,:) / ( hu_e(:,:) + 1._wp - ssumask(:,:) )
         hvr_e(:,:) = ssvmask(:,:) / ( hv_e(:,:) + 1._wp - ssvmask(:,:) )
+        !$OMP END PARALLEL WORKSHARE
         !
      ENDIF
…
      !                                               !* Swap
      !                                               ! ----
+     !$OMP PARALLEL WORKSHARE
      ubb_e  (:,:) = ub_e  (:,:)
      ub_e   (:,:) = un_e  (:,:)
…
      sshb_e (:,:) = sshn_e(:,:)
      sshn_e (:,:) = ssha_e(:,:)
+     !$OMP END PARALLEL WORKSHARE

      !                                               !* Sum over whole bt loop
…
      za1 = wgtbtp1(jn)
      IF( ln_dynadv_vec .OR. ln_linssh ) THEN         ! Sum velocities
+        !$OMP PARALLEL WORKSHARE
         ua_b  (:,:) = ua_b  (:,:) + za1 * ua_e  (:,:)
         va_b  (:,:) = va_b  (:,:) + za1 * va_e  (:,:)
+        !$OMP END PARALLEL WORKSHARE
      ELSE                                            ! Sum transports
+        !$OMP PARALLEL WORKSHARE
         ua_b  (:,:) = ua_b  (:,:) + za1 * ua_e  (:,:) * hu_e (:,:)
         va_b  (:,:) = va_b  (:,:) + za1 * va_e  (:,:) * hv_e (:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !                                               ! Sum sea level
+     !$OMP PARALLEL WORKSHARE
      ssha(:,:) = ssha(:,:) + za1 * ssha_e(:,:)
+     !$OMP END PARALLEL WORKSHARE
      !                                               ! ==================== !
      END DO                                          !        end loop      !
…
      !
      ! Set advection velocity correction:
+     !$OMP PARALLEL WORKSHARE
      zwx(:,:) = un_adv(:,:)
      zwy(:,:) = vn_adv(:,:)
+     !$OMP END PARALLEL WORKSHARE
      IF( ( kt == nit000 .AND. neuler==0 ) .OR. .NOT.ln_bt_fw ) THEN
+        !$OMP PARALLEL WORKSHARE
         un_adv(:,:) = zwx(:,:) * r1_hu_n(:,:)
         vn_adv(:,:) = zwy(:,:) * r1_hv_n(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ELSE
+        !$OMP PARALLEL WORKSHARE
         un_adv(:,:) = z1_2 * ( ub2_b(:,:) + zwx(:,:) ) * r1_hu_n(:,:)
         vn_adv(:,:) = z1_2 * ( vb2_b(:,:) + zwy(:,:) ) * r1_hv_n(:,:)
+        !$OMP END PARALLEL WORKSHARE
      END IF

      IF( ln_bt_fw ) THEN                             ! Save integrated transport for next computation
+        !$OMP PARALLEL WORKSHARE
         ub2_b(:,:) = zwx(:,:)
         vb2_b(:,:) = zwy(:,:)
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
      ! Update barotropic trend:
      IF( ln_dynadv_vec .OR. ln_linssh ) THEN
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk=1,jpkm1
            ua(:,:,jk) = ua(:,:,jk) + ( ua_b(:,:) - ub_b(:,:) ) * z1_2dt_b
…
      ELSE
         ! At this stage, ssha has been corrected: compute new depths at velocity points
+        !$OMP PARALLEL DO schedule(static) private(jj,ji)
         DO jj = 1, jpjm1
            DO ji = 1, jpim1      ! NO Vector Opt.
…
         CALL lbc_lnk_multi( zsshu_a, 'U', 1._wp, zsshv_a, 'V', 1._wp )   ! Boundary conditions
         !
+        !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk=1,jpkm1
            ua(:,:,jk) = ua(:,:,jk) + r1_hu_n(:,:) * ( ua_b(:,:) - ub_b(:,:) * hu_b(:,:) ) * z1_2dt_b
…
         END DO
         ! Save barotropic velocities not transport:
+        !$OMP PARALLEL WORKSHARE
         ua_b(:,:) = ua_b(:,:) / ( hu_0(:,:) + zsshu_a(:,:) + 1._wp - ssumask(:,:) )
         va_b(:,:) = va_b(:,:) / ( hv_0(:,:) + zsshv_a(:,:) + 1._wp - ssvmask(:,:) )
+        !$OMP END PARALLEL WORKSHARE
      ENDIF
      !
+     !$OMP PARALLEL DO schedule(static) private(jk)
      DO jk = 1, jpkm1
         ! Correct velocities:
…
      CALL wrk_alloc( jpi,jpj, zcu )
      !
+     !$OMP PARALLEL DO schedule(static) private(jj, ji, zxr2, zyr2)
      DO jj = 1, jpj
         DO ji =1, jpi
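dynspg_ts.F90 now opens and closes dozens of parallel regions per barotropic sub-step, and each `!$OMP PARALLEL ...` pays a fork/join plus an implied barrier. Where profiling shows that overhead matters, adjacent regions can be merged under a single `PARALLEL` with bare worksharing constructs inside it; a minimal sketch of that refactor (generic arrays, not a claim about what this branch does):

    PROGRAM hoisted_region_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 128
       INTEGER :: jj, ji
       REAL :: a(n,n), b(n,n)
       b = 2.0
       !$OMP PARALLEL private(jj, ji)
       !$OMP WORKSHARE
       a(:,:) = 0.0                     ! worksharing construct, no new fork
       !$OMP END WORKSHARE              ! implicit barrier: zeroing completes first
       !$OMP DO schedule(static)
       DO jj = 1, n
          DO ji = 1, n
             a(ji,jj) = a(ji,jj) + b(ji,jj)
          END DO
       END DO
       !$OMP END DO
       !$OMP END PARALLEL
       PRINT *, a(1,1)                  ! expect 2.0
    END PROGRAM hoisted_region_demo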
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynvor.F90
r6140 r6748 106 106 CASE ( np_ENE ) !* energy conserving scheme 107 107 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 108 !$OMP PARALLEL WORKSHARE 108 109 ztrdu(:,:,:) = ua(:,:,:) 109 110 ztrdv(:,:,:) = va(:,:,:) 111 !$OMP END PARALLEL WORKSHARE 110 112 CALL vor_ene( kt, nrvm, ua, va ) ! relative vorticity or metric trend 113 !$OMP PARALLEL WORKSHARE 111 114 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 112 115 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 116 !$OMP END PARALLEL WORKSHARE 113 117 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 118 !$OMP PARALLEL WORKSHARE 114 119 ztrdu(:,:,:) = ua(:,:,:) 115 120 ztrdv(:,:,:) = va(:,:,:) 121 !$OMP END PARALLEL WORKSHARE 116 122 CALL vor_ene( kt, ncor, ua, va ) ! planetary vorticity trend 123 !$OMP PARALLEL WORKSHARE 117 124 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 118 125 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 126 !$OMP END PARALLEL WORKSHARE 119 127 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 120 128 ELSE … … 124 132 CASE ( np_ENS ) !* enstrophy conserving scheme 125 133 IF( l_trddyn ) THEN ! trend diagnostics: splitthe trend in two 134 !$OMP PARALLEL WORKSHARE 126 135 ztrdu(:,:,:) = ua(:,:,:) 127 136 ztrdv(:,:,:) = va(:,:,:) 137 !$OMP END PARALLEL WORKSHARE 128 138 CALL vor_ens( kt, nrvm, ua, va ) ! relative vorticity or metric trend 139 !$OMP PARALLEL WORKSHARE 129 140 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 130 141 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 142 !$OMP END PARALLEL WORKSHARE 131 143 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 144 !$OMP PARALLEL WORKSHARE 132 145 ztrdu(:,:,:) = ua(:,:,:) 133 146 ztrdv(:,:,:) = va(:,:,:) 147 !$OMP END PARALLEL WORKSHARE 134 148 CALL vor_ens( kt, ncor, ua, va ) ! planetary vorticity trend 149 !$OMP PARALLEL WORKSHARE 135 150 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 136 151 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 152 !$OMP END PARALLEL WORKSHARE 137 153 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 138 154 ELSE … … 142 158 CASE ( np_MIX ) !* mixed ene-ens scheme 143 159 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 160 !$OMP PARALLEL WORKSHARE 144 161 ztrdu(:,:,:) = ua(:,:,:) 145 162 ztrdv(:,:,:) = va(:,:,:) 163 !$OMP END PARALLEL WORKSHARE 146 164 CALL vor_ens( kt, nrvm, ua, va ) ! relative vorticity or metric trend (ens) 165 !$OMP PARALLEL WORKSHARE 147 166 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 148 167 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 168 !$OMP END PARALLEL WORKSHARE 149 169 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 170 !$OMP PARALLEL WORKSHARE 150 171 ztrdu(:,:,:) = ua(:,:,:) 151 172 ztrdv(:,:,:) = va(:,:,:) 173 !$OMP END PARALLEL WORKSHARE 152 174 CALL vor_ene( kt, ncor, ua, va ) ! planetary vorticity trend (ene) 175 !$OMP PARALLEL WORKSHARE 153 176 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 154 177 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 178 !$OMP END PARALLEL WORKSHARE 155 179 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 156 180 ELSE … … 161 185 CASE ( np_EEN ) !* energy and enstrophy conserving scheme 162 186 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 187 !$OMP PARALLEL WORKSHARE 163 188 ztrdu(:,:,:) = ua(:,:,:) 164 189 ztrdv(:,:,:) = va(:,:,:) 190 !$OMP END PARALLEL WORKSHARE 165 191 CALL vor_een( kt, nrvm, ua, va ) ! 
relative vorticity or metric trend 192 !$OMP PARALLEL WORKSHARE 166 193 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 167 194 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 195 !$OMP END PARALLEL WORKSHARE 168 196 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 197 !$OMP PARALLEL WORKSHARE 169 198 ztrdu(:,:,:) = ua(:,:,:) 170 199 ztrdv(:,:,:) = va(:,:,:) 200 !$OMP END PARALLEL WORKSHARE 171 201 CALL vor_een( kt, ncor, ua, va ) ! planetary vorticity trend 202 !$OMP PARALLEL WORKSHARE 172 203 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 173 204 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 205 !$OMP END PARALLEL WORKSHARE 174 206 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 175 207 ELSE … … 237 269 SELECT CASE( kvor ) !== vorticity considered ==! 238 270 CASE ( np_COR ) !* Coriolis (planetary vorticity) 271 !$OMP PARALLEL WORKSHARE 239 272 zwz(:,:) = ff(:,:) 273 !$OMP END PARALLEL WORKSHARE 240 274 CASE ( np_RVO ) !* relative vorticity 275 !$OMP PARALLEL DO private(jj,ji) 241 276 DO jj = 1, jpjm1 242 277 DO ji = 1, fs_jpim1 ! vector opt. … … 246 281 END DO 247 282 CASE ( np_MET ) !* metric term 283 !$OMP PARALLEL DO private(jj,ji) 248 284 DO jj = 1, jpjm1 249 285 DO ji = 1, fs_jpim1 ! vector opt. … … 254 290 END DO 255 291 CASE ( np_CRV ) !* Coriolis + relative vorticity 292 !$OMP PARALLEL DO private(jj,ji) 256 293 DO jj = 1, jpjm1 257 294 DO ji = 1, fs_jpim1 ! vector opt. … … 262 299 END DO 263 300 CASE ( np_CME ) !* Coriolis + metric 301 !$OMP PARALLEL DO private(jj,ji) 264 302 DO jj = 1, jpjm1 265 303 DO ji = 1, fs_jpim1 ! vector opt. … … 275 313 ! 276 314 IF( ln_dynvor_msk ) THEN !== mask/unmask vorticity ==! 315 !$OMP PARALLEL DO private(jj,ji) 277 316 DO jj = 1, jpjm1 278 317 DO ji = 1, fs_jpim1 ! vector opt. … … 287 326 zwy(:,:) = e1v(:,:) * e3v_n(:,:,jk) * vn(:,:,jk) 288 327 ELSE 328 !$OMP PARALLEL WORKSHARE 289 329 zwx(:,:) = e2u(:,:) * un(:,:,jk) 290 330 zwy(:,:) = e1v(:,:) * vn(:,:,jk) 331 !$OMP END PARALLEL WORKSHARE 291 332 ENDIF 292 333 ! !== compute and add the vorticity term trend =! 334 !$OMP PARALLEL DO private(jj, ji, zy1, zy2, zx1, zx2) 293 335 DO jj = 2, jpjm1 294 336 DO ji = fs_2, fs_jpim1 ! vector opt. -
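Note on the recurring pattern in dynvor.F90: every save/difference of ztrdu/ztrdv is whole-array syntax, so it is wrapped in PARALLEL WORKSHARE rather than rewritten as an explicit loop nest. A minimal compilable sketch of that construct (array names and sizes are illustrative, not the NEMO fields):

PROGRAM workshare_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: n = 32
   REAL :: ua(n,n,n), ztrdu(n,n,n)
   ua(:,:,:) = 2.0
!$OMP PARALLEL WORKSHARE
   ztrdu(:,:,:) = ua(:,:,:)                    ! save the trend before the kernel
   ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)     ! difference after the kernel
!$OMP END PARALLEL WORKSHARE
   PRINT *, 'max |diff| =', MAXVAL( ABS( ztrdu ) )   ! expect 0.0
END PROGRAM workshare_sketch

WORKSHARE divides the array assignments among the threads of the enclosing parallel region while preserving ordinary Fortran statement ordering, so the second assignment sees the completed first one.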
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzad.F90
r6140 r6748 77 77 IF( l_trddyn ) THEN ! Save ua and va trends 78 78 CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv ) 79 !$OMP PARALLEL WORKSHARE 79 80 ztrdu(:,:,:) = ua(:,:,:) 80 81 ztrdv(:,:,:) = va(:,:,:) 82 !$OMP END PARALLEL WORKSHARE 81 83 ENDIF 82 84 83 85 DO jk = 2, jpkm1 ! Vertical momentum advection at level w and u- and v- vertical 86 !$OMP PARALLEL DO schedule(static) private(jj, ji) 84 87 DO jj = 2, jpj ! vertical fluxes 85 88 DO ji = fs_2, jpi ! vector opt. … … 87 90 END DO 88 91 END DO 92 !$OMP PARALLEL DO schedule(static) private(jj, ji) 89 93 DO jj = 2, jpjm1 ! vertical momentum advection at w-point 90 94 DO ji = fs_2, fs_jpim1 ! vector opt. … … 97 101 ! Surface and bottom advective fluxes set to zero 98 102 IF ( ln_isfcav ) THEN 103 !$OMP PARALLEL DO schedule(static) private(jj, ji) 99 104 DO jj = 2, jpjm1 100 105 DO ji = fs_2, fs_jpim1 ! vector opt. … … 106 111 END DO 107 112 ELSE 113 !$OMP PARALLEL DO schedule(static) private(jj, ji) 108 114 DO jj = 2, jpjm1 109 115 DO ji = fs_2, fs_jpim1 ! vector opt. … … 116 122 END IF 117 123 124 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zua, zva) 118 125 DO jk = 1, jpkm1 ! Vertical momentum advection at u- and v-points 119 126 DO jj = 2, jpjm1 … … 130 137 131 138 IF( l_trddyn ) THEN ! save the vertical advection trends for diagnostic 139 !$OMP PARALLEL WORKSHARE 132 140 ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:) 133 141 ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:) 142 !$OMP END PARALLEL WORKSHARE 134 143 CALL trd_dyn( ztrdu, ztrdv, jpdyn_zad, kt ) 135 144 CALL wrk_dealloc( jpi, jpj, jpk, ztrdu, ztrdv ) -
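A small point on the private() lists used here and throughout: the index of the loop the directive binds to (jk in the last nest above) is private automatically, so listing it is redundant but harmless; the per-point temporaries zua and zva, however, must be privatised or the threads would race on a single shared scalar. A reduced compilable sketch (illustrative names and sizes):

PROGRAM private_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: n = 32
   REAL    :: w(n,n,n), u(n,n,n), trend(n,n,n)
   REAL    :: zua
   INTEGER :: ji, jj, jk
   w = 1.0 ; u = 1.0 ; trend = 0.0
!$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zua)   ! jk is private anyway
   DO jk = 1, n-1
      DO jj = 2, n-1
         DO ji = 2, n-1
            zua = -0.25 * ( w(ji,jj,jk) + w(ji,jj,jk+1) )     ! per-thread scratch
            trend(ji,jj,jk) = trend(ji,jj,jk) + zua * ( u(ji,jj,jk) - u(ji,jj,jk+1) )
         END DO
      END DO
   END DO
   PRINT *, trend(2,2,1)
END PROGRAM private_sketch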
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzdf.F90
r6140 r6748 66 66 IF( l_trddyn ) THEN ! temporary save of ta and sa trends 67 67 CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv ) 68 !$OMP PARALLEL WORKSHARE 68 69 ztrdu(:,:,:) = ua(:,:,:) 69 70 ztrdv(:,:,:) = va(:,:,:) 71 !$OMP END PARALLEL WORKSHARE 70 72 ENDIF 71 73 … … 78 80 79 81 IF( l_trddyn ) THEN ! save the vertical diffusive trends for further diagnostics 82 !$OMP PARALLEL WORKSHARE 80 83 ztrdu(:,:,:) = ( ua(:,:,:) - ub(:,:,:) ) / r2dt - ztrdu(:,:,:) 81 84 ztrdv(:,:,:) = ( va(:,:,:) - vb(:,:,:) ) / r2dt - ztrdv(:,:,:) 85 !$OMP END PARALLEL WORKSHARE 82 86 CALL trd_dyn( ztrdu, ztrdv, jpdyn_zdf, kt ) 83 87 CALL wrk_dealloc( jpi, jpj, jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzdf_imp.F90
r6140 r6748 112 112 ! 113 113 IF( ln_bfrimp ) THEN 114 !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv) 114 115 DO jj = 2, jpjm1 115 116 DO ji = 2, jpim1 … … 121 122 END DO 122 123 IF ( ln_isfcav ) THEN 124 !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv) 123 125 DO jj = 2, jpjm1 124 126 DO ji = 2, jpim1 … … 172 174 ! non zero value at the ocean bottom depending on the bottom friction used. 173 175 ! 176 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ze3ua, zzwi, zzws) 174 177 DO jk = 1, jpkm1 ! Matrix 175 178 DO jj = 2, jpjm1 … … 207 210 ! 208 211 DO jk = 2, jpkm1 !== First recurrence : Dk = Dk - Lk * Uk-1 / Dk-1 (increasing k) == 212 !$OMP PARALLEL DO schedule(static) private(jj, ji) 209 213 DO jj = 2, jpjm1 210 214 DO ji = fs_2, fs_jpim1 ! vector opt. … … 214 218 END DO 215 219 ! 220 !$OMP PARALLEL DO schedule(static) private(jj, ji, ze3ua) 216 221 DO jj = 2, jpjm1 !== second recurrence: SOLk = RHSk - Lk / Dk-1 Lk-1 ==! 217 222 DO ji = fs_2, fs_jpim1 ! vector opt. … … 222 227 END DO 223 228 DO jk = 2, jpkm1 229 !$OMP PARALLEL DO schedule(static) private(jj, ji) 224 230 DO jj = 2, jpjm1 225 231 DO ji = fs_2, fs_jpim1 … … 229 235 END DO 230 236 ! 231 DO jj = 2, jpjm1 !== thrid recurrence : SOLk = ( Lk - Uk * Ek+1 ) / Dk ==! 237 !$OMP PARALLEL DO schedule(static) private(jj, ji) 238 DO jj = 2, jpjm1 !== thrid recurrence : SOLk = ( Lk - Uk * Ek+1 ) / Dk ==! 232 239 DO ji = fs_2, fs_jpim1 ! vector opt. 233 240 ua(ji,jj,jpkm1) = ua(ji,jj,jpkm1) / zwd(ji,jj,jpkm1) … … 235 242 END DO 236 243 DO jk = jpk-2, 1, -1 237 DO jj = 2, jpjm1 244 !$OMP PARALLEL DO schedule(static) private(jj, ji) 245 DO jj = 2, jpjm1 238 246 DO ji = fs_2, fs_jpim1 239 247 ua(ji,jj,jk) = ( ua(ji,jj,jk) - zws(ji,jj,jk) * ua(ji,jj,jk+1) ) / zwd(ji,jj,jk) … … 248 256 ! non zero value at the ocean bottom depending on the bottom friction used 249 257 ! 258 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ze3va, zzwi, zzws) 250 259 DO jk = 1, jpkm1 ! Matrix 251 260 DO jj = 2, jpjm1 … … 260 269 END DO 261 270 END DO 271 !$OMP PARALLEL DO schedule(static) private(jj, ji) 262 272 DO jj = 2, jpjm1 ! Surface boundary conditions 263 273 DO ji = fs_2, fs_jpim1 ! vector opt. … … 283 293 ! 284 294 DO jk = 2, jpkm1 !== First recurrence : Dk = Dk - Lk * Uk-1 / Dk-1 (increasing k) == 295 !$OMP PARALLEL DO schedule(static) private(jj, ji) 285 296 DO jj = 2, jpjm1 286 297 DO ji = fs_2, fs_jpim1 ! vector opt. … … 290 301 END DO 291 302 ! 303 !$OMP PARALLEL DO schedule(static) private(jj, ji, ze3va) 292 304 DO jj = 2, jpjm1 !== second recurrence: SOLk = RHSk - Lk / Dk-1 Lk-1 ==! 293 305 DO ji = fs_2, fs_jpim1 ! vector opt. … … 298 310 END DO 299 311 DO jk = 2, jpkm1 312 !$OMP PARALLEL DO schedule(static) private(jj, ji) 300 313 DO jj = 2, jpjm1 301 314 DO ji = fs_2, fs_jpim1 ! vector opt. … … 305 318 END DO 306 319 ! 320 !$OMP PARALLEL DO schedule(static) private(jj, ji) 307 321 DO jj = 2, jpjm1 !== third recurrence : SOLk = ( Lk - Uk * SOLk+1 ) / Dk ==! 308 322 DO ji = fs_2, fs_jpim1 ! vector opt. … … 311 325 END DO 312 326 DO jk = jpk-2, 1, -1 327 !$OMP PARALLEL DO schedule(static) private(jj, ji) 313 328 DO jj = 2, jpjm1 314 329 DO ji = fs_2, fs_jpim1 … … 322 337 !!gm I almost sure it is !!!! 323 338 IF( ln_bfrimp ) THEN 339 !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv) 324 340 DO jj = 2, jpjm1 325 341 DO ji = 2, jpim1 … … 331 347 END DO 332 348 IF (ln_isfcav) THEN 349 !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv) 333 350 DO jj = 2, jpjm1 334 351 DO ji = 2, jpim1 -
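The dynzdf_imp.F90 hunks show why jk cannot simply be made the parallel index here: both tridiagonal recurrences couple level jk to jk-1 (or jk+1), so the changeset keeps jk sequential and threads each horizontal plane instead. A self-contained sketch of that column-wise Thomas solve, with zwi/zwd/zws the lower/main/upper diagonals as in the code above (a reduced form under assumed array shapes, not the NEMO routine itself):

SUBROUTINE tridiag_columns( ni, nj, nk, zwi, zws, zwd, x )
   IMPLICIT NONE
   INTEGER, INTENT(in)    :: ni, nj, nk
   REAL,    INTENT(in)    :: zwi(ni,nj,nk)    ! lower diagonal
   REAL,    INTENT(in)    :: zws(ni,nj,nk)    ! upper diagonal
   REAL,    INTENT(inout) :: zwd(ni,nj,nk)    ! main diagonal (overwritten)
   REAL,    INTENT(inout) :: x(ni,nj,nk)      ! rhs on entry, solution on exit
   INTEGER :: ji, jj, jk
   DO jk = 2, nk                    ! forward elimination: sequential in jk
!$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, nj
         DO ji = 1, ni
            zwd(ji,jj,jk) = zwd(ji,jj,jk) - zwi(ji,jj,jk) * zws(ji,jj,jk-1) / zwd(ji,jj,jk-1)
            x  (ji,jj,jk) = x  (ji,jj,jk) - zwi(ji,jj,jk) / zwd(ji,jj,jk-1) * x(ji,jj,jk-1)
         END DO
      END DO
   END DO
!$OMP PARALLEL DO schedule(static) private(jj, ji)
   DO jj = 1, nj                    ! back substitution starts at the last level
      DO ji = 1, ni
         x(ji,jj,nk) = x(ji,jj,nk) / zwd(ji,jj,nk)
      END DO
   END DO
   DO jk = nk-1, 1, -1              ! back substitution: sequential in jk
!$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, nj
         DO ji = 1, ni
            x(ji,jj,jk) = ( x(ji,jj,jk) - zws(ji,jj,jk) * x(ji,jj,jk+1) ) / zwd(ji,jj,jk)
         END DO
      END DO
   END DO
END SUBROUTINE tridiag_columns

Opening a fresh parallel region at every level costs a fork/join per jk; a single PARALLEL region containing OMP DO loops would avoid that, but the sketch mirrors the structure the diff actually adopts.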
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/sshwzv.F90
r6152 r6748 73 73 INTEGER, INTENT(in) :: kt ! time step 74 74 ! 75 INTEGER :: jk ! dummy loop indice75 INTEGER :: jk, jj, ji ! dummy loop indice 76 76 REAL(wp) :: z2dt, zcoef ! local scalars 77 77 REAL(wp), POINTER, DIMENSION(:,: ) :: zhdiv ! 2D workspace … … 96 96 ! ! After Sea Surface Height ! 97 97 ! !------------------------------! 98 !$OMP PARALLEL WORKSHARE 98 99 zhdiv(:,:) = 0._wp 100 !$OMP END PARALLEL WORKSHARE 99 101 DO jk = 1, jpkm1 ! Horizontal divergence of barotropic transports 100 zhdiv(:,:) = zhdiv(:,:) + e3t_n(:,:,jk) * hdivn(:,:,jk) 102 !$OMP PARALLEL DO schedule(static) private(jj, ji) 103 DO jj = 1, jpj 104 DO ji = 1, jpi ! vector opt. 105 zhdiv(ji,jj) = zhdiv(ji,jj) + e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) 106 END DO 107 END DO 101 108 END DO 102 109 ! ! Sea surface elevation time stepping … … 107 114 108 115 IF(ln_wd) CALL wad_lmt(sshb, zcoef * (emp_b(:,:) + emp(:,:)), z2dt) 109 116 !$OMP PARALLEL WORKSHARE 110 117 ssha(:,:) = ( sshb(:,:) - z2dt * ( zcoef * ( emp_b(:,:) + emp(:,:) ) + zhdiv(:,:) ) ) * ssmask(:,:) 111 118 !$OMP END PARALLEL WORKSHARE 112 119 IF ( .NOT.ln_dynspg_ts ) THEN 113 120 ! These lines are not necessary with time splitting since … … 127 134 IF( lk_asminc .AND. ln_sshinc .AND. ln_asmiau ) THEN ! Include the IAU weighted SSH increment 128 135 CALL ssh_asm_inc( kt ) 136 !$OMP PARALLEL WORKSHARE 129 137 ssha(:,:) = ssha(:,:) + z2dt * ssh_iau(:,:) 138 !$OMP END PARALLEL WORKSHARE 130 139 ENDIF 131 140 #endif … … 183 192 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! z_tilde and layer cases 184 193 CALL wrk_alloc( jpi, jpj, jpk, zhdiv ) 185 ! 194 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 186 195 DO jk = 1, jpkm1 187 196 ! horizontal divergence of thickness diffusion transport ( velocity multiplied by e3t) … … 198 207 DO jk = jpkm1, 1, -1 ! integrate from the bottom the hor. divergence 199 208 ! computation of w 200 wn(:,:,jk) = wn(:,:,jk+1) - ( e3t_n(:,:,jk) * hdivn(:,:,jk) + zhdiv(:,:,jk) & 201 & + z1_2dt * ( e3t_a(:,:,jk) - e3t_b(:,:,jk) ) ) * tmask(:,:,jk) 209 !$OMP PARALLEL DO schedule(static) private(jj, ji) 210 DO jj = 1, jpj 211 DO ji = 1, jpi ! vector opt. 212 wn(ji,jj,jk) = wn(ji,jj,jk+1) - ( e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) + zhdiv(ji,jj,jk) & 213 & + z1_2dt * ( e3t_a(ji,jj,jk) - e3t_b(ji,jj,jk) ) ) * tmask(ji,jj,jk) 214 END DO 215 END DO 202 216 END DO 203 217 ! IF( ln_vvl_layer ) wn(:,:,:) = 0.e0 … … 205 219 ELSE ! z_star and linear free surface cases 206 220 DO jk = jpkm1, 1, -1 ! integrate from the bottom the hor. divergence 221 !$OMP PARALLEL DO schedule(static) private(jj, ji) 222 DO jj = 1, jpj 223 DO ji = 1, jpi ! vector opt. 207 224 ! computation of w 208 wn(:,:,jk) = wn(:,:,jk+1) - ( e3t_n(:,:,jk) * hdivn(:,:,jk) & 209 & + z1_2dt * ( e3t_a(:,:,jk) - e3t_b(:,:,jk) ) ) * tmask(:,:,jk) 225 wn(ji,jj,jk) = wn(ji,jj,jk+1) - ( e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) & 226 & + z1_2dt * ( e3t_a(ji,jj,jk) - e3t_b(ji,jj,jk) ) ) * tmask(ji,jj,jk) 227 END DO 228 END DO 210 229 END DO 211 230 ENDIF … … 213 232 #if defined key_bdy 214 233 IF( lk_bdy ) THEN 234 !$OMP PARALLEL DO schedule(static) private(jk) 215 235 DO jk = 1, jpkm1 216 236 wn(:,:,jk) = wn(:,:,jk) * bdytmask(:,:) … … 258 278 IF( ( neuler == 0 .AND. kt == nit000 ) .OR. & 259 279 & ( ln_bt_fw .AND. ln_dynspg_ts ) ) THEN 280 !$OMP PARALLEL WORKSHARE 260 281 sshb(:,:) = sshn(:,:) ! before <-- now 261 282 sshn(:,:) = ssha(:,:) ! now <-- after (before already = now) 283 !$OMP END PARALLEL WORKSHARE 262 284 ! 263 285 ELSE !== Leap-Frog time-stepping: Asselin filter + swap ==! 264 286 ! ! 
before <-- now filtered 287 !$OMP PARALLEL WORKSHARE 265 288 sshb(:,:) = sshn(:,:) + atfp * ( sshb(:,:) - 2 * sshn(:,:) + ssha(:,:) ) 289 !$OMP END PARALLEL WORKSHARE 266 290 IF( .NOT.ln_linssh ) THEN ! before <-- with forcing removed 267 291 zcoef = atfp * rdt * r1_rau0 292 !$OMP PARALLEL WORKSHARE 268 293 sshb(:,:) = sshb(:,:) - zcoef * ( emp_b(:,:) - emp (:,:) & 269 294 & - rnf_b(:,:) + rnf (:,:) & 270 295 & + fwfisf_b(:,:) - fwfisf(:,:) ) * ssmask(:,:) 296 !$OMP END PARALLEL WORKSHARE 271 297 ENDIF 298 !$OMP PARALLEL WORKSHARE 272 299 sshn(:,:) = ssha(:,:) ! now <-- after 300 !$OMP END PARALLEL WORKSHARE 273 301 ENDIF 274 302 ! -
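The sshwzv.F90 rewrite above is the other recurring transformation in this changeset: an array-syntax accumulation gives PARALLEL DO no loop to bind to, so it is expanded into explicit (jj,ji) loops, while the jk integration itself stays sequential because level jk depends on jk+1. A reduced compilable sketch (illustrative names and sizes):

PROGRAM expand_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: ni = 32, nj = 32, nk = 16
   REAL    :: e3t(ni,nj,nk), hdiv(ni,nj,nk), zhdiv(ni,nj)
   INTEGER :: ji, jj, jk
   e3t = 1.0 ; hdiv = 1.0 ; zhdiv = 0.0
   DO jk = 1, nk-1        ! was: zhdiv(:,:) = zhdiv(:,:) + e3t(:,:,jk)*hdiv(:,:,jk)
!$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, nj
         DO ji = 1, ni
            zhdiv(ji,jj) = zhdiv(ji,jj) + e3t(ji,jj,jk) * hdiv(ji,jj,jk)
         END DO
      END DO
   END DO
   PRINT *, zhdiv(1,1)    ! expect REAL(nk-1)
END PROGRAM expand_sketch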
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ICB/icbini.F90
r5215 r6748 85 85 first_width (:) = SQRT( rn_initial_mass(:) / ( rn_LoW_ratio * rn_rho_bergs * rn_initial_thickness(:) ) ) 86 86 first_length(:) = rn_LoW_ratio * first_width(:) 87 87 !$OMP PARALLEL WORKSHARE 88 88 berg_grid%calving (:,:) = 0._wp 89 89 berg_grid%calving_hflx (:,:) = 0._wp … … 95 95 src_calving (:,:) = 0._wp 96 96 src_calving_hflx (:,:) = 0._wp 97 97 !$OMP END PARALLEL WORKSHARE 98 98 ! ! domain for icebergs 99 99 IF( lk_mpp .AND. jpni == 1 ) CALL ctl_stop( 'icbinit: having ONE processor in x currently does not work' ) … … 108 108 nicbfldproc(:) = -1 109 109 110 !$OMP PARALLEL DO schedule(static) private(jj, ji) 110 111 DO jj = 1, jpj 111 112 DO ji = 1, jpi … … 218 219 CALL flush(numicb) 219 220 ENDIF 220 221 !$OMP PARALLEL WORKSHARE 221 222 src_calving (:,:) = 0._wp 222 223 src_calving_hflx(:,:) = 0._wp 223 224 !$OMP END PARALLEL WORKSHARE 224 225 ! assign each new iceberg with a unique number constructed from the processor number 225 226 ! and incremented by the total number of processors -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
r6490 r6748 381 381 ! 382 382 ! WARNING ptab is defined only between nld and nle 383 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 383 384 DO jk = 1, jpk 384 385 DO jj = nlcj+1, jpj ! added line(s) (inner only) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/mppini.F90
r6412 r6748 155 155 ! The last line of blocks (west) will have fewer points 156 156 157 !$OMP PARALLEL DO schedule(static) private(jj,ji) 157 158 DO jj = 1, jpnj 158 159 DO ji=1, jpni-1 … … 164 165 #else 165 166 167 !$OMP PARALLEL DO schedule(static) private(jj,ji) 166 168 DO jj = 1, jpnj 167 169 DO ji = 1, iresti … … 174 176 175 177 #endif 178 !$OMP PARALLEL WORKSHARE 176 179 nfilcit(:,:) = ilcit(:,:) 180 !$OMP END PARALLEL WORKSHARE 177 181 IF( irestj == 0 ) irestj = jpnj 178 182 … … 202 206 ! ------------------------------- 203 207 208 !$OMP PARALLEL WORKSHARE 204 209 iimppt(:,:) = 1 205 210 ijmppt(:,:) = 1 211 !$OMP END PARALLEL WORKSHARE 206 212 207 213 IF( jpni > 1 ) THEN 214 !$OMP PARALLEL DO schedule(static) private(jj,ji) 208 215 DO jj = 1, jpnj 209 216 DO ji = 2, jpni … … 212 219 END DO 213 220 ENDIF 221 !$OMP PARALLEL WORKSHARE 214 222 nfiimpp(:,:)=iimppt(:,:) 223 !$OMP END PARALLEL WORKSHARE 215 224 216 225 IF( jpnj > 1 ) THEN 226 !$OMP PARALLEL DO schedule(static) private(jj,ji) 217 227 DO jj = 2, jpnj 218 228 DO ji = 1, jpni -
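Worth noting in the iimppt/ijmppt hunks: the loop bodies are elided in this listing, but in NEMO these loops accumulate subdomain offsets along the inner index (iimppt(ji,jj) built from iimppt(ji-1,jj)), so the directive is bound to the independent outer loop and the recursive inner loop stays sequential. A compilable sketch of that choice (illustrative names; the prefix-sum body is an assumption about the elided code):

PROGRAM outer_loop_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: npi = 8, npj = 8
   INTEGER :: ioff(npi,npj), ilen(npi,npj)
   INTEGER :: ji, jj
   ioff = 1 ; ilen = 10
!$OMP PARALLEL DO schedule(static) private(jj, ji)
   DO jj = 1, npj            ! rows are independent: safe to thread
      DO ji = 2, npi         ! running sum along ji: must stay sequential
         ioff(ji,jj) = ioff(ji-1,jj) + ilen(ji-1,jj)
      END DO
   END DO
   PRINT *, ioff(npi,1)      ! expect 1 + 10*(npi-1)
END PROGRAM outer_loop_sketch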
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfdyn.F90
r6140 r6748 136 136 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'ldf_dyn_init: failed to allocate arrays') 137 137 ! 138 !$OMP PARALLEL WORKSHARE 138 139 ahmt(:,:,jpk) = 0._wp ! last level always 0 139 140 ahmf(:,:,jpk) = 0._wp 141 !$OMP END PARALLEL WORKSHARE 140 142 ! 141 143 ! ! value of eddy mixing coef. … … 154 156 CASE( 0 ) !== constant ==! 155 157 IF(lwp) WRITE(numout,*) ' momentum mixing coef. = constant ' 158 !$OMP PARALLEL WORKSHARE 156 159 ahmt(:,:,:) = zah0 * tmask(:,:,:) 157 160 ahmf(:,:,:) = zah0 * fmask(:,:,:) 161 !$OMP END PARALLEL WORKSHARE 158 162 ! 159 163 CASE( 10 ) !== fixed profile ==! 160 164 IF(lwp) WRITE(numout,*) ' momentum mixing coef. = F( depth )' 165 !$OMP PARALLEL WORKSHARE 161 166 ahmt(:,:,1) = zah0 * tmask(:,:,1) ! constant surface value 162 167 ahmf(:,:,1) = zah0 * fmask(:,:,1) 168 !$OMP END PARALLEL WORKSHARE 163 169 CALL ldf_c1d( 'DYN', r1_4, ahmt(:,:,1), ahmf(:,:,1), ahmt, ahmf ) 164 170 ! … … 172 178 !! do we introduce a scaling by the max value of the array, and then multiply by zah0 ???? 173 179 !! better: check that the max is <=1 i.e. it is a shape from 0 to 1, not a coef that has physical dimension 180 !$OMP PARALLEL DO schedule(static) private(jk) 174 181 DO jk = 2, jpkm1 175 182 ahmt(:,:,jk) = ahmt(:,:,1) * tmask(:,:,jk) … … 190 197 !!gm Question : info for LAP or BLP case to take into account the SQRT in the bilaplacian case ???? 191 198 !! do we introduce a scaling by the max value of the array, and then multiply by zah0 ???? 199 !$OMP PARALLEL DO schedule(static) private(jk) 192 200 DO jk = 1, jpkm1 193 201 ahmt(:,:,jk) = ahmt(:,:,jk) * tmask(:,:,jk) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfslp.F90
r6352 r6748 135 135 z1_slpmax = 1._wp / rn_slpmax 136 136 ! 137 138 !$OMP PARALLEL WORKSHARE 137 139 zww(:,:,:) = 0._wp 138 140 zwz(:,:,:) = 0._wp 139 ! 141 !$OMP END PARALLEL WORKSHARE 142 ! 143 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 140 144 DO jk = 1, jpk !== i- & j-gradient of density ==! 141 145 DO jj = 1, jpjm1 … … 147 151 END DO 148 152 IF( ln_zps ) THEN ! partial steps correction at the bottom ocean level 153 !$OMP PARALLEL DO schedule(static) private(jj, ji) 149 154 DO jj = 1, jpjm1 150 155 DO ji = 1, jpim1 … … 155 160 ENDIF 156 161 IF( ln_zps .AND. ln_isfcav ) THEN ! partial steps correction at the bottom ocean level 162 !$OMP PARALLEL DO schedule(static) private(jj, ji) 157 163 DO jj = 1, jpjm1 158 164 DO ji = 1, jpim1 … … 164 170 ! 165 171 zdzr(:,:,1) = 0._wp !== Local vertical density gradient at T-point == ! (evaluated from N^2) 172 !$OMP PARALLEL DO schedule(static) private(jk) 166 173 DO jk = 2, jpkm1 167 174 ! ! zdzr = d/dz(prd)= - ( prd ) / grav * mk(pn2) -- at t point … … 182 189 ! 183 190 IF ( ln_isfcav ) THEN 191 !$OMP PARALLEL DO schedule(static) private(jj,ji) 184 192 DO jj = 2, jpjm1 185 193 DO ji = fs_2, fs_jpim1 ! vector opt. … … 191 199 END DO 192 200 ELSE 201 !$OMP PARALLEL DO schedule(static) private(jj,ji) 193 202 DO jj = 2, jpjm1 194 203 DO ji = fs_2, fs_jpim1 ! vector opt. … … 198 207 END DO 199 208 END IF 200 209 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zau, zav, zbu, zbv, zfj, zfi, zdepu, zdepv) 201 210 DO jk = 2, jpkm1 !* Slopes at u and v points 202 211 DO jj = 2, jpjm1 … … 239 248 ! 240 249 ! !* horizontal Shapiro filter 250 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 241 251 DO jk = 2, jpkm1 242 252 DO jj = 2, jpjm1, MAX(1, jpj-3) ! rows jj=2 and =jpjm1 only … … 283 293 ! =========================== | wslpj = mij( d/dj( prd ) / d/dz( prd ) 284 294 ! 295 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zbw, zfk, zck, zbi, zbj, zai, zaj, zci, zcj) 285 296 DO jk = 2, jpkm1 286 297 DO jj = 2, jpjm1 … … 321 332 ! 322 333 ! !* horizontal Shapiro filter 334 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zcofw, zck) 323 335 DO jk = 2, jpkm1 324 336 DO jj = 2, jpjm1, MAX(1, jpj-3) ! rows jj=2 and =jpjm1 only … … 429 441 ! 430 442 ip = jl ; jp = jl ! guaranteed nonzero gradients ( absolute value larger than repsln) 443 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zdit,zdis,zdjt,zdjs,zdxrho_raw,zdyrho_raw) 431 444 DO jk = 1, jpkm1 ! done each pair of triad 432 445 DO jj = 1, jpjm1 ! NB: not masked ==> a minimum value is set … … 445 458 ! 446 459 IF( ln_zps .AND. l_grad_zps ) THEN ! partial steps: correction of i- & j-grad on bottom 460 !$OMP PARALLEL DO schedule(static) private(jj,ji,iku,zdit,zdis,zdxrho_raw,zdyrho_raw) 447 461 DO jj = 1, jpjm1 448 462 DO ji = 1, jpim1 … … 676 690 ! 677 691 ! !== surface mixed layer mask ! 692 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ik) 678 693 DO jk = 1, jpk ! =1 inside the mixed layer, =0 otherwise 679 694 DO jj = 1, jpj … … 698 713 !----------------------------------------------------------------------- 699 714 ! 715 !$OMP PARALLEL DO schedule(static) private(jj, ji, iku, ikv, zbu, zbv, zau, zav, ik, ikm1, zbw, zci, zcj, zai, zaj, zbi, zbj) 700 716 DO jj = 2, jpjm1 701 717 DO ji = 2, jpim1 … … 791 807 ! Direction of lateral diffusion (tracers and/or momentum) 792 808 ! ------------------------------ 793 uslp (:,:,:) = 0._wp ; uslpml (:,:) = 0._wp ! 
set the slope to zero (even in s-coordinates) 794 vslp (:,:,:) = 0._wp ; vslpml (:,:) = 0._wp 795 wslpi(:,:,:) = 0._wp ; wslpiml(:,:) = 0._wp 796 wslpj(:,:,:) = 0._wp ; wslpjml(:,:) = 0._wp 797 809 810 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 811 DO jk = 1, jpk 812 DO jj = 1, jpj 813 DO ji = 1, jpi 814 uslp (ji,jj,jk) = 0._wp 815 vslp (ji,jj,jk) = 0._wp 816 wslpi(ji,jj,jk) = 0._wp 817 wslpj(ji,jj,jk) = 0._wp 818 END DO 819 END DO 820 END DO 821 !$OMP PARALLEL DO schedule(static) private(jj, ji) 822 DO jj = 1, jpj 823 DO ji = 1, jpi 824 uslpml (ji,jj) = 0._wp 825 vslpml (ji,jj) = 0._wp 826 wslpiml(ji,jj) = 0._wp 827 wslpjml(ji,jj) = 0._wp 828 END DO 829 END DO 830 798 831 !!gm I no longer understand this..... 799 832 !!gm IF( (ln_traldf_hor .OR. ln_dynldf_hor) .AND. .NOT. (.NOT.ln_linssh .AND. ln_rstart) ) THEN -
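The slope initialisation above is the one place where whole-array assignments are deliberately rewritten as explicit loops rather than wrapped in WORKSHARE. A plausible motivation, not stated in the changeset, is that some compilers execute WORKSHARE on a single thread, whereas an explicit PARALLEL DO guarantees the work is distributed and, under a first-touch NUMA policy, places memory pages near the threads that later use them. A reduced sketch:

PROGRAM init_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: n = 32
   REAL    :: uslp(n,n,n), vslp(n,n,n)
   INTEGER :: ji, jj, jk
!$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
   DO jk = 1, n
      DO jj = 1, n
         DO ji = 1, n
            uslp(ji,jj,jk) = 0.0   ! first touch by the owning thread
            vslp(ji,jj,jk) = 0.0
         END DO
      END DO
   END DO
   PRINT *, MAXVAL( uslp ), MAXVAL( vslp )
END PROGRAM init_sketch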
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldftra.F90
r6140 r6748 185 185 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'ldf_tra_init: failed to allocate arrays') 186 186 ! 187 !$OMP PARALLEL WORKSHARE 187 188 ahtu(:,:,jpk) = 0._wp ! last level always 0 188 189 ahtv(:,:,jpk) = 0._wp 190 !$OMP END PARALLEL WORKSHARE 189 191 ! 190 192 ! ! value of eddy mixing coef. … … 201 203 CASE( 0 ) !== constant ==! 202 204 IF(lwp) WRITE(numout,*) ' tracer mixing coef. = constant = ', rn_aht_0 205 !$OMP PARALLEL WORKSHARE 203 206 ahtu(:,:,:) = zah0 * umask(:,:,:) 204 207 ahtv(:,:,:) = zah0 * vmask(:,:,:) 208 !$OMP END PARALLEL WORKSHARE 205 209 ! 206 210 CASE( 10 ) !== fixed profile ==! 207 211 IF(lwp) WRITE(numout,*) ' tracer mixing coef. = F( depth )' 212 !$OMP PARALLEL WORKSHARE 208 213 ahtu(:,:,1) = zah0 * umask(:,:,1) ! constant surface value 209 214 ahtv(:,:,1) = zah0 * vmask(:,:,1) 215 !$OMP END PARALLEL WORKSHARE 210 216 CALL ldf_c1d( 'TRA', r1_4, ahtu(:,:,1), ahtv(:,:,1), ahtu, ahtv ) 211 217 ! … … 216 222 CALL iom_get ( inum, jpdom_data, 'ahtv_2D', ahtv(:,:,1) ) 217 223 CALL iom_close( inum ) 224 !$OMP PARALLEL DO schedule(static) private(jk) 218 225 DO jk = 2, jpkm1 219 226 ahtu(:,:,jk) = ahtu(:,:,1) * umask(:,:,jk) … … 245 252 CALL iom_get ( inum, jpdom_data, 'ahtv_3D', ahtv ) 246 253 CALL iom_close( inum ) 254 !$OMP PARALLEL DO schedule(static) private(jk) 247 255 DO jk = 1, jpkm1 248 256 ahtu(:,:,jk) = ahtu(:,:,jk) * umask(:,:,jk) … … 268 276 ! 269 277 IF( ln_traldf_blp .AND. .NOT. l_ldftra_time ) THEN 278 !$OMP PARALLEL WORKSHARE 270 279 ahtu(:,:,:) = SQRT( ahtu(:,:,:) ) 271 280 ahtv(:,:,:) = SQRT( ahtv(:,:,:) ) 281 !$OMP END PARALLEL WORKSHARE 272 282 ENDIF 273 283 ! … … 422 432 CASE( 0 ) !== constant ==! 423 433 IF(lwp) WRITE(numout,*) ' eddy induced velocity coef. = constant = ', rn_aeiv_0 434 !$OMP PARALLEL WORKSHARE 424 435 aeiu(:,:,:) = rn_aeiv_0 425 436 aeiv(:,:,:) = rn_aeiv_0 437 !$OMP END PARALLEL WORKSHARE 426 438 ! 427 439 CASE( 10 ) !== fixed profile ==! 428 440 IF(lwp) WRITE(numout,*) ' eddy induced velocity coef. = F( depth )' 441 !$OMP PARALLEL WORKSHARE 429 442 aeiu(:,:,1) = rn_aeiv_0 ! constant surface value 430 443 aeiv(:,:,1) = rn_aeiv_0 444 !$OMP END PARALLEL WORKSHARE 431 445 CALL ldf_c1d( 'TRA', r1_4, aeiu(:,:,1), aeiv(:,:,1), aeiu, aeiv ) 432 446 ! … … 437 451 CALL iom_get ( inum, jpdom_data, 'aeiv', aeiv(:,:,1) ) 438 452 CALL iom_close( inum ) 453 !$OMP PARALLEL DO schedule(static) private(jk) 439 454 DO jk = 2, jpk 440 455 aeiu(:,:,jk) = aeiu(:,:,1) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcana.F90
r6140 r6748 203 203 ztrp= - 40.e0 ! retroaction term on heat fluxes (W/m2/K) 204 204 zconv = 3.16e-5 ! convertion factor: 1 m/yr => 3.16e-5 mm/s 205 !$OMP PARALLEL DO schedule(static) private(jj, ji, t_star) 205 206 DO jj = 1, jpj 206 207 DO ji = 1, jpi … … 238 239 239 240 ! freshwater (mass flux) and update of qns with heat content of emp 241 !$OMP PARALLEL WORKSHARE 240 242 emp (:,:) = emp(:,:) - zsumemp * tmask(:,:,1) ! freshwater flux (=0 in domain average) 241 243 sfx (:,:) = 0.0_wp ! no salt flux 242 244 qns (:,:) = qns(:,:) - emp(:,:) * sst_m(:,:) * rcp ! evap and precip are at SST 243 245 !$OMP END PARALLEL WORKSHARE 244 246 245 247 ! ---------------------------- ! … … 267 269 ztau_sais = 0.015 268 270 ztaun = ztau - ztau_sais * COS( (ztime - ztimemax) / (ztimemin - ztimemax) * rpi ) 271 !$OMP PARALLEL DO schedule(static) private(jj, ji) 269 272 DO jj = 1, jpj 270 273 DO ji = 1, jpi … … 278 281 ! module of wind stress and wind speed at T-point 279 282 zcoef = 1. / ( zrhoa * zcdrag ) 283 !$OMP PARALLEL DO schedule(static) private(jj, ji, ztx, zty, zmod) 280 284 DO jj = 2, jpjm1 281 285 DO ji = fs_2, fs_jpim1 ! vect. opt. -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcmod.F90
r6460 r6748 187 187 IF( .NOT. ln_isf ) THEN ! variable initialisation if no ice shelf 188 188 IF( sbc_isf_alloc() /= 0 ) CALL ctl_stop( 'STOP', 'sbc_init : unable to allocate sbc_isf arrays' ) 189 !$OMP PARALLEL WORKSHARE 189 190 fwfisf (:,:) = 0.0_wp ; fwfisf_b (:,:) = 0.0_wp 190 191 risf_tsc(:,:,:) = 0.0_wp ; risf_tsc_b(:,:,:) = 0.0_wp 192 !$OMP END PARALLEL WORKSHARE 191 193 END IF 192 194 IF( nn_ice == 0 .AND. nn_components /= jp_iam_opa ) fr_i(:,:) = 0._wp ! no ice in the domain, ice fraction is always zero 193 195 !$OMP PARALLEL WORKSHARE 194 196 sfx(:,:) = 0._wp ! the salt flux due to freezing/melting will be computed (i.e. will be non-zero) 195 197 ! only if sea-ice is present … … 198 200 199 201 taum(:,:) = 0._wp ! Initialise taum for use in gls in case of reduced restart 200 202 !$OMP END PARALLEL WORKSHARE 201 203 ! ! restartability 202 204 IF( ( nn_ice == 2 .OR. nn_ice ==3 ) .AND. .NOT.( ln_blk_clio .OR. ln_blk_core .OR. ln_cpl ) ) & … … 318 320 IF( kt /= nit000 ) THEN ! Swap of forcing fields ! 319 321 ! ! ---------------------------------------- ! 322 !$OMP PARALLEL WORKSHARE 320 323 utau_b(:,:) = utau(:,:) ! Swap the ocean forcing fields 321 324 vtau_b(:,:) = vtau(:,:) ! (except at nit000 where before fields … … 323 326 emp_b (:,:) = emp (:,:) 324 327 sfx_b (:,:) = sfx (:,:) 328 !$OMP END PARALLEL WORKSHARE 325 329 IF ( ln_rnf ) THEN 330 !$OMP PARALLEL WORKSHARE 326 331 rnf_b (:,: ) = rnf (:,: ) 327 332 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 333 !$OMP END PARALLEL WORKSHARE 328 334 ENDIF 329 335 ENDIF … … 404 410 CALL iom_get( numror, jpdom_autoglo, 'sfx_b', sfx_b ) ! before salt flux (T-point) 405 411 ELSE 412 !$OMP PARALLEL WORKSHARE 406 413 sfx_b (:,:) = sfx(:,:) 414 !$OMP END PARALLEL WORKSHARE 407 415 ENDIF 408 416 ELSE !* no restart: set from nit000 values 409 417 IF(lwp) WRITE(numout,*) ' nit000-1 surface forcing fields set to nit000' 418 !$OMP PARALLEL WORKSHARE 410 419 utau_b(:,:) = utau(:,:) 411 420 vtau_b(:,:) = vtau(:,:) … … 413 422 emp_b (:,:) = emp(:,:) 414 423 sfx_b (:,:) = sfx(:,:) 424 !$OMP END PARALLEL WORKSHARE 415 425 ENDIF 416 426 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssm.F90
r6489 r6748 59 59 ! 60 60 ! !* surface T-, U-, V- ocean level variables (T, S, depth, velocity) 61 !$OMP PARALLEL DO schedule(static) private(jj, ji) 61 62 DO jj = 1, jpj 62 63 DO ji = 1, jpi … … 68 69 IF( nn_fsbc == 1 ) THEN ! Instantaneous surface fields ! 69 70 ! ! ---------------------------------------- ! 71 !$OMP PARALLEL WORKSHARE 70 72 ssu_m(:,:) = ub(:,:,1) 71 73 ssv_m(:,:) = vb(:,:,1) 74 !$OMP END PARALLEL WORKSHARE 72 75 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 73 76 ELSE ; sst_m(:,:) = zts(:,:,jp_tem) 74 77 ENDIF 78 !$OMP PARALLEL WORKSHARE 75 79 sss_m(:,:) = zts(:,:,jp_sal) 80 !$OMP END PARALLEL WORKSHARE 76 81 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 77 IF( ln_apr_dyn ) THEN ; ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 78 ELSE ; ssh_m(:,:) = sshn(:,:) 79 ENDIF 80 ! 82 IF( ln_apr_dyn ) THEN 83 !$OMP PARALLEL WORKSHARE 84 ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 85 !$OMP END PARALLEL WORKSHARE 86 ELSE 87 !$OMP PARALLEL WORKSHARE 88 ssh_m(:,:) = sshn(:,:) 89 !$OMP END PARALLEL WORKSHARE 90 ENDIF 91 ! 92 !$OMP PARALLEL WORKSHARE 81 93 e3t_m(:,:) = e3t_n(:,:,1) 82 94 ! 83 95 frq_m(:,:) = fraqsr_1lev(:,:) 96 !$OMP END PARALLEL WORKSHARE 84 97 ! 85 98 ELSE … … 90 103 IF(lwp) WRITE(numout,*) '~~~~~~~ mean fields initialised to instantaneous values' 91 104 zcoef = REAL( nn_fsbc - 1, wp ) 105 !$OMP PARALLEL WORKSHARE 92 106 ssu_m(:,:) = zcoef * ub(:,:,1) 93 107 ssv_m(:,:) = zcoef * vb(:,:,1) 108 !$OMP END PARALLEL WORKSHARE 94 109 IF( l_useCT ) THEN ; sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 95 110 ELSE ; sst_m(:,:) = zcoef * zts(:,:,jp_tem) 96 111 ENDIF 112 !$OMP PARALLEL WORKSHARE 97 113 sss_m(:,:) = zcoef * zts(:,:,jp_sal) 114 !$OMP END PARALLEL WORKSHARE 98 115 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 99 IF( ln_apr_dyn ) THEN ; ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) ) 100 ELSE ; ssh_m(:,:) = zcoef * sshn(:,:) 116 IF( ln_apr_dyn ) THEN 117 !$OMP PARALLEL WORKSHARE 118 ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) ) 119 !$OMP END PARALLEL WORKSHARE 120 ELSE 121 !$OMP PARALLEL WORKSHARE 122 ssh_m(:,:) = zcoef * sshn(:,:) 123 !$OMP END PARALLEL WORKSHARE 101 124 ENDIF 102 125 ! 126 !$OMP PARALLEL WORKSHARE 103 127 e3t_m(:,:) = zcoef * e3t_n(:,:,1) 104 128 ! 105 129 frq_m(:,:) = zcoef * fraqsr_1lev(:,:) 130 !$OMP END PARALLEL WORKSHARE 106 131 ! ! ---------------------------------------- ! 107 132 ELSEIF( MOD( kt - 2 , nn_fsbc ) == 0 ) THEN ! Initialisation: New mean computation ! 108 133 ! ! ---------------------------------------- ! 134 !$OMP PARALLEL WORKSHARE 109 135 ssu_m(:,:) = 0._wp ! reset to zero ocean mean sbc fields 110 136 ssv_m(:,:) = 0._wp … … 114 140 e3t_m(:,:) = 0._wp 115 141 frq_m(:,:) = 0._wp 142 !$OMP END PARALLEL WORKSHARE 116 143 ENDIF 117 144 ! ! ---------------------------------------- ! 118 145 ! ! Cumulate at each time step ! 119 146 ! ! ---------------------------------------- ! 
147 !$OMP PARALLEL WORKSHARE 120 148 ssu_m(:,:) = ssu_m(:,:) + ub(:,:,1) 121 149 ssv_m(:,:) = ssv_m(:,:) + vb(:,:,1) 150 !$OMP END PARALLEL WORKSHARE 122 151 IF( l_useCT ) THEN ; sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 123 152 ELSE ; sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 124 153 ENDIF 154 !$OMP PARALLEL WORKSHARE 125 155 sss_m(:,:) = sss_m(:,:) + zts(:,:,jp_sal) 156 !$OMP END PARALLEL WORKSHARE 126 157 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 127 IF( ln_apr_dyn ) THEN ; ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 128 ELSE ; ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) 129 ENDIF 130 ! 158 IF( ln_apr_dyn ) THEN 159 !$OMP PARALLEL WORKSHARE 160 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 161 !$OMP END PARALLEL WORKSHARE 162 ELSE 163 !$OMP PARALLEL WORKSHARE 164 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) 165 !$OMP END PARALLEL WORKSHARE 166 ENDIF 167 ! 168 !$OMP PARALLEL WORKSHARE 131 169 e3t_m(:,:) = e3t_m(:,:) + e3t_n(:,:,1) 132 170 ! 133 171 frq_m(:,:) = frq_m(:,:) + fraqsr_1lev(:,:) 172 !$OMP END PARALLEL WORKSHARE 134 173 135 174 ! ! ---------------------------------------- ! … … 137 176 ! ! ---------------------------------------- ! 138 177 zcoef = 1. / REAL( nn_fsbc, wp ) 178 !$OMP PARALLEL WORKSHARE 139 179 sst_m(:,:) = sst_m(:,:) * zcoef ! mean SST [Celcius] 140 180 sss_m(:,:) = sss_m(:,:) * zcoef ! mean SSS [psu] … … 144 184 e3t_m(:,:) = e3t_m(:,:) * zcoef ! mean vertical scale factor [m] 145 185 frq_m(:,:) = frq_m(:,:) * zcoef ! mean fraction of solar net radiation absorbed in the 1st T level [-] 186 !$OMP END PARALLEL WORKSHARE 146 187 ! 147 188 ENDIF … … 223 264 & 'from ', zf_sbc, ' to ', nn_fsbc 224 265 zcoef = REAL( nn_fsbc - 1, wp ) / zf_sbc 266 !$OMP PARALLEL WORKSHARE 225 267 ssu_m(:,:) = zcoef * ssu_m(:,:) 226 268 ssv_m(:,:) = zcoef * ssv_m(:,:) … … 230 272 e3t_m(:,:) = zcoef * e3t_m(:,:) 231 273 frq_m(:,:) = zcoef * frq_m(:,:) 274 !$OMP END PARALLEL WORKSHARE 232 275 ELSE 233 276 IF(lwp) WRITE(numout,*) '~~~~~~~ mean fields read in the ocean restart file' … … 239 282 ! 240 283 IF(lwp) WRITE(numout,*) ' default initialisation of ss?_m arrays' 284 !$OMP PARALLEL WORKSHARE 241 285 ssu_m(:,:) = ub(:,:,1) 242 286 ssv_m(:,:) = vb(:,:,1) 287 !$OMP END PARALLEL WORKSHARE 243 288 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( tsn(:,:,1,jp_tem), tsn(:,:,1,jp_sal) ) 244 289 ELSE ; sst_m(:,:) = tsn(:,:,1,jp_tem) 245 290 ENDIF 291 !$OMP PARALLEL WORKSHARE 246 292 sss_m(:,:) = tsn (:,:,1,jp_sal) 247 293 ssh_m(:,:) = sshn (:,:) 248 294 e3t_m(:,:) = e3t_n(:,:,1) 249 295 frq_m(:,:) = 1._wp 296 !$OMP END PARALLEL WORKSHARE 250 297 ! 251 298 ENDIF -
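The sbcssm.F90 hunks also show why several one-line IF statements were expanded: a directive cannot be attached to a statement of the form IF( cond ) x(:,:) = ..., so each branch is turned into a block IF hosting its own WORKSHARE. A compilable sketch mirroring the ssh_m fragment above (declarations are illustrative, added only to make the unit self-contained):

SUBROUTINE ssh_mean_sketch( ni, nj, ln_apr_dyn, sshn, ssh_ib, ssh_ibb, ssh_m )
   IMPLICIT NONE
   INTEGER, INTENT(in)  :: ni, nj
   LOGICAL, INTENT(in)  :: ln_apr_dyn
   REAL,    INTENT(in)  :: sshn(ni,nj), ssh_ib(ni,nj), ssh_ibb(ni,nj)
   REAL,    INTENT(out) :: ssh_m(ni,nj)
   IF( ln_apr_dyn ) THEN            ! block IF: each arm can host a directive
!$OMP PARALLEL WORKSHARE
      ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) )
!$OMP END PARALLEL WORKSHARE
   ELSE
!$OMP PARALLEL WORKSHARE
      ssh_m(:,:) = sshn(:,:)
!$OMP END PARALLEL WORKSHARE
   ENDIF
END SUBROUTINE ssh_mean_sketch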
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/eosbn2.F90
r6505 r6748 237 237 CASE( np_teos10, np_eos80 ) !== polynomial TEOS-10 / EOS-80 ==! 238 238 ! 239 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn3, zn2, zn1, zn0, zn) 239 240 DO jk = 1, jpkm1 240 241 DO jj = 1, jpj … … 277 278 CASE( np_seos ) !== simplified EOS ==! 278 279 ! 280 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn) 279 281 DO jk = 1, jpkm1 280 282 DO jj = 1, jpj … … 345 347 END DO 346 348 ! 349 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, jsmp, jdof, zh, zt, zstemp, zs, ztm, zn3, zn2, zn1) 347 350 DO jk = 1, jpkm1 348 351 DO jj = 1, jpj … … 399 402 ! Non-stochastic equation of state 400 403 ELSE 404 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn3, zn2, zn1, zn0, zn) 401 405 DO jk = 1, jpkm1 402 406 DO jj = 1, jpj … … 441 445 CASE( np_seos ) !== simplified EOS ==! 442 446 ! 447 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn) 443 448 DO jk = 1, jpkm1 444 449 DO jj = 1, jpj … … 589 594 CASE( np_teos10, np_eos80 ) !== polynomial TEOS-10 / EOS-80 ==! 590 595 ! 596 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn3, zn2, zn1, zn0, zn) 591 597 DO jk = 1, jpkm1 592 598 DO jj = 1, jpj … … 646 652 CASE( np_seos ) !== simplified EOS ==! 647 653 ! 654 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn) 648 655 DO jk = 1, jpkm1 649 656 DO jj = 1, jpj … … 917 924 IF( nn_timing == 1 ) CALL timing_start('bn2') 918 925 ! 926 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zrw, zaw, zbw) 919 927 DO jk = 2, jpkm1 ! interior points only (2=< jk =< jpkm1 ) 920 928 DO jj = 1, jpj ! surface and bottom value set to zero one for all in istate.F90 … … 1134 1142 CASE( np_teos10, np_eos80 ) !== polynomial TEOS-10 / EOS-80 ==! 1135 1143 ! 1144 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn2, zn1, zn0, zn) 1136 1145 DO jk = 1, jpkm1 1137 1146 DO jj = 1, jpj … … 1197 1206 CASE( np_seos ) !== Vallis (2006) simplified EOS ==! 1198 1207 ! 1208 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zh, zt, zs, ztm, zn) 1199 1209 DO jk = 1, jpkm1 1200 1210 DO jj = 1, jpj -
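The eosbn2.F90 loops carry the longest private() lists in the changeset (zh, zt, zs, ztm, zn0 to zn3, ...), and they are the easiest place to get wrong: any scalar assigned inside the loop body but left off the list is shared, and all threads write it concurrently, silently corrupting the result. A reduced sketch of the safe form (coefficients and names are illustrative, not the TEOS-10 polynomial):

PROGRAM eos_private_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: n = 32
   REAL    :: ts(n,n,n,2), rho(n,n,n)
   REAL    :: zt, zs
   INTEGER :: ji, jj, jk
   ts = 1.0
   ! zt and zs MUST be private: left shared they are clobbered across threads
!$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zt, zs)
   DO jk = 1, n
      DO jj = 1, n
         DO ji = 1, n
            zt = ts(ji,jj,jk,1)
            zs = ts(ji,jj,jk,2)
            rho(ji,jj,jk) = 1026.0 * ( 1.0 - 2.0e-4*zt + 7.6e-4*zs )
         END DO
      END DO
   END DO
   PRINT *, rho(1,1,1)
END PROGRAM eos_private_sketch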
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv.F90
r6140 r6748 100 100 ! 101 101 ! !== effective transport ==! 102 !$OMP PARALLEL DO schedule(static) private(jk) 102 103 DO jk = 1, jpkm1 103 104 zun(:,:,jk) = e2u (:,:) * e3u_n(:,:,jk) * un(:,:,jk) ! eulerian transport only -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_fct.F90
r6140 r6748 98 98 IF( l_trd ) THEN 99 99 CALL wrk_alloc( jpi, jpj, jpk, ztrdx, ztrdy, ztrdz ) 100 !$OMP PARALLEL WORKSHARE 100 101 ztrdx(:,:,:) = 0._wp ; ztrdy(:,:,:) = 0._wp ; ztrdz(:,:,:) = 0._wp 102 !$OMP END PARALLEL WORKSHARE 101 103 ENDIF 102 104 ! 103 105 ! ! surface & bottom value : flux set to zero one for all 106 !$OMP PARALLEL WORKSHARE 104 107 zwz(:,:, 1 ) = 0._wp 105 108 zwx(:,:,jpk) = 0._wp ; zwy(:,:,jpk) = 0._wp ; zwz(:,:,jpk) = 0._wp 106 109 ! 107 110 zwi(:,:,:) = 0._wp 111 !$OMP END PARALLEL WORKSHARE 108 112 ! 109 113 DO jn = 1, kjpt !== loop over the tracers ==! … … 111 115 ! !== upstream advection with initial mass fluxes & intermediate update ==! 112 116 ! !* upstream tracer flux in the i and j direction 117 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zfp_vj, zfm_vj, zfp_ui,zfm_ui) 113 118 DO jk = 1, jpkm1 114 119 DO jj = 1, jpjm1 … … 125 130 END DO 126 131 ! !* upstream tracer flux in the k direction *! 132 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zfp_wk, zfm_wk) 127 133 DO jk = 2, jpkm1 ! Interior value ( multiplied by wmask) 128 134 DO jj = 1, jpj … … 146 152 ENDIF 147 153 ! 154 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ztra) 148 155 DO jk = 1, jpkm1 !* trend and after field with monotonic scheme 149 156 DO jj = 2, jpjm1 … … 163 170 ! 164 171 IF( l_trd ) THEN ! trend diagnostics (contribution of upstream fluxes) 172 !$OMP PARALLEL WORKSHARE 165 173 ztrdx(:,:,:) = zwx(:,:,:) ; ztrdy(:,:,:) = zwy(:,:,:) ; ztrdz(:,:,:) = zwz(:,:,:) 174 !$OMP END PARALLEL WORKSHARE 166 175 END IF 167 176 ! ! "Poleward" heat and salt transports (contribution of upstream fluxes) … … 176 185 ! 177 186 CASE( 2 ) !- 2nd order centered 187 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 178 188 DO jk = 1, jpkm1 179 189 DO jj = 1, jpjm1 … … 186 196 ! 187 197 CASE( 4 ) !- 4th order centered 198 !$OMP PARALLEL WORKSHARE 188 199 zltu(:,:,jpk) = 0._wp ! Bottom value : flux set to zero 189 200 zltv(:,:,jpk) = 0._wp 201 !$OMP END PARALLEL WORKSHARE 202 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 190 203 DO jk = 1, jpkm1 ! Laplacian 191 204 DO jj = 1, jpjm1 ! 1st derivative (gradient) … … 204 217 CALL lbc_lnk( zltu, 'T', 1. ) ; CALL lbc_lnk( zltv, 'T', 1. ) ! Lateral boundary cond. (unchanged sgn) 205 218 ! 219 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zC2t_u, zC2t_v) 206 220 DO jk = 1, jpkm1 ! Horizontal advective fluxes 207 221 DO jj = 1, jpjm1 … … 217 231 ! 218 232 CASE( 41 ) !- 4th order centered ==>> !!gm coding attempt need to be tested 233 !$OMP PARALLEL WORKSHARE 219 234 ztu(:,:,jpk) = 0._wp ! Bottom value : flux set to zero 220 235 ztv(:,:,jpk) = 0._wp 236 !$OMP END PARALLEL WORKSHARE 237 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 221 238 DO jk = 1, jpkm1 ! 1st derivative (gradient) 222 239 DO jj = 1, jpjm1 … … 229 246 CALL lbc_lnk( ztu, 'U', -1. ) ; CALL lbc_lnk( ztv, 'V', -1. ) ! Lateral boundary cond. (unchanged sgn) 230 247 ! 248 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zC2t_u, zC2t_v, zC4t_u, zC4t_v) 231 249 DO jk = 1, jpkm1 ! Horizontal advective fluxes 232 250 DO jj = 2, jpjm1 … … 249 267 ! 250 268 CASE( 2 ) !- 2nd order centered 269 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 251 270 DO jk = 2, jpkm1 252 271 DO jj = 2, jpjm1 … … 260 279 CASE( 4 ) !- 4th order COMPACT 261 280 CALL interp_4th_cpt( ptn(:,:,:,jn) , ztw ) ! 
zwt = COMPACT interpolation of T at w-point 281 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 262 282 DO jk = 2, jpkm1 263 283 DO jj = 2, jpjm1 … … 282 302 ! !== final trend with corrected fluxes ==! 283 303 ! 304 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 284 305 DO jk = 1, jpkm1 285 306 DO jj = 2, jpjm1 … … 294 315 ! 295 316 IF( l_trd ) THEN ! trend diagnostics (contribution of upstream fluxes) 317 !$OMP PARALLEL WORKSHARE 296 318 ztrdx(:,:,:) = ztrdx(:,:,:) + zwx(:,:,:) ! <<< Add to previously computed 297 319 ztrdy(:,:,:) = ztrdy(:,:,:) + zwy(:,:,:) ! <<< Add to previously computed 298 320 ztrdz(:,:,:) = ztrdz(:,:,:) + zwz(:,:,:) ! <<< Add to previously computed 321 !$OMP END PARALLEL WORKSHARE 299 322 ! 300 323 CALL trd_tra( kt, cdtype, jn, jptra_xad, ztrdx, pun, ptn(:,:,:,jn) ) … … 626 649 zbig = 1.e+40_wp 627 650 zrtrn = 1.e-15_wp 651 !$OMP PARALLEL WORKSHARE 628 652 zbetup(:,:,:) = 0._wp ; zbetdo(:,:,:) = 0._wp 653 !$OMP END PARALLEL WORKSHARE 629 654 630 655 ! Search local extrema … … 636 661 & paft * tmask + zbig * ( 1._wp - tmask ) ) 637 662 663 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ikm1, zup, zdo, zpos, zneg, zbt) 638 664 DO jk = 1, jpkm1 639 665 ikm1 = MAX(jk-1,1) … … 674 700 ! 3. monotonic flux in the i & j direction (paa & pbb) 675 701 ! ---------------------------------------- 702 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, za, zb, zc, zav, zbv, zcv, zau, zbu, zcu) 676 703 DO jk = 1, jpkm1 677 704 DO jj = 2, jpjm1 -
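The traadv_fct.F90 flux loops are representative of why most of these nests can be threaded untouched: each iteration writes only its own (ji,jj,jk) element of zwx/zwy, while neighbouring points are only read, and read from arrays (the tracer and the velocities) that the same loop never writes. A reduced sketch of that write-disjoint stencil (illustrative names and a simplified centred flux):

PROGRAM stencil_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: n = 32
   REAL    :: t(n,n,n), u(n,n,n), v(n,n,n), zwx(n,n,n), zwy(n,n,n)
   INTEGER :: ji, jj, jk
   t = 1.0 ; u = 1.0 ; v = 1.0 ; zwx = 0.0 ; zwy = 0.0
!$OMP PARALLEL DO schedule(static) private(jk, jj, ji)
   DO jk = 1, n-1
      DO jj = 1, n-1
         DO ji = 1, n-1
            ! each thread writes its own (ji,jj,jk); neighbours are read-only
            zwx(ji,jj,jk) = 0.5 * u(ji,jj,jk) * ( t(ji,jj,jk) + t(ji+1,jj,jk) )
            zwy(ji,jj,jk) = 0.5 * v(ji,jj,jk) * ( t(ji,jj,jk) + t(ji,jj+1,jk) )
         END DO
      END DO
   END DO
   PRINT *, zwx(1,1,1), zwy(1,1,1)
END PROGRAM stencil_sketch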
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_mle.F90
r6140 r6748 327 327 IF( ierr /= 0 ) CALL ctl_stop( 'tra_adv_mle_init: failed to allocate arrays' ) 328 328 z1_t2 = 1._wp / ( rn_time * rn_time ) 329 !$OMP PARALLEL DO schedule(static) private(jj, ji, zfu, zfv) 329 330 DO jj = 2, jpj ! "coriolis+ time^-1" at u- & v-points 330 331 DO ji = fs_2, jpi ! vector opt. … … 347 348 ! 348 349 z1_t2 = 1._wp / ( rn_time * rn_time ) 350 !$OMP PARALLEL WORKSHARE 349 351 r1_ft(:,:) = 2._wp * omega * SIN( rad * gphit(:,:) ) 350 352 r1_ft(:,:) = 1._wp / SQRT( r1_ft(:,:) * r1_ft(:,:) + z1_t2 ) 353 !$OMP END PARALLEL WORKSHARE 351 354 ! 352 355 ENDIF -
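One subtlety in the r1_ft block above: the second statement reads the result of the first inside a single WORKSHARE. That is legal; the OpenMP specification requires the implementation to insert whatever synchronisation is needed to preserve ordinary Fortran statement ordering within the construct. A minimal sketch:

PROGRAM workshare_order_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: n = 64
   REAL :: r1_ft(n,n)
   REAL, PARAMETER :: z1_t2 = 1.0e-8
!$OMP PARALLEL WORKSHARE
   r1_ft(:,:) = 3.0                                          ! statement 1
   r1_ft(:,:) = 1.0 / SQRT( r1_ft(:,:)*r1_ft(:,:) + z1_t2 )  ! sees statement 1's result
!$OMP END PARALLEL WORKSHARE
   PRINT *, r1_ft(1,1)   ! roughly 1/3
END PROGRAM workshare_order_sketch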
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traldf.F90
r6352 r6748 65 65 IF( l_trdtra ) THEN !* Save ta and sa trends 66 66 CALL wrk_alloc( jpi,jpj,jpk, ztrdt, ztrds ) 67 !$OMP PARALLEL WORKSHARE 67 68 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 68 69 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 70 !$OMP END PARALLEL WORKSHARE 69 71 ENDIF 70 72 ! … … 81 83 ! 82 84 IF( l_trdtra ) THEN !* save the horizontal diffusive trends for further diagnostics 85 !$OMP PARALLEL WORKSHARE 83 86 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 84 87 ztrds(:,:,:) = tsa(:,:,:,jp_sal) - ztrds(:,:,:) 88 !$OMP END PARALLEL WORKSHARE 85 89 CALL trd_tra( kt, 'TRA', jp_tem, jptra_ldf, ztrdt ) 86 90 CALL trd_tra( kt, 'TRA', jp_sal, jptra_ldf, ztrds ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traldf_iso.F90
r6140 r6748 143 143 IF( kpass == 1 ) THEN !== first pass only ==! 144 144 ! 145 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zmsku, zmskv, zahu_w, zahv_w) 145 146 DO jk = 2, jpkm1 146 147 DO jj = 2, jpjm1 … … 164 165 ! 165 166 IF( ln_traldf_msc ) THEN ! stabilizing vertical diffusivity coefficient 167 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 166 168 DO jk = 2, jpkm1 167 169 DO jj = 2, jpjm1 … … 177 179 ! 178 180 IF( ln_traldf_blp ) THEN ! bilaplacian operator 181 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 179 182 DO jk = 2, jpkm1 180 183 DO jj = 1, jpjm1 … … 186 189 END DO 187 190 ELSEIF( ln_traldf_lap ) THEN ! laplacian operator 191 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ze3w_2, zcoef0) 188 192 DO jk = 2, jpkm1 189 193 DO jj = 1, jpjm1 … … 198 202 ! 199 203 ELSE ! 33 flux set to zero with akz=ah_wslp2 ==>> computed in full implicit 204 !$OMP PARALLEL WORKSHARE 200 205 akz(:,:,:) = ah_wslp2(:,:,:) 206 !$OMP END PARALLEL WORKSHARE 201 207 ENDIF 202 208 ENDIF … … 210 216 !!---------------------------------------------------------------------- 211 217 !!gm : bug.... why (x,:,:)? (1,jpj,:) and (jpi,1,:) should be sufficient.... 218 !$OMP PARALLEL WORKSHARE 212 219 zdit (1,:,:) = 0._wp ; zdit (jpi,:,:) = 0._wp 213 220 zdjt (1,:,:) = 0._wp ; zdjt (jpi,:,:) = 0._wp 221 !$OMP END PARALLEL WORKSHARE 214 222 !!end 215 223 216 224 ! Horizontal tracer gradient 225 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 217 226 DO jk = 1, jpkm1 218 227 DO jj = 1, jpjm1 … … 224 233 END DO 225 234 IF( ln_zps ) THEN ! botton and surface ocean correction of the horizontal gradient 235 !$OMP PARALLEL DO schedule(static) private(jj, ji) 226 236 DO jj = 1, jpjm1 ! bottom correction (partial bottom cell) 227 237 DO ji = 1, fs_jpim1 ! vector opt. … … 231 241 END DO 232 242 IF( ln_isfcav ) THEN ! first wet level beneath a cavity 243 !$OMP PARALLEL DO schedule(static) private(jj, ji) 233 244 DO jj = 1, jpjm1 234 245 DO ji = 1, fs_jpim1 ! vector opt. … … 243 254 !! II - horizontal trend (full) 244 255 !!---------------------------------------------------------------------- 245 ! 246 DO jk = 1, jpkm1 ! Horizontal slab247 !248 ! !== Vertical tracer gradient249 zdk1t(:,:) = ( ptb(:,:,jk,jn) - ptb(:,:,jk+1,jn) ) * wmask(:,:,jk+1) ! level jk+1250 !251 IF( jk == 1 ) THEN ; zdkt(:,:) = zdk1t(:,:) ! surface: zdkt(jk=1)=zdkt(jk=2)252 ELSE ; zdkt(:,:) = ( ptb(:,:,jk-1,jn) - ptb(:,:,jk,jn) ) * wmask(:,:,jk)253 ENDIF254 DO jj = 1 , jpjm1 !== Horizontal fluxes255 DO ji = 1, fs_jpim1 ! vector opt.256 zabe 1 = pahu(ji,jj,jk) * e2_e1u(ji,jj) * e3u_n(ji,jj,jk)257 zabe2 = pahv(ji,jj,jk) * e1_e2v(ji,jj) * e3v_n(ji,jj,jk)258 !259 zmsku = 1. / MAX( wmask(ji+1,jj,jk ) + wmask(ji,jj,jk+1) &260 & + wmask(ji+1,jj,jk+1) + wmask(ji,jj,jk ), 1. )261 !262 zmskv = 1. / MAX( wmask(ji,jj+1,jk ) + wmask(ji,jj,jk+1) &263 & + wmask(ji,jj+1,jk+1) + wmask(ji,jj,jk ), 1. )264 !265 zcof 1 = - pahu(ji,jj,jk) * e2u(ji,jj) * uslp(ji,jj,jk) * zmsku266 zcof2 = - pahv(ji,jj,jk) * e1v(ji,jj) * vslp(ji,jj,jk) * zmskv267 !268 zftu(ji,jj,jk ) = ( zabe1 * zdit(ji,jj,jk) &269 & + zcof1 * ( zdkt (ji+1,jj) + zdk1t(ji,jj) &270 & + zdk1t(ji+1,jj) + zdkt (ji,jj) ) ) * umask(ji,jj,jk)271 zftv(ji,jj,jk) = ( zabe2 * zdjt(ji,jj,jk) &272 & + zcof2 * ( zdkt (ji,jj+1) + zdk1t(ji,jj) &273 & + zdk1t(ji,jj+1) + zdkt (ji,jj) ) ) * vmask(ji,jj,jk)274 275 END DO276 ! 256 !$OMP PARALLEL DO schedule(static) private(jj, ji) 257 DO jj = 1 , jpj !== Horizontal fluxes 258 DO ji = 1, jpi ! vector opt. 
259 zdk1t(ji,jj) = ( ptb(ji,jj,1,jn) - ptb(ji,jj,2,jn) ) * wmask(ji,jj,2) 260 zdkt(ji,jj) = zdk1t(ji,jj) 261 END DO 262 END DO 263 !$OMP PARALLEL DO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2) 264 DO jj = 1 , jpjm1 !== Horizontal fluxes 265 DO ji = 1, fs_jpim1 ! vector opt. 266 zabe1 = pahu(ji,jj,1) * e2_e1u(ji,jj) * e3u_n(ji,jj,1) 267 zabe2 = pahv(ji,jj,1) * e1_e2v(ji,jj) * e3v_n(ji,jj,1) 268 ! 269 zmsku = 1. / MAX( wmask(ji+1,jj,1 ) + wmask(ji,jj,2) & 270 & + wmask(ji+1,jj,2) + wmask(ji,jj,1 ), 1.) 271 ! 272 zmskv = 1. / MAX( wmask(ji,jj+1,1 ) + wmask(ji,jj,2) & 273 & + wmask(ji,jj+1,2) + wmask(ji,jj,1 ), 1.) 274 ! 275 zcof1 = - pahu(ji,jj,1) * e2u(ji,jj) * uslp(ji,jj,1) * zmsku 276 zcof2 = - pahv(ji,jj,1) * e1v(ji,jj) * vslp(ji,jj,1) * zmskv 277 ! 278 zftu(ji,jj,1 ) = ( zabe1 * zdit(ji,jj,1) & 279 & + zcof1 * ( zdkt (ji+1,jj) + zdk1t(ji,jj) & 280 & + zdk1t(ji+1,jj) + zdkt (ji,jj)) ) * umask(ji,jj,1) 281 zftv(ji,jj,1 ) = ( zabe2 * zdjt(ji,jj,1) & 282 & + zcof2 * ( zdkt (ji,jj+1) + zdk1t(ji,jj) & 283 & + zdk1t(ji,jj+1) + zdkt (ji,jj)) ) * vmask(ji,jj,1) 284 END DO 285 END DO 286 ! 287 !$OMP PARALLEL DO schedule(static) private(jj, ji) 277 288 DO jj = 2 , jpjm1 !== horizontal divergence and add to pta 278 289 DO ji = fs_2, fs_jpim1 ! vector opt. 279 pta(ji,jj,jk,jn) = pta(ji,jj,jk,jn) + zsign * ( zftu(ji,jj,jk) - zftu(ji-1,jj,jk) & 280 & + zftv(ji,jj,jk) - zftv(ji,jj-1,jk) ) & 281 & * r1_e1e2t(ji,jj) / e3t_n(ji,jj,jk) 282 END DO 283 END DO 284 END DO ! End of slab 290 pta(ji,jj,1,jn) = pta(ji,jj,1,jn) + zsign * (zftu(ji,jj,1) - zftu(ji-1,jj,1) & 291 & + zftv(ji,jj,1) - zftv(ji,jj-1,1) ) & 292 & * r1_e1e2t(ji,jj) / e3t_n(ji,jj,1) 293 END DO 294 END DO 295 DO jk = 2, jpkm1 296 !$OMP PARALLEL DO schedule(static) private(jj, ji) 297 DO jj = 1 , jpj !== Horizontal fluxes 298 DO ji = 1, jpi ! vector opt. 299 zdk1t(ji,jj) = ( ptb(ji,jj,jk,jn) - ptb(ji,jj,jk+1,jn) ) * wmask(ji,jj,jk+1) 300 zdkt(ji,jj) = ( ptb(ji,jj,jk-1,jn) - ptb(ji,jj,jk,jn) ) * wmask(ji,jj,jk) 301 END DO 302 END DO 303 !$OMP PARALLEL DO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2) 304 DO jj = 1 , jpjm1 !== Horizontal fluxes 305 DO ji = 1, fs_jpim1 ! vector opt. 306 zabe1 = pahu(ji,jj,jk) * e2_e1u(ji,jj) * e3u_n(ji,jj,jk) 307 zabe2 = pahv(ji,jj,jk) * e1_e2v(ji,jj) * e3v_n(ji,jj,jk) 308 ! 309 zmsku = 1. / MAX( wmask(ji+1,jj,jk ) + wmask(ji,jj,jk+1) & 310 & + wmask(ji+1,jj,jk+1) + wmask(ji,jj,jk ), 1.) 311 ! 312 zmskv = 1. / MAX( wmask(ji,jj+1,jk ) + wmask(ji,jj,jk+1) & 313 & + wmask(ji,jj+1,jk+1) + wmask(ji,jj,jk ), 1.) 314 ! 315 zcof1 = - pahu(ji,jj,jk) * e2u(ji,jj) * uslp(ji,jj,jk) * zmsku 316 zcof2 = - pahv(ji,jj,jk) * e1v(ji,jj) * vslp(ji,jj,jk) * zmskv 317 ! 318 zftu(ji,jj,jk ) = ( zabe1 * zdit(ji,jj,jk) & 319 & + zcof1 * ( zdkt (ji+1,jj) + zdk1t(ji,jj) & 320 & + zdk1t(ji+1,jj) + zdkt (ji,jj)) ) * umask(ji,jj,jk) 321 zftv(ji,jj,jk) = ( zabe2 * zdjt(ji,jj,jk) & 322 & + zcof2 * ( zdkt (ji,jj+1) + zdk1t(ji,jj) & 323 & + zdk1t(ji,jj+1) + zdkt (ji,jj)) ) * vmask(ji,jj,jk) 324 END DO 325 END DO 326 ! 327 !$OMP PARALLEL DO schedule(static) private(jj, ji) 328 DO jj = 2 , jpjm1 !== horizontal divergence and add to pta 329 DO ji = fs_2, fs_jpim1 ! vector opt. 
330 pta(ji,jj,jk,jn) = pta(ji,jj,jk,jn) + zsign * (zftu(ji,jj,jk) - zftu(ji-1,jj,jk) & 331 & + zftv(ji,jj,jk) - zftv(ji,jj-1,jk) ) & 332 & * r1_e1e2t(ji,jj) / e3t_n(ji,jj,jk) 333 END DO 334 END DO 335 END DO 336 285 337 286 338 !!---------------------------------------------------------------------- … … 288 340 !!---------------------------------------------------------------------- 289 341 ! 342 !$OMP PARALLEL WORKSHARE 290 343 ztfw(1,:,:) = 0._wp ; ztfw(jpi,:,:) = 0._wp 291 344 ! … … 294 347 ! ! Surface and bottom vertical fluxes set to zero 295 348 ztfw(:,:, 1 ) = 0._wp ; ztfw(:,:,jpk) = 0._wp 349 !$OMP END PARALLEL WORKSHARE 296 350 351 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zmsku, zmskv, zahu_w, zahv_w, zcoef3, zcoef4) 297 352 DO jk = 2, jpkm1 ! interior (2=<jk=<jpk-1) 298 353 DO jj = 2, jpjm1 … … 321 376 ! !== add the vertical 33 flux ==! 322 377 IF( ln_traldf_lap ) THEN ! laplacian case: eddy coef = ah_wslp2 - akz 378 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 323 379 DO jk = 2, jpkm1 324 380 DO jj = 1, jpjm1 … … 334 390 SELECT CASE( kpass ) 335 391 CASE( 1 ) ! 1st pass : eddy coef = ah_wslp2 392 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 336 393 DO jk = 2, jpkm1 337 394 DO jj = 1, jpjm1 … … 344 401 END DO 345 402 CASE( 2 ) ! 2nd pass : eddy flux = ah_wslp2 and akz applied on ptb and ptbb gradients, resp. 403 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 346 404 DO jk = 2, jpkm1 347 405 DO jj = 1, jpjm1 … … 356 414 ENDIF 357 415 ! 416 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 358 417 DO jk = 1, jpkm1 !== Divergence of vertical fluxes added to pta ==! 359 418 DO jj = 2, jpjm1 … … 379 438 IF( cdtype == 'TRA' .AND. jn == jp_tem ) THEN 380 439 z2d(:,:) = zftu(ji,jj,1) 440 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 381 441 DO jk = 2, jpkm1 382 442 DO jj = 2, jpjm1 … … 388 448 !!gm CAUTION I think there is an error of sign when using BLP operator.... 389 449 !!gm a multiplication by zsign is required (to be checked twice !) 450 !$OMP PARALLEL WORKSHARE 390 451 z2d(:,:) = - rau0_rcp * z2d(:,:) ! note sign is reversed to give down-gradient diffusive transports (#1043) 452 !$OMP END PARALLEL WORKSHARE 391 453 CALL lbc_lnk( z2d, 'U', -1. ) 392 454 CALL iom_put( "udiff_heattr", z2d ) ! heat transport in i-direction 393 455 ! 456 !$OMP PARALLEL WORKSHARE 394 457 z2d(:,:) = zftv(ji,jj,1) 458 !$OMP END PARALLEL WORKSHARE 459 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 395 460 DO jk = 2, jpkm1 396 461 DO jj = 2, jpjm1 … … 400 465 END DO 401 466 END DO 467 !$OMP PARALLEL WORKSHARE 402 468 z2d(:,:) = - rau0_rcp * z2d(:,:) ! note sign is reversed to give down-gradient diffusive transports (#1043) 469 !$OMP END PARALLEL WORKSHARE 403 470 CALL lbc_lnk( z2d, 'V', -1. ) 404 471 CALL iom_put( "vdiff_heattr", z2d ) ! heat transport in i-direction -
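The traldf_iso.F90 hunk above is the largest restructuring in the changeset: the old per-level "horizontal slab" reused the shared 2-D scratch planes zdkt/zdk1t, which blocked any threading over jk. The rewrite peels off the jk = 1 case (where zdkt equals zdk1t at the surface), keeps jk sequential, and threads each (jj,ji) sweep separately. A reduced sketch of the resulting shape (illustrative names and a simplified flux formula, not the full isoneutral operator):

SUBROUTINE slab_sketch( n, t, ah, flux, zdkt, zdk1t )
   IMPLICIT NONE
   INTEGER, INTENT(in)    :: n
   REAL,    INTENT(in)    :: t(n,n,n), ah(n,n,n)
   REAL,    INTENT(out)   :: flux(n,n,n)
   REAL,    INTENT(inout) :: zdkt(n,n), zdk1t(n,n)   ! shared scratch planes
   INTEGER :: ji, jj, jk
   flux(:,:,:) = 0.0
   DO jk = 2, n-1            ! sequential: the scratch planes are rebuilt per level
!$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, n
         DO ji = 1, n
            zdk1t(ji,jj) = t(ji,jj,jk)   - t(ji,jj,jk+1)
            zdkt (ji,jj) = t(ji,jj,jk-1) - t(ji,jj,jk)
         END DO
      END DO
!$OMP PARALLEL DO schedule(static) private(jj, ji)
      DO jj = 1, n-1
         DO ji = 1, n-1
            flux(ji,jj,jk) = ah(ji,jj,jk) * ( zdkt(ji+1,jj) + zdk1t(ji,jj) )
         END DO
      END DO
   END DO
END SUBROUTINE slab_sketch

An alternative would be to privatise the scratch planes and thread jk itself; the diff keeps them shared and pays two fork/joins per level instead.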
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/tranxt.F90
r6140 r6748 123 123 IF( l_trdtra ) THEN ! store now fields before applying the Asselin filter 124 124 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 125 !$OMP PARALLEL WORKSHARE 125 126 ztrdt(:,:,:) = tsn(:,:,:,jp_tem) 126 127 ztrds(:,:,:) = tsn(:,:,:,jp_sal) 128 !$OMP END PARALLEL WORKSHARE 127 129 IF( ln_traldf_iso ) THEN ! diagnose the "pure" Kz diffusive trend 128 130 CALL trd_tra( kt, 'TRA', jp_tem, jptra_zdfp, ztrdt ) … … 133 135 IF( neuler == 0 .AND. kt == nit000 ) THEN ! Euler time-stepping at first time-step (only swap) 134 136 DO jn = 1, jpts 137 !$OMP PARALLEL DO schedule(static) private(jk) 135 138 DO jk = 1, jpkm1 136 139 tsn(:,:,jk,jn) = tsa(:,:,jk,jn) … … 153 156 ! 154 157 IF( l_trdtra ) THEN ! trend of the Asselin filter (tb filtered - tb)/dt 158 !$OMP PARALLEL DO schedule(static) private(jk, zfact) 155 159 DO jk = 1, jpkm1 156 160 zfact = 1._wp / r2dt -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traqsr.F90
r6403 r6748 128 128 IF( l_trdtra ) THEN ! trends diagnostic: save the input temperature trend 129 129 CALL wrk_alloc( jpi,jpj,jpk, ztrdt ) 130 !$OMP PARALLEL WORKSHARE 130 131 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 132 !$OMP END PARALLEL WORKSHARE 131 133 ENDIF 132 134 ! … … 146 148 ELSE !== Swap of qsr heat content ==! 147 149 z1_2 = 0.5_wp 150 !$OMP PARALLEL WORKSHARE 148 151 qsr_hc_b(:,:,:) = qsr_hc(:,:,:) 152 !$OMP END PARALLEL WORKSHARE 149 153 ENDIF 150 154 ! … … 155 159 CASE( np_BIO ) !== bio-model fluxes ==! 156 160 ! 161 !$OMP PARALLEL DO schedule(static) private(jk) 157 162 DO jk = 1, nksr 158 163 qsr_hc(:,:,jk) = r1_rau0_rcp * ( etot3(:,:,jk) - etot3(:,:,jk+1) ) … … 166 171 IF( nqsr == np_RGBc ) THEN !* Variable Chlorophyll 167 172 CALL fld_read( kt, 1, sf_chl ) ! Read Chl data and provides it at the current time step 173 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zchl,zCtot,zze,zpsi,zlogc,zlogc2,zlogc3,zCb,zCmax,zpsimax,zdelpsi,zCze) 168 174 DO jk = 1, nksr + 1 169 175 DO jj = 2, jpjm1 ! Separation in R-G-B depending of the surface Chl … … 190 196 END DO 191 197 ELSE !* constant chrlorophyll 198 !$OMP PARALLEL DO schedule(static) private(jk) 192 199 DO jk = 1, nksr + 1 193 200 zchl3d(:,:,jk) = 0.05 … … 206 213 END DO 207 214 ! 215 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zchl,irgb,zc0,zc1,zc2,zc3) 208 216 DO jk = 2, nksr+1 !* interior equi-partition in R-G-B depending of vertical profile of Chl 209 217 DO jj = 2, jpjm1 … … 232 240 END DO 233 241 ! 242 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 234 243 DO jk = 1, nksr !* now qsr induced heat content 235 244 DO jj = 2, jpjm1 … … 247 256 zz0 = rn_abs * r1_rau0_rcp ! surface equi-partition in 2-bands 248 257 zz1 = ( 1. - rn_abs ) * r1_rau0_rcp 258 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zc0,zc1) 249 259 DO jk = 1, nksr ! solar heat absorbed at T-point in the top 400m 250 260 DO jj = 2, jpjm1 … … 260 270 ! 261 271 ! !-----------------------------! 272 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 262 273 DO jk = 1, nksr ! update to the temp. trend ! 263 274 DO jj = 2, jpjm1 !-----------------------------! … … 426 437 END SELECT 427 438 ! 439 !$OMP PARALLEL WORKSHARE 428 440 qsr_hc(:,:,:) = 0._wp ! now qsr heat content set to zero where it will not be computed 441 !$OMP END PARALLEL WORKSHARE 429 442 ! 430 443 ! 1st ocean level attenuation coefficient (used in sbcssm) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trasbc.F90
r6472 r6748 88 88 IF( l_trdtra ) THEN !* Save ta and sa trends 89 89 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 90 !$OMP PARALLEL WORKSHARE 90 91 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 91 92 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 93 !$OMP END PARALLEL WORKSHARE 92 94 ENDIF 93 95 ! 94 96 !!gm This should be moved into sbcmod.F90 module ? (especially now that ln_traqsr is read in namsbc namelist) 95 97 IF( .NOT.ln_traqsr ) THEN ! no solar radiation penetration 98 !$OMP PARALLEL WORKSHARE 96 99 qns(:,:) = qns(:,:) + qsr(:,:) ! total heat flux in qns 97 100 qsr(:,:) = 0._wp ! qsr set to zero 101 !$OMP END PARALLEL WORKSHARE 98 102 ENDIF 99 103 … … 111 115 ELSE ! No restart or restart not found: Euler forward time stepping 112 116 zfact = 1._wp 117 !$OMP PARALLEL WORKSHARE 113 118 sbc_tsc_b(:,:,:) = 0._wp 119 !$OMP END PARALLEL WORKSHARE 114 120 ENDIF 115 121 ELSE !* other time-steps: swap of forcing fields 116 122 zfact = 0.5_wp 123 !$OMP PARALLEL WORKSHARE 117 124 sbc_tsc_b(:,:,:) = sbc_tsc(:,:,:) 125 !$OMP END PARALLEL WORKSHARE 118 126 ENDIF 119 127 ! !== Now sbc tracer content fields ==! 128 !$OMP PARALLEL DO schedule(static) private(jj, ji) 120 129 DO jj = 2, jpj 121 130 DO ji = fs_2, fs_jpim1 ! vector opt. … … 125 134 END DO 126 135 IF( ln_linssh ) THEN !* linear free surface 136 !$OMP PARALLEL DO schedule(static) private(jj, ji) 127 137 DO jj = 2, jpj !==>> add concentration/dilution effect due to constant volume cell 128 138 DO ji = fs_2, fs_jpim1 ! vector opt. … … 136 146 ! 137 147 DO jn = 1, jpts !== update tracer trend ==! 148 !$OMP PARALLEL DO schedule(static) private(jj, ji) 138 149 DO jj = 2, jpj 139 150 DO ji = fs_2, fs_jpim1 ! vector opt. … … 217 228 ! 218 229 IF( ln_iscpl .AND. ln_hsb) THEN ! input of heat and salt due to river runoff 230 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zdep) 219 231 DO jk = 1,jpk 220 232 DO jj = 2, jpj … … 231 243 232 244 IF( l_trdtra ) THEN ! save the horizontal diffusive trends for further diagnostics 245 !$OMP PARALLEL WORKSHARE 233 246 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 234 247 ztrds(:,:,:) = tsa(:,:,:,jp_sal) - ztrds(:,:,:) 248 !$OMP END PARALLEL WORKSHARE 235 249 CALL trd_tra( kt, 'TRA', jp_tem, jptra_nsr, ztrdt ) 236 250 CALL trd_tra( kt, 'TRA', jp_sal, jptra_nsr, ztrds ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trazdf.F90
r6140 r6748 72 72 IF( l_trdtra ) THEN !* Save ta and sa trends 73 73 CALL wrk_alloc( jpi, jpj, jpk, ztrdt, ztrds ) 74 !$OMP PARALLEL WORKSHARE 74 75 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 75 76 ztrds(:,:,:) = tsa(:,:,:,jp_sal) 77 !$OMP END PARALLEL WORKSHARE 76 78 ENDIF 77 79 ! … … 88 90 89 91 IF( l_trdtra ) THEN ! save the vertical diffusive trends for further diagnostics 92 !$OMP PARALLEL DO schedule(static) private(jk) 90 93 DO jk = 1, jpkm1 91 94 ztrdt(:,:,jk) = ( ( tsa(:,:,jk,jp_tem) - tsb(:,:,jk,jp_tem) ) / r2dt ) - ztrdt(:,:,jk) -
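Where the loop body is itself a whole-slab array statement, parallelising just the level index keeps the directive minimal, as in the trend diagnostic above. A standalone sketch of that idiom (array names and the time step are illustrative):

      PROGRAM trend_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpi = 32, jpj = 16, jpk = 12
         REAL,    PARAMETER :: r2dt = 5760.0              ! illustrative 2*dt in seconds
         REAL    :: tsa(jpi,jpj,jpk), tsb(jpi,jpj,jpk), ztrdt(jpi,jpj,jpk)
         INTEGER :: jk
         CALL RANDOM_NUMBER( tsa )
         CALL RANDOM_NUMBER( tsb )
         ztrdt = 0.0
         ! one horizontal slab per iteration: only jk has to be private
         !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpk
            ztrdt(:,:,jk) = ( tsa(:,:,jk) - tsb(:,:,jk) ) / r2dt - ztrdt(:,:,jk)
         END DO
         !$OMP END PARALLEL DO
         PRINT *, 'checksum = ', SUM( ztrdt )
      END PROGRAM trend_sketch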
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trazdf_imp.F90
r6140 r6748 109 109 ELSE ; zwt(:,:,2:jpk) = fsavs(:,:,2:jpk) 110 110 ENDIF 111 !$OMP PARALLEL WORKSHARE 111 112 zwt(:,:,1) = 0._wp 113 !$OMP END PARALLEL WORKSHARE 112 114 ! 113 115 IF( l_ldfslp ) THEN ! isoneutral diffusion: add the contribution 114 116 IF( ln_traldf_msc ) THEN ! MSC iso-neutral operator 117 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 115 118 DO jk = 2, jpkm1 116 119 DO jj = 2, jpjm1 … … 121 124 END DO 122 125 ELSE ! standard or triad iso-neutral operator 126 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 123 127 DO jk = 2, jpkm1 124 128 DO jj = 2, jpjm1 … … 132 136 ! 133 137 ! Diagonal, lower (i), upper (s) (including the bottom boundary condition since avt is masked) 138 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 134 139 DO jk = 1, jpkm1 135 140 DO jj = 2, jpjm1 … … 162 167 ! used as a work space array: its value is modified. 163 168 ! 169 !$OMP PARALLEL DO schedule(static) private(jj, ji) 164 170 DO jj = 2, jpjm1 !* 1st recurrence: Tk = Dk - Ik Sk-1 / Tk-1 (increasing k) 165 171 DO ji = fs_2, fs_jpim1 ! done once for all passive tracers (so included in the IF instruction) … … 168 174 END DO 169 175 DO jk = 2, jpkm1 176 !$OMP PARALLEL DO schedule(static) private(jj, ji) 170 177 DO jj = 2, jpjm1 171 178 DO ji = fs_2, fs_jpim1 … … 177 184 ENDIF 178 185 ! 186 !$OMP PARALLEL DO schedule(static) private(jj, ji) 179 187 DO jj = 2, jpjm1 !* 2nd recurrence: Zk = Yk - Ik / Tk-1 Zk-1 180 188 DO ji = fs_2, fs_jpim1 … … 183 191 END DO 184 192 DO jk = 2, jpkm1 193 !$OMP PARALLEL DO schedule(static) private(jj, ji, zrhs) 185 194 DO jj = 2, jpjm1 186 195 DO ji = fs_2, fs_jpim1 … … 191 200 END DO 192 201 ! 202 !$OMP PARALLEL DO schedule(static) private(jj, ji) 193 203 DO jj = 2, jpjm1 !* 3rd recurrence: Xk = (Zk - Sk Xk+1 ) / Tk (result is the after tracer) 194 204 DO ji = fs_2, fs_jpim1 … … 197 207 END DO 198 208 DO jk = jpk-2, 1, -1 209 !$OMP PARALLEL DO schedule(static) private(jj, ji) 199 210 DO jj = 2, jpjm1 200 211 DO ji = fs_2, fs_jpim1 -
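The three recurrences in trazdf_imp.F90 form a Thomas (tridiagonal) solve, and the k direction carries a true dependence; the parallelism therefore lives entirely in the horizontal, which is why each OMP DO sits on the jj loop while the jk sweeps stay sequential. A self-contained sketch of that decomposition, solving one independent, diagonally dominant dummy system per column (the coefficients are illustrative, not the NEMO matrix):

      PROGRAM tridiag_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: n = 64, jpk = 50           ! columns, levels
         REAL    :: zwd(n,jpk), zwi(n,jpk), zws(n,jpk), zwy(n,jpk), zx(n,jpk)
         INTEGER :: ji, jk
         zwd = 4.0 ; zwi = -1.0 ; zws = -1.0 ; zwy = 1.0  ! dummy system
         zwi(:,1) = 0.0 ; zws(:,jpk) = 0.0
         DO jk = 2, jpk              ! 1st recurrence: eliminate the lower diagonal
            !$OMP PARALLEL DO schedule(static) private(ji)
            DO ji = 1, n             ! columns are independent: safe to thread
               zwd(ji,jk) = zwd(ji,jk) - zwi(ji,jk) * zws(ji,jk-1) / zwd(ji,jk-1)
            END DO
         END DO
         DO jk = 2, jpk              ! 2nd recurrence: forward substitution
            !$OMP PARALLEL DO schedule(static) private(ji)
            DO ji = 1, n
               zwy(ji,jk) = zwy(ji,jk) - zwi(ji,jk) / zwd(ji,jk-1) * zwy(ji,jk-1)
            END DO
         END DO
         !$OMP PARALLEL DO schedule(static) private(ji)
         DO ji = 1, n                ! 3rd recurrence: backward substitution
            zx(ji,jpk) = zwy(ji,jpk) / zwd(ji,jpk)
         END DO
         DO jk = jpk-1, 1, -1
            !$OMP PARALLEL DO schedule(static) private(ji)
            DO ji = 1, n
               zx(ji,jk) = ( zwy(ji,jk) - zws(ji,jk) * zx(ji,jk+1) ) / zwd(ji,jk)
            END DO
         END DO
         PRINT *, 'x(1,1) = ', zx(1,1), '  x(n,jpk) = ', zx(n,jpk)
      END PROGRAM tridiag_sketch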
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfbfr.F90
r6140 r6748 112 112 IF ( ln_loglayer.AND. .NOT.ln_linssh ) THEN ! "log layer" bottom friction coefficient 113 114 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp) 114 115 DO jj = 1, jpj 115 116 DO ji = 1, jpi … … 123 124 ! (ISF) 124 125 IF ( ln_isfcav ) THEN 126 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp) 125 127 DO jj = 1, jpj 126 128 DO ji = 1, jpi … … 135 137 ! 136 138 ELSE 139 !$OMP PARALLEL WORKSHARE 137 140 zbfrt(:,:) = bfrcoef2d(:,:) 138 141 ztfrt(:,:) = tfrcoef2d(:,:) 142 !$OMP END PARALLEL WORKSHARE 139 143 ENDIF 140 144 145 !$OMP PARALLEL DO private(jj,ji,ikbu,ikbv,zvu,zuv,zecu,zecv) 141 146 DO jj = 2, jpjm1 142 147 DO ji = 2, jpim1 … … 173 178 174 179 IF( ln_isfcav ) THEN 180 !$OMP PARALLEL DO private(jj,ji,ikbu,ikbv,zvu,zuv,zecu,zecv) 175 181 DO jj = 2, jpjm1 176 182 DO ji = 2, jpim1 … … 266 272 CASE( 0 ) 267 273 IF(lwp) WRITE(numout,*) ' free-slip ' 274 !$OMP PARALLEL WORKSHARE 268 275 bfrua(:,:) = 0.e0 269 276 bfrva(:,:) = 0.e0 270 277 tfrua(:,:) = 0.e0 271 278 tfrva(:,:) = 0.e0 279 !$OMP END PARALLEL WORKSHARE 272 280 ! 273 281 CASE( 1 ) … … 296 304 ENDIF 297 305 ! 306 !$OMP PARALLEL WORKSHARE 298 307 bfrua(:,:) = - bfrcoef2d(:,:) 299 308 bfrva(:,:) = - bfrcoef2d(:,:) 309 !$OMP END PARALLEL WORKSHARE 300 310 ! 301 311 IF ( ln_isfcav ) THEN … … 310 320 ENDIF 311 321 ! 322 !$OMP PARALLEL WORKSHARE 312 323 tfrua(:,:) = - tfrcoef2d(:,:) 313 324 tfrva(:,:) = - tfrcoef2d(:,:) 325 !$OMP END PARALLEL WORKSHARE 314 326 END IF 315 327 ! … … 371 383 ! 372 384 IF( ln_loglayer.AND. ln_linssh ) THEN ! set "log layer" bottom friction once for all 385 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp) 373 386 DO jj = 1, jpj 374 387 DO ji = 1, jpi … … 380 393 END DO 381 394 IF ( ln_isfcav ) THEN 395 !$OMP PARALLEL DO private(jj,ji,ikbt,ztmp) 382 396 DO jj = 1, jpj 383 397 DO ji = 1, jpi … … 419 433 zmaxtfr = -1.e10_wp ! initialise tracker for maximum of bottom friction coefficient 420 434 ! 435 !$OMP PARALLEL DO private(jj,ji,ikbu,ikbv,zfru,zfrv) reduction(+:ictu,ictv) reduction(min:zminbfr,zmintfr) reduction(max:zmaxbfr,zmaxtfr) 421 436 DO jj = 2, jpjm1 422 437 DO ji = 2, jpim1 -
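One detail of this file deserves care: scalars such as the counters ictu/ictv and the trackers zminbfr/zmaxbfr/zmintfr/zmaxtfr accumulate across the whole loop, so they belong in reduction clauses; listing an accumulator as private would discard each thread's partial result when the region ends. A minimal sketch of the counting and min/max reduction pattern, with illustrative names and threshold:

      PROGRAM reduction_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: n = 10000
         REAL    :: bfr(n), zminbfr, zmaxbfr
         INTEGER :: ictu, ji
         CALL RANDOM_NUMBER( bfr )
         zminbfr = HUGE(zminbfr) ; zmaxbfr = -HUGE(zmaxbfr) ; ictu = 0
         ! each thread reduces into a private copy; OpenMP combines the
         ! copies with the named operator when the loop completes
         !$OMP PARALLEL DO schedule(static) private(ji) reduction(min:zminbfr) reduction(max:zmaxbfr) reduction(+:ictu)
         DO ji = 1, n
            zminbfr = MIN( zminbfr, bfr(ji) )
            zmaxbfr = MAX( zmaxbfr, bfr(ji) )
            IF( bfr(ji) > 0.9 )   ictu = ictu + 1
         END DO
         !$OMP END PARALLEL DO
         PRINT *, zminbfr, zmaxbfr, ictu
      END PROGRAM reduction_sketch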
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfevd.F90
r6140 r6748 76 76 zavm_evd(:,:,:) = avm(:,:,:) ! set avm prior to evd application 77 77 ! 78 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 78 79 DO jk = 1, jpkm1 79 80 DO jj = 2, jpj ! no vector opt. … … 97 98 ! 98 99 CASE DEFAULT ! enhance vertical eddy diffusivity only (if rn2<-1.e-12) 100 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 99 101 DO jk = 1, jpkm1 100 102 !!! WHERE( rn2(:,:,jk) <= -1.e-12 ) avt(:,:,jk) = tmask(:,:,jk) * avevd ! acting on T ONLY! -
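The commented-out WHERE line keeps the original array-syntax formulation for reference; the explicit triple loop it became threads cleanly with a plain PARALLEL DO. A standalone sketch of the masked enhanced-diffusivity update, with dummy fields and an illustrative enhanced value:

      PROGRAM evd_mask_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpi = 32, jpj = 16, jpk = 12
         REAL,    PARAMETER :: avevd = 10.0               ! illustrative enhanced Kz
         REAL    :: rn2(jpi,jpj,jpk), avt(jpi,jpj,jpk), tmask(jpi,jpj,jpk)
         INTEGER :: ji, jj, jk
         CALL RANDOM_NUMBER( rn2 )
         rn2 = rn2 - 0.5 ; avt = 1.e-5 ; tmask = 1.0
         !$OMP PARALLEL DO schedule(static) private(jk,jj,ji)
         DO jk = 1, jpk - 1
            DO jj = 1, jpj
               DO ji = 1, jpi
                  ! enhance the diffusivity only where stratification is unstable
                  IF( rn2(ji,jj,jk) <= -1.e-12 )   avt(ji,jj,jk) = tmask(ji,jj,jk) * avevd
               END DO
            END DO
         END DO
         !$OMP END PARALLEL DO
         PRINT *, 'max avt = ', MAXVAL( avt )
      END PROGRAM evd_mask_sketch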
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfmxl.F90
r6352 r6748 96 96 97 97 ! w-level of the mixing and mixed layers 98 !$OMP PARALLEL WORKSHARE 98 99 nmln(:,:) = nlb10 ! Initialization to the number of w ocean point 99 100 hmlp(:,:) = 0._wp ! here hmlp used as a dummy variable, integrating vertically N^2 101 !$OMP END PARALLEL WORKSHARE 100 102 zN2_c = grav * rho_c * r1_rau0 ! convert density criteria into N^2 criteria 101 103 DO jk = nlb10, jpkm1 … … 110 112 ! 111 113 ! w-level of the turbocline and mixing layer (iom_use) 114 !$OMP PARALLEL WORKSHARE 112 115 imld(:,:) = mbkt(:,:) + 1 ! Initialization to the number of w ocean point 116 !$OMP END PARALLEL WORKSHARE 117 113 118 DO jk = jpkm1, nlb10, -1 ! from the bottom to nlb10 114 119 DO jj = 1, jpj … … 119 124 END DO 120 125 ! depth of the mixing and mixed layers 126 !$OMP PARALLEL DO schedule(static) private(jj, ji, iiki, iikn) 121 127 DO jj = 1, jpj 122 128 DO ji = 1, jpi -
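The vertical scans in zdfmxl.F90 keep their level-outer order, so only the initialisations and the final depth loop are threaded here. When full threading of the scan itself is wanted, the loop nest can be turned inside-out: one sequential sweep per water column, with the columns shared among threads. A sketch of that alternative under simplified assumptions (dummy fields and a made-up criterion, not the NEMO ones):

      PROGRAM mxl_scan_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpi = 32, jpj = 16, jpk = 30
         REAL,    PARAMETER :: zN2_c = 0.5                ! illustrative criterion
         REAL    :: rn2(jpi,jpj,jpk), e3w(jpi,jpj,jpk), zsum
         INTEGER :: nmln(jpi,jpj), ji, jj, jk
         CALL RANDOM_NUMBER( rn2 )
         e3w = 1.0
         !$OMP PARALLEL DO schedule(static) private(jj,ji,jk,zsum)
         DO jj = 1, jpj
            DO ji = 1, jpi
               nmln(ji,jj) = jpk                          ! default: bottom level
               zsum = 0.0
               DO jk = 1, jpk                             ! sequential per column
                  zsum = zsum + MAX( rn2(ji,jj,jk), 0.0 ) * e3w(ji,jj,jk)
                  IF( zsum > zN2_c ) THEN                 ! first level passing the test
                     nmln(ji,jj) = jk
                     EXIT
                  END IF
               END DO
            END DO
         END DO
         !$OMP END PARALLEL DO
         PRINT *, MINVAL( nmln ), MAXVAL( nmln )
      END PROGRAM mxl_scan_sketch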
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftke.F90
r6497 r6748 179 179 ! 180 180 IF( kt /= nit000 ) THEN ! restore before value to compute tke 181 !$OMP PARALLEL WORKSHARE 181 182 avt (:,:,:) = avt_k (:,:,:) 182 183 avm (:,:,:) = avm_k (:,:,:) 183 184 avmu(:,:,:) = avmu_k(:,:,:) 184 185 avmv(:,:,:) = avmv_k(:,:,:) 186 !$OMP END PARALLEL WORKSHARE 185 187 ENDIF 186 188 ! … … 189 191 CALL tke_avn ! now avt, avm, avmu, avmv 190 192 ! 193 !$OMP PARALLEL WORKSHARE 191 194 avt_k (:,:,:) = avt (:,:,:) 192 195 avm_k (:,:,:) = avm (:,:,:) 193 196 avmu_k(:,:,:) = avmu(:,:,:) 194 197 avmv_k(:,:,:) = avmv(:,:,:) 198 !$OMP END PARALLEL WORKSHARE 195 199 ! 196 200 #if defined key_agrif … … 253 257 ! !<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 254 258 IF ( ln_isfcav ) THEN 259 !$OMP PARALLEL DO schedule(static) private(jj, ji) 255 260 DO jj = 2, jpjm1 ! en(mikt(ji,jj)) = rn_emin 256 261 DO ji = fs_2, fs_jpim1 ! vector opt. … … 259 264 END DO 260 265 END IF 266 !$OMP PARALLEL DO schedule(static) private(jj, ji) 261 267 DO jj = 2, jpjm1 ! en(1) = rn_ebb taum / rau0 (min value rn_emin0) 262 268 DO ji = fs_2, fs_jpim1 ! vector opt. … … 295 301 zpelc(:,:,1) = MAX( rn2b(:,:,1), 0._wp ) * gdepw_n(:,:,1) * e3w_n(:,:,1) 296 302 DO jk = 2, jpk 297 zpelc(:,:,jk) = zpelc(:,:,jk-1) + MAX( rn2b(:,:,jk), 0._wp ) * gdepw_n(:,:,jk) * e3w_n(:,:,jk) 303 !$OMP PARALLEL DO schedule(static) private(jj, ji) 304 DO jj =1, jpj 305 DO ji=1, jpi 306 zpelc(ji,jj,jk) = zpelc(ji,jj,jk-1) + MAX( rn2b(ji,jj,jk), 0._wp ) * gdepw_n(ji,jj,jk) * e3w_n(ji,jj,jk) 307 END DO 308 END DO 298 309 END DO 299 310 ! !* finite Langmuir Circulation depth … … 309 320 END DO 310 321 ! ! finite LC depth 322 !$OMP PARALLEL DO schedule(static) private(jj, ji) 311 323 DO jj = 1, jpj 312 324 DO ji = 1, jpi … … 315 327 END DO 316 328 zcof = 0.016 / SQRT( zrhoa * zcdrag ) 329 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zus, zind, zwlc) 317 330 DO jk = 2, jpkm1 !* TKE Langmuir circulation source term added to en 318 331 DO jj = 2, jpjm1 … … 338 351 ! ! zdiag : diagonal zd_up : upper diagonal zd_lw : lower diagonal 339 352 ! 353 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 340 354 DO jk = 2, jpkm1 !* Shear production at uw- and vw-points (energy conserving form) 341 355 DO jj = 1, jpjm1 … … 356 370 ! Note that zesh2 is also computed in the next loop. 357 371 ! We decided to compute it twice to keep code readability and avoid an IF case in the DO loops 372 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zesh2, zri) 358 373 DO jk = 2, jpkm1 359 374 DO jj = 2, jpjm1 … … 372 387 ENDIF 373 388 ! 389 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zcof, zzd_up, zzd_lw, zesh2) 374 390 DO jk = 2, jpkm1 !* Matrix and right hand side in en 375 391 DO jj = 2, jpjm1 … … 405 421 ! !* Matrix inversion from level 2 (tke prescribed at level 1) 406 422 DO jk = 3, jpkm1 ! First recurrence : Dk = Dk - Lk * Uk-1 / Dk-1 423 !$OMP PARALLEL DO schedule(static) private(jj, ji) 407 424 DO jj = 2, jpjm1 408 425 DO ji = fs_2, fs_jpim1 ! vector opt. … … 411 428 END DO 412 429 END DO 430 !$OMP PARALLEL DO schedule(static) private(jj, ji) 413 431 DO jj = 2, jpjm1 ! Second recurrence : Lk = RHSk - Lk / Dk-1 * Lk-1 414 432 DO ji = fs_2, fs_jpim1 ! vector opt. … … 417 435 END DO 418 436 DO jk = 3, jpkm1 437 !$OMP PARALLEL DO schedule(static) private(jj, ji) 419 438 DO jj = 2, jpjm1 420 439 DO ji = fs_2, fs_jpim1 ! vector opt. … … 423 442 END DO 424 443 END DO 444 !$OMP PARALLEL DO schedule(static) private(jj, ji) 425 445 DO jj = 2, jpjm1 ! 
third recurrence : Ek = ( Lk - Uk * Ek+1 ) / Dk 426 446 DO ji = fs_2, fs_jpim1 ! vector opt. … … 429 449 END DO 430 450 DO jk = jpk-2, 2, -1 451 !$OMP PARALLEL DO schedule(static) private(jj, ji) 431 452 DO jj = 2, jpjm1 432 453 DO ji = fs_2, fs_jpim1 ! vector opt. … … 435 456 END DO 436 457 END DO 458 !$OMP PARALLEL DO schedule(static) private(jk,jj, ji) 437 459 DO jk = 2, jpkm1 ! set the minimum value of tke 438 460 DO jj = 2, jpjm1 … … 450 472 IF( nn_etau == 1 ) THEN !* penetration below the mixed layer (rn_efr fraction) 474 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 452 475 DO jk = 2, jpkm1 453 476 DO jj = 2, jpjm1 … … 459 482 END DO 460 483 ELSEIF( nn_etau == 2 ) THEN !* act only at the base of the mixed layer (jk=nmln) (rn_efr fraction) 484 !$OMP PARALLEL DO schedule(static) private(jj, ji, jk) 461 485 DO jj = 2, jpjm1 462 486 DO ji = fs_2, fs_jpim1 ! vector opt. … … 467 491 END DO 468 492 ELSEIF( nn_etau == 3 ) THEN !* penetration below the mixed layer (HF variability) 493 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ztx2, zty2, ztau, zdif) 469 494 DO jk = 2, jpkm1 470 495 DO jj = 2, jpjm1 … … 545 570 ! 546 571 ! initialisation of interior minimum value (avoid a 2d loop with mikt) 572 !$OMP PARALLEL WORKSHARE 547 573 zmxlm(:,:,:) = rmxl_min 548 574 zmxld(:,:,:) = rmxl_min 575 !$OMP END PARALLEL WORKSHARE 549 576 ! 550 577 IF( ln_mxl0 ) THEN ! surface mixing length = F(stress) : l=vkarmn*2.e5*taum/(rau0*g) 578 !$OMP PARALLEL DO schedule(static) private(jj, ji, zraug) 551 579 DO jj = 2, jpjm1 552 580 DO ji = fs_2, fs_jpim1 … … 556 584 END DO 557 585 ELSE 586 !$OMP PARALLEL WORKSHARE 558 587 zmxlm(:,:,1) = rn_mxl0 588 !$OMP END PARALLEL WORKSHARE 559 589 ENDIF 560 590 ! 591 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zrn2) 561 592 DO jk = 2, jpkm1 ! interior value : l=sqrt(2*e/n^2) 562 593 DO jj = 2, jpjm1 … … 570 601 ! !* Physical limits for the mixing length 571 602 ! 603 !$OMP PARALLEL WORKSHARE 572 604 zmxld(:,:, 1 ) = zmxlm(:,:,1) ! surface set to the minimum value 573 605 zmxld(:,:,jpk) = rmxl_min ! last level set to the minimum value 606 !$OMP END PARALLEL WORKSHARE 574 607 ! 575 608 SELECT CASE ( nn_mxl ) … … 578 611 ! where wmask = 0 set zmxlm == e3w_n 579 612 CASE ( 0 ) ! bounded by the distance to surface and bottom 613 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zemxl) 580 614 DO jk = 2, jpkm1 581 615 DO jj = 2, jpjm1 … … 591 625 ! 592 626 CASE ( 1 ) ! bounded by the vertical scale factor 627 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zemxl) 593 628 DO jk = 2, jpkm1 594 629 DO jj = 2, jpjm1 … … 603 638 CASE ( 2 ) ! |dk[xml]| bounded by e3t : 604 639 DO jk = 2, jpkm1 ! from the surface to the bottom : 640 !$OMP PARALLEL DO schedule(static) private(jj, ji) 605 641 DO jj = 2, jpjm1 606 642 DO ji = fs_2, fs_jpim1 ! vector opt. … … 610 646 END DO 611 647 DO jk = jpkm1, 2, -1 ! from the bottom to the surface : 648 !$OMP PARALLEL DO schedule(static) private(jj, ji, zemxl) 612 649 DO jj = 2, jpjm1 613 650 DO ji = fs_2, fs_jpim1 ! vector opt. … … 621 658 CASE ( 3 ) ! lup and ldown, |dk[xml]| bounded by e3t : 622 659 DO jk = 2, jpkm1 ! from the surface to the bottom : lup 660 !$OMP PARALLEL DO schedule(static) private(jj, ji) 623 661 DO jj = 2, jpjm1 624 662 DO ji = fs_2, fs_jpim1 ! vector opt. … … 628 666 END DO 629 667 DO jk = jpkm1, 2, -1 ! from the bottom to the surface : ldown 668 !$OMP PARALLEL DO schedule(static) private(jj, ji) 630 669 DO jj = 2, jpjm1 631 670 DO ji = fs_2, fs_jpim1 ! vector opt.
… … 634 673 END DO 635 674 END DO 675 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zemlm, zemlp) 636 676 DO jk = 2, jpkm1 637 677 DO jj = 2, jpjm1 … … 648 688 ! 649 690 # if defined key_c1d 690 !$OMP PARALLEL WORKSHARE 650 691 e_dis(:,:,:) = zmxld(:,:,:) ! c1d configuration : save mixing and dissipation turbulent length scales 651 692 e_mix(:,:,:) = zmxlm(:,:,:) 693 !$OMP END PARALLEL WORKSHARE 652 694 # endif 653 695 … … 655 697 ! ! Vertical eddy viscosity and diffusivity (avmu, avmv, avt) 656 698 ! !<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 699 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zsqen, zav) 657 700 DO jk = 1, jpkm1 !* vertical eddy viscosity & diffusivity at w-points 658 701 DO jj = 2, jpjm1 … … 668 711 CALL lbc_lnk( avm, 'W', 1. ) ! Lateral boundary conditions (sign unchanged) 669 712 ! 713 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 670 714 DO jk = 2, jpkm1 !* vertical eddy viscosity at wu- and wv-points 671 715 DO jj = 2, jpjm1 … … 679 723 ! 680 724 IF( nn_pdl == 1 ) THEN !* Prandtl number case: update avt 725 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 681 726 DO jk = 2, jpkm1 682 727 DO jj = 2, jpjm1 … … 804 849 ENDIF 805 850 ! !* set vertical eddy coef. to the background value 851 !$OMP PARALLEL DO schedule(static) private(jk) 806 852 DO jk = 1, jpk 807 853 avt (:,:,jk) = avtb(jk) * wmask (:,:,jk) … … 857 903 ELSE ! No TKE array found: initialisation 858 904 IF(lwp) WRITE(numout,*) ' ===>>>> : previous run without tke scheme, en computed by iterative loop' 905 !$OMP PARALLEL WORKSHARE 859 906 en (:,:,:) = rn_emin * tmask(:,:,:) 907 !$OMP END PARALLEL WORKSHARE 860 908 CALL tke_avn ! recompute avt, avm, avmu, avmv and dissl (approximation) 861 909 ! 910 !$OMP PARALLEL WORKSHARE 862 911 avt_k (:,:,:) = avt (:,:,:) 863 912 avm_k (:,:,:) = avm (:,:,:) 864 913 avmu_k(:,:,:) = avmu(:,:,:) 865 914 avmv_k(:,:,:) = avmv(:,:,:) 915 !$OMP END PARALLEL WORKSHARE 866 916 ! 867 917 DO jit = nit000 + 1, nit000 + 10 ; CALL zdf_tke( jit ) ; END DO 868 918 ENDIF 869 919 ELSE !* Start from rest 920 !$OMP PARALLEL WORKSHARE 870 921 en(:,:,:) = rn_emin * tmask(:,:,:) 922 !$OMP END PARALLEL WORKSHARE 923 !$OMP PARALLEL DO schedule(static) private(jk) 871 924 DO jk = 1, jpk ! set the Kz to the background value 872 925 avt (:,:,jk) = avtb(jk) * wmask (:,:,jk) -
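In the TKE matrix inversion the team is forked anew at every level, because each PARALLEL DO sits inside the sequential jk recurrence. An equivalent arrangement hoists one PARALLEL region around the whole sweep and uses a bare !$OMP DO inside it: the implicit barrier at END DO still orders the levels, but the fork/join cost is paid once. A minimal sketch of that alternative, using a dummy recurrence rather than the TKE coefficients:

      PROGRAM hoisted_region_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpj = 64, jpk = 40
         REAL    :: zd(jpj,jpk)
         INTEGER :: jj, jk
         zd = 1.0
         !$OMP PARALLEL private(jk,jj)
         DO jk = 2, jpk               ! recurrence in jk: levels must run in order
            !$OMP DO schedule(static)
            DO jj = 1, jpj            ! independent columns shared by the team
               zd(jj,jk) = zd(jj,jk) + 0.5 * zd(jj,jk-1)
            END DO
            !$OMP END DO              ! implicit barrier: level jk complete before jk+1
         END DO
         !$OMP END PARALLEL
         PRINT *, 'zd(1,jpk) = ', zd(1,jpk)
      END PROGRAM hoisted_region_sketch

Whether the hoisted form actually wins depends on the runtime's fork/join cost; it is offered as a sketch of the trade-off, not as a measured improvement.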
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/step.F90
r6464 r6748 132 132 IF( lk_zdfgls ) CALL zdf_gls( kstp ) ! GLS closure scheme for Kz 133 133 IF( lk_zdfcst ) THEN ! Constant Kz (reset avt, avm[uv] to the background value) 134 !$OMP PARALLEL WORKSHARE 134 135 avt (:,:,:) = rn_avt0 * wmask (:,:,:) 135 136 avmu(:,:,:) = rn_avm0 * wumask(:,:,:) 136 137 avmv(:,:,:) = rn_avm0 * wvmask(:,:,:) 138 !$OMP END PARALLEL WORKSHARE 137 139 ENDIF 138 140 139 141 IF( ln_rnf_mouth ) THEN ! increase diffusivity at river mouths 142 !$OMP PARALLEL DO schedule(static) private(jk) 140 143 DO jk = 2, nkrnf ; avt(:,:,jk) = avt(:,:,jk) + 2._wp * rn_avt_rnf * rnfmsk(:,:) * tmask(:,:,jk) ; END DO 141 144 ENDIF … … 194 197 & rhd, gru , grv , grui, grvi ) ! of t, s, rd at the first ocean level 195 198 !!jc: fs simplification 196 199 !$OMP PARALLEL WORKSHARE 197 200 ua(:,:,:) = 0._wp ! set dynamics trends to zero 198 201 va(:,:,:) = 0._wp 199 202 !$OMP END PARALLEL WORKSHARE 200 203 IF( lk_asminc .AND. ln_asmiau .AND. ln_dyninc ) & 201 204 & CALL dyn_asm_inc ( kstp ) ! apply dynamics assimilation increment … … 250 253 ! Active tracers !<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 255 !$OMP PARALLEL WORKSHARE 252 256 tsa(:,:,:,:) = 0._wp ! set tracer trends to zero 253 257 !$OMP END PARALLEL WORKSHARE 254 258 IF( lk_asminc .AND. ln_asmiau .AND. & 255 259 & ln_trainc ) CALL tra_asm_inc ( kstp ) ! apply tracer assimilation increment
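Threading even trivial zero-fills such as ua, va and tsa has a second motive besides the assignment itself: on NUMA nodes the thread that first writes a memory page decides where that page is physically placed, so initialising with the same static distribution the compute loops use keeps each thread's data local. A small sketch of that first-touch idiom, with illustrative sizes:

      PROGRAM first_touch_sketch
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpi = 64, jpj = 64, jpk = 32
         REAL, ALLOCATABLE :: ua(:,:,:)
         INTEGER :: jk
         ALLOCATE( ua(jpi,jpj,jpk) )
         ! pages are placed on first write: zero them with the same
         ! schedule(static) split over jk that later loops will use
         !$OMP PARALLEL DO schedule(static) private(jk)
         DO jk = 1, jpk
            ua(:,:,jk) = 0.0
         END DO
         !$OMP END PARALLEL DO
         PRINT *, 'sum = ', SUM( ua )
      END PROGRAM first_touch_sketch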