#if defined key_mpp_mpi
#if defined key_sp
#define mpivar mpi_real
#else
#define mpivar mpi_double_precision
#endif
#endif
MODULE obs_mpp
   !!======================================================================
   !!                       ***  MODULE obs_mpp  ***
   !! Observation diagnostics: Various MPP support routines
   !!======================================================================
   !!----------------------------------------------------------------------
   !!   obs_mpp_bcast_integer : Broadcast an integer array from a processor
   !!                           to all processors
   !!   obs_mpp_max_integer   : Find the maximum across processors of each
   !!                           element of an integer array
   !!   obs_mpp_find_obs_proc : Find processors which should hold the observations
   !!   obs_mpp_sum_integers  : Sum an integer array across all processors
   !!   obs_mpp_sum_integer   : Sum an integer across all processors
   !!----------------------------------------------------------------------
   !! * Modules used
   USE dom_oce, ONLY : &        ! Ocean space and time domain variables
      & nproc, &
      & mig, mjg
   USE mpp_map, ONLY : &
      & mppmap
   USE in_out_manager
#if defined key_mpp_mpi
   USE lib_mpp, ONLY : &        ! MPP library
      & mpi_comm_opa
#endif
   IMPLICIT NONE

   !! * Routine accessibility
   PRIVATE

   PUBLIC obs_mpp_bcast_integer, & ! Broadcast an integer array from a proc to all procs
      &   obs_mpp_max_integer,   & ! Find maximum across processors in an integer array
      &   obs_mpp_find_obs_proc, & ! Find processors which should hold the observations
      &   obs_mpp_sum_integers,  & ! Sum an integer array across all processors
      &   obs_mpp_sum_integer,   & ! Sum an integer across all processors
      &   mpp_alltoall_int,      &
      &   mpp_alltoallv_int,     &
      &   mpp_alltoallv_real,    &
      &   mpp_global_max

   !!----------------------------------------------------------------------
   !! NEMO/OPA 3.3 , NEMO Consortium (2010)
   !! $Id$
   !! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------

CONTAINS

   SUBROUTINE obs_mpp_bcast_integer( kvals, kno, kroot )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_bcast_integer  ***
      !!
      !! ** Purpose : Send the array kvals from processor kroot to all processors
      !!
      !! ** Method  : MPI broadcast
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-03  (K. Mogensen)  Original code
      !!      !  06-05  (K. Mogensen)  Reformatted
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: kno     ! Number of elements in array
      INTEGER, INTENT(IN) :: kroot   ! Processor that sends the data
      INTEGER, DIMENSION(kno), INTENT(INOUT) :: &
         & kvals   ! Array to send on kroot, receive for non-kroot
#if defined key_mpp_mpi
      !! * Local declarations
      INTEGER :: ierr
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to broadcast data
      !-----------------------------------------------------------------------
      CALL mpi_bcast( kvals, kno, mpi_integer, &
         &            kroot, mpi_comm_opa, ierr )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#endif
   END SUBROUTINE obs_mpp_bcast_integer

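   ! Illustrative (commented) sketch of a caller of obs_mpp_bcast_integer.
   ! The names inobstyp and iobsnum are hypothetical and not part of this
   ! module; the array is filled on the root processor only and then
   ! broadcast to all others.
   !
   !    INTEGER, PARAMETER :: inobstyp = 3
   !    INTEGER, DIMENSION(inobstyp) :: iobsnum
   !    IF ( nproc == 0 ) iobsnum(:) = (/ 120, 45, 300 /)    ! known on root only
   !    CALL obs_mpp_bcast_integer( iobsnum, inobstyp, 0 )   ! now known everywhere
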
   SUBROUTINE obs_mpp_max_integer( kvals, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_max_integer  ***
      !!
      !! ** Purpose : Find the maximum across processors of each element of
      !!              an integer array.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-03  (K. Mogensen)  Original code
      !!      !  06-05  (K. Mogensen)  Reformatted
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: kno   ! Number of elements in array
      INTEGER, DIMENSION(kno), INTENT(INOUT) :: &
         & kvals   ! Local values on input, element-wise maxima on output
#if defined key_mpp_mpi
      !! * Local declarations
      INTEGER :: ierr
      INTEGER, DIMENSION(kno) :: &
         & ivals
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the maximum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvals, ivals, kno, mpi_integer, &
         &                mpi_max, mpi_comm_opa, ierr )
      kvals(:) = ivals(:)
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#endif
   END SUBROUTINE obs_mpp_max_integer

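   ! Illustrative (commented) sketch of a caller of obs_mpp_max_integer.  The
   ! array name icount and the local counts are hypothetical: each processor
   ! supplies its local counts and receives the element-wise maximum over all
   ! processors, e.g. to size buffers consistently everywhere.
   !
   !    INTEGER, DIMENSION(2) :: icount
   !    icount(:) = (/ inprofloc, inslaloc /)   ! local counts (hypothetical)
   !    CALL obs_mpp_max_integer( icount, 2 )   ! icount now holds the maxima
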
   SUBROUTINE obs_mpp_find_obs_proc( kobsp, kobsi, kobsj, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_find_obs_proc  ***
      !!
      !! ** Purpose : From the array kobsp containing the results of the grid
      !!              search on each processor, return a decision on which
      !!              processor should hold each observation.
      !!
      !! ** Method  : A temporary 2D array holding all the decisions is
      !!              constructed using mpi_allgather on each processor.
      !!              If more than one processor has found the observation,
      !!              the processor holding it in its inner domain gets it.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-07  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: kno               ! Number of observations
      INTEGER, DIMENSION(kno), INTENT(IN) :: &
         & kobsi, &                            ! Local grid indices of each
         & kobsj                               ! observation from the grid search
      INTEGER, DIMENSION(kno), INTENT(INOUT) :: &
         & kobsp                               ! Grid-search result on input,
                                               ! owning processor on output
#if defined key_mpp_mpi
      !! * Local declarations
      INTEGER :: ji
      INTEGER :: jj
      INTEGER :: size
      INTEGER :: ierr
      INTEGER :: iobsip
      INTEGER :: iobsjp
      INTEGER :: num_sus_obs
      INTEGER, DIMENSION(kno) :: &
         & iobsig, &
         & iobsjg
      INTEGER, ALLOCATABLE, DIMENSION(:,:) :: &
         & iobsp, iobsi, iobsj
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Get the number of processors from the MPI library
      !-----------------------------------------------------------------------
      CALL mpi_comm_size( mpi_comm_opa, size, ierr )
      !-----------------------------------------------------------------------
      ! Convert local grid points to global grid points
      !-----------------------------------------------------------------------
      DO ji = 1, kno
         IF ( ( kobsi(ji) >= 1 ) .AND. ( kobsi(ji) <= jpi ) .AND. &
            & ( kobsj(ji) >= 1 ) .AND. ( kobsj(ji) <= jpj ) ) THEN
            iobsig(ji) = mig( kobsi(ji) )
            iobsjg(ji) = mjg( kobsj(ji) )
         ELSE
            iobsig(ji) = -1
            iobsjg(ji) = -1
         ENDIF
      END DO
      !-----------------------------------------------------------------------
      ! Get the decisions from all processors
      !-----------------------------------------------------------------------
      ALLOCATE( iobsp(kno,size) )
      ALLOCATE( iobsi(kno,size) )
      ALLOCATE( iobsj(kno,size) )
      CALL mpi_allgather( kobsp,  kno, mpi_integer, &
         &                iobsp,  kno, mpi_integer, &
         &                mpi_comm_opa, ierr )
      CALL mpi_allgather( iobsig, kno, mpi_integer, &
         &                iobsi,  kno, mpi_integer, &
         &                mpi_comm_opa, ierr )
      CALL mpi_allgather( iobsjg, kno, mpi_integer, &
         &                iobsj,  kno, mpi_integer, &
         &                mpi_comm_opa, ierr )
      !-----------------------------------------------------------------------
      ! Assign each observation to a processor, preferring the processor that
      ! holds it in its inner domain and, failing that, the lowest processor
      ! number among processors holding the observation.
      !-----------------------------------------------------------------------
      kobsp(:) = -1
      num_sus_obs = 0
      DO ji = 1, kno
         DO jj = 1, size
            IF ( ( kobsp(ji) == -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
               kobsp(ji) = iobsp(ji,jj)
               iobsip = iobsi(ji,jj)
               iobsjp = iobsj(ji,jj)
            ENDIF
            IF ( ( kobsp(ji) /= -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
               IF ( ( iobsip /= iobsi(ji,jj) ) .OR. &
                  & ( iobsjp /= iobsj(ji,jj) ) ) THEN
                  IF ( ( kobsp(ji)    < 1000000 ) .AND. &
                     & ( iobsp(ji,jj) < 1000000 ) ) THEN
                     num_sus_obs = num_sus_obs + 1
                  ENDIF
               ENDIF
               IF ( mppmap(iobsip,iobsjp) /= ( kobsp(ji)+1 ) ) THEN
                  IF ( ( iobsi(ji,jj) /= -1 ) .AND. &
                     & ( iobsj(ji,jj) /= -1 ) ) THEN
                     IF ( ( mppmap(iobsi(ji,jj),iobsj(ji,jj)) == ( iobsp(ji,jj)+1 ) ) &
                        & .OR. ( iobsp(ji,jj) < kobsp(ji) ) ) THEN
                        kobsp(ji) = iobsp(ji,jj)
                        iobsip = iobsi(ji,jj)
                        iobsjp = iobsj(ji,jj)
                     ENDIF
                  ENDIF
               ENDIF
            ENDIF
         END DO
      END DO
      IF (lwp) WRITE(numout,*) 'Number of suspicious observations: ', num_sus_obs
      DEALLOCATE( iobsj )
      DEALLOCATE( iobsi )
      DEALLOCATE( iobsp )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#endif
   END SUBROUTINE obs_mpp_find_obs_proc

   SUBROUTINE obs_mpp_sum_integers( kvalsin, kvalsout, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integers  ***
      !!
      !! ** Purpose : Sum an integer array across all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-07  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: kno
      INTEGER, DIMENSION(kno), INTENT(IN) :: &
         & kvalsin
      INTEGER, DIMENSION(kno), INTENT(OUT) :: &
         & kvalsout
#if defined key_mpp_mpi
      !! * Local declarations
      INTEGER :: ierr
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalsin, kvalsout, kno, mpi_integer, &
         &                mpi_sum, mpi_comm_opa, ierr )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout(:) = kvalsin(:)
#endif
   END SUBROUTINE obs_mpp_sum_integers

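   ! Illustrative (commented) sketch of how obs_mpp_find_obs_proc is typically
   ! used after a local grid search.  The names inum, iproc, iig and ijg are
   ! hypothetical: on input iproc(:) holds each processor's grid-search result
   ! (e.g. the local processor number where the observation was found, -1
   ! elsewhere), and iig/ijg hold the local grid indices from that search; on
   ! output iproc(:) holds the single processor chosen to own each observation.
   !
   !    CALL obs_mpp_find_obs_proc( iproc, iig, ijg, inum )
   !
   ! A global count of the accepted observations could then be obtained with
   ! obs_mpp_sum_integers or obs_mpp_sum_integer.
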
   SUBROUTINE obs_mpp_sum_integer( kvalin, kvalout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integer  ***
      !!
      !! ** Purpose : Sum a single integer across all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-07  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN)  :: kvalin
      INTEGER, INTENT(OUT) :: kvalout
#if defined key_mpp_mpi
      !! * Local declarations
      INTEGER :: ierr
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalin, kvalout, 1, mpi_integer, &
         &                mpi_sum, mpi_comm_opa, ierr )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalout = kvalin
#endif
   END SUBROUTINE obs_mpp_sum_integer

   SUBROUTINE mpp_global_max( pval )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_global_max  ***
      !!
      !! ** Purpose : Get the maximum value across processors for a global
      !!              real array.
      !!
      !! ** Method  : MPI allreduce
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  08-01  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      REAL(KIND=wp), DIMENSION(jpiglo,jpjglo), INTENT(INOUT) :: &
         & pval
      !! * Local declarations
      INTEGER :: ierr
#if defined key_mpp_mpi
      INCLUDE 'mpif.h'
      REAL(KIND=wp), DIMENSION(:,:), ALLOCATABLE :: &
         & zcp   ! Copy of the data for input to MPI
      ALLOCATE( &
         & zcp(jpiglo,jpjglo) &
         & )
      zcp(:,:) = pval(:,:)
      ! Call the MPI library to find the global maximum
      CALL mpi_allreduce( zcp, pval, jpiglo*jpjglo, mpivar, &
         &                mpi_max, mpi_comm_opa, ierr )
      DEALLOCATE( &
         & zcp &
         & )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#endif
   END SUBROUTINE mpp_global_max

   SUBROUTINE mpp_alltoall_int( kno, kvalsin, kvalsout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoall_int  ***
      !!
      !! ** Purpose : All-to-all exchange of an integer array (kno elements
      !!              per processor).
      !!
      !! ** Method  : MPI alltoall
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-09  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: kno
      INTEGER, DIMENSION(kno*jpnij), INTENT(IN) :: &
         & kvalsin
      INTEGER, DIMENSION(kno*jpnij), INTENT(OUT) :: &
         & kvalsout
      !! * Local declarations
      INTEGER :: ierr
#if defined key_mpp_mpi
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all exchange of the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoall( kvalsin,  kno, mpi_integer, &
         &               kvalsout, kno, mpi_integer, &
         &               mpi_comm_opa, ierr )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
   END SUBROUTINE mpp_alltoall_int

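   ! Illustrative (commented) sketch of the usual pairing of mpp_alltoall_int
   ! with the variable-length exchange below: first exchange one count per
   ! processor with the fixed-length all-to-all, then use the send counts and
   ! the received counts as the count arguments of mpp_alltoallv_int.  The
   ! names isendcnt, irecvcnt, isend and irecv are hypothetical.
   !
   !    INTEGER, DIMENSION(jpnij) :: isendcnt, irecvcnt
   !    CALL mpp_alltoall_int( 1, isendcnt, irecvcnt )
   !    CALL mpp_alltoallv_int( isend, SUM(isendcnt), isendcnt, &
   !       &                    irecv, SUM(irecvcnt), irecvcnt )
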
   SUBROUTINE mpp_alltoallv_int( kvalsin, knoin, kinv, kvalsout, &
      &                          knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_int  ***
      !!
      !! ** Purpose : Variable-length all-to-all exchange (integer version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-09  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: knoin
      INTEGER, INTENT(IN) :: knoout
      INTEGER, DIMENSION(jpnij) :: &
         & kinv, &   ! Number of elements sent to each processor
         & koutv     ! Number of elements received from each processor
      INTEGER, DIMENSION(knoin), INTENT(IN) :: &
         & kvalsin
      INTEGER, DIMENSION(knoout), INTENT(OUT) :: &
         & kvalsout
      !! * Local declarations
      INTEGER :: ierr
      INTEGER :: jproc
#if defined key_mpp_mpi
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) :: &
         & irdsp, &
         & isdsp
      !-----------------------------------------------------------------------
      ! Compute send and receive displacements from the counts
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all exchange of the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( kvalsin,  kinv,  isdsp, mpi_integer, &
         &                kvalsout, koutv, irdsp, mpi_integer, &
         &                mpi_comm_opa, ierr )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
   END SUBROUTINE mpp_alltoallv_int

   SUBROUTINE mpp_alltoallv_real( pvalsin, knoin, kinv, pvalsout, &
      &                           knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_real  ***
      !!
      !! ** Purpose : Variable-length all-to-all exchange (real version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!
      !! History :
      !!      !  06-09  (K. Mogensen)  Original code
      !!----------------------------------------------------------------------
      !! * Arguments
      INTEGER, INTENT(IN) :: knoin
      INTEGER, INTENT(IN) :: knoout
      INTEGER, DIMENSION(jpnij) :: &
         & kinv, &   ! Number of elements sent to each processor
         & koutv     ! Number of elements received from each processor
      REAL(KIND=wp), DIMENSION(knoin), INTENT(IN) :: &
         & pvalsin
      REAL(KIND=wp), DIMENSION(knoout), INTENT(OUT) :: &
         & pvalsout
      !! * Local declarations
      INTEGER :: ierr
      INTEGER :: jproc
#if defined key_mpp_mpi
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) :: &
         & irdsp, &
         & isdsp
      !-----------------------------------------------------------------------
      ! Compute send and receive displacements from the counts
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all exchange of the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( pvalsin,  kinv,  isdsp, mpivar, &
         &                pvalsout, koutv, irdsp, mpivar, &
         &                mpi_comm_opa, ierr )
#elif defined key_mpp_shmem
error "Only MPI support for MPP in NEMOVAR"
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      pvalsout = pvalsin
#endif
   END SUBROUTINE mpp_alltoallv_real

END MODULE obs_mpp
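
! Illustrative usage note (not part of the module): mpp_alltoallv_real follows
! the same calling convention as mpp_alltoallv_int, with REAL(wp) data arrays
! in place of the integer ones.  The names zsend, zrecv, isendcnt and irecvcnt
! are hypothetical.
!
!    CALL mpp_alltoallv_real( zsend, SUM(isendcnt), isendcnt, &
!       &                     zrecv, SUM(irecvcnt), irecvcnt )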