- MODULE obs_mpp
- !!======================================================================
- !! *** MODULE obs_mpp ***
- !! Observation diagnostics: Various MPP support routines
- !!======================================================================
- !! History : 2.0 ! 2006-03 (K. Mogensen) Original code
- !! - ! 2006-05 (K. Mogensen) Reformatted
- !! - ! 2008-01 (K. Mogensen) add mpp_global_max
- !!----------------------------------------------------------------------
- # define mpivar mpi_double_precision
- !!----------------------------------------------------------------------
- !! obs_mpp_bcast_integer : Broadcast an integer array from a processor to all processors
!!   obs_mpp_max_integer   : Find maximum across processors of each value in an integer array
- !! obs_mpp_find_obs_proc : Find processors which should hold the observations
- !! obs_mpp_sum_integers : Sum an integer array from all processors
- !! obs_mpp_sum_integer : Sum an integer from all processors
- !!----------------------------------------------------------------------
- USE dom_oce, ONLY : nproc, mig, mjg ! Ocean space and time domain variables
- USE mpp_map, ONLY : mppmap
- USE in_out_manager
- #if defined key_mpp_mpi
- USE lib_mpp, ONLY : mpi_comm_opa ! MPP library
- #endif
- IMPLICIT NONE
- PRIVATE
- PUBLIC obs_mpp_bcast_integer, & !: Broadcast an integer array from a proc to all procs
- & obs_mpp_max_integer, & !: Find maximum across processors in an integer array
- & obs_mpp_find_obs_proc, & !: Find processors which should hold the observations
- & obs_mpp_sum_integers, & !: Sum an integer array from all processors
- & obs_mpp_sum_integer, & !: Sum an integer from all processors
- & mpp_alltoall_int, &
- & mpp_alltoallv_int, &
- & mpp_alltoallv_real, &
- & mpp_global_max
- !!----------------------------------------------------------------------
- !! NEMO/OPA 3.3 , NEMO Consortium (2010)
- !! $Id: obs_mpp.F90 2513 2010-12-23 16:01:47Z smasson $
- !! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
- !!----------------------------------------------------------------------
- CONTAINS
- SUBROUTINE obs_mpp_bcast_integer( kvals, kno, kroot )
- !!----------------------------------------------------------------------
- !! *** ROUTINE obs_mpp_bcast_integer ***
- !!
- !! ** Purpose : Send array kvals to all processors
- !!
- !! ** Method : MPI broadcast
- !!
- !! ** Action : This does only work for MPI.
- !! MPI_COMM_OPA needs to be replace for OASIS4.!
- !!
- !! References : http://www.mpi-forum.org
- !!----------------------------------------------------------------------
- INTEGER , INTENT(in ) :: kno ! Number of elements in array
- INTEGER , INTENT(in ) :: kroot ! Processor to send data
- INTEGER, DIMENSION(kno), INTENT(inout) :: kvals ! Array to send on kroot, receive for non-kroot
- !
- #if defined key_mpp_mpi
- !
- INTEGER :: ierr
- !
- INCLUDE 'mpif.h'
- !!----------------------------------------------------------------------
- ! Call the MPI library to broadcast data
- CALL mpi_bcast( kvals, kno, mpi_integer, &
- & kroot, mpi_comm_opa, ierr )
- #else
- ! no MPI: empty routine
- #endif
- !
- END SUBROUTINE obs_mpp_bcast_integer
-
   SUBROUTINE obs_mpp_max_integer( kvals, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_max_integer  ***
      !!
      !! ** Purpose : Find maximum across processors in an integer array.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This does only work for MPI.
      !!              It does not work for SHMEM.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in array
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! In: local values; out: element-wise max over all processors
      !
#if defined key_mpp_mpi
      !
      INTEGER ::   ierr                 ! MPI error status (not checked here)
      INTEGER, DIMENSION(kno) ::   ivals   ! temporary receive buffer for the reduction
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      ! Call the MPI library to find the maximum across processors
      CALL mpi_allreduce( kvals, ivals, kno, mpi_integer,   &
         &                mpi_max, mpi_comm_opa, ierr )
      ! Copy the reduced values back into the in/out argument
      kvals(:) = ivals(:)
#else
      ! no MPI: the local values already are the global maximum
#endif
   END SUBROUTINE obs_mpp_max_integer
- SUBROUTINE obs_mpp_find_obs_proc( kobsp, kobsi, kobsj, kno )
- !!----------------------------------------------------------------------
- !! *** ROUTINE obs_mpp_find_obs_proc ***
- !!
- !! ** Purpose : From the array kobsp containing the results of the grid
- !! grid search on each processor the processor return a
- !! decision of which processors should hold the observation.
- !!
- !! ** Method : A temporary 2D array holding all the decisions is
- !! constructed using mpi_allgather on each processor.
- !! If more than one processor has found the observation
- !! with the observation in the inner domain gets it
- !!
- !! ** Action : This does only work for MPI.
- !! It does not work for SHMEM.
- !!
- !! References : http://www.mpi-forum.org
- !!----------------------------------------------------------------------
- INTEGER , INTENT(in ) :: kno
- INTEGER, DIMENSION(kno), INTENT(in ) :: kobsi, kobsj
- INTEGER, DIMENSION(kno), INTENT(inout) :: kobsp
- !
- #if defined key_mpp_mpi
- !
- INTEGER :: ji
- INTEGER :: jj
- INTEGER :: size
- INTEGER :: ierr
- INTEGER :: iobsip
- INTEGER :: iobsjp
- INTEGER :: num_sus_obs
- INTEGER, DIMENSION(kno) :: iobsig, iobsjg
- INTEGER, ALLOCATABLE, DIMENSION(:,:) :: iobsp, iobsi, iobsj
- !!
- INCLUDE 'mpif.h'
- !!----------------------------------------------------------------------
- !-----------------------------------------------------------------------
- ! Call the MPI library to find the maximum accross processors
- !-----------------------------------------------------------------------
- CALL mpi_comm_size( mpi_comm_opa, size, ierr )
- !-----------------------------------------------------------------------
- ! Convert local grids points to global grid points
- !-----------------------------------------------------------------------
- DO ji = 1, kno
- IF ( ( kobsi(ji) >= 1 ) .AND. ( kobsi(ji) <= jpi ) .AND. &
- & ( kobsj(ji) >= 1 ) .AND. ( kobsj(ji) <= jpj ) ) THEN
- iobsig(ji) = mig( kobsi(ji) )
- iobsjg(ji) = mjg( kobsj(ji) )
- ELSE
- iobsig(ji) = -1
- iobsjg(ji) = -1
- ENDIF
- END DO
- !-----------------------------------------------------------------------
- ! Get the decisions from all processors
- !-----------------------------------------------------------------------
- ALLOCATE( iobsp(kno,size) )
- ALLOCATE( iobsi(kno,size) )
- ALLOCATE( iobsj(kno,size) )
- CALL mpi_allgather( kobsp, kno, mpi_integer, &
- & iobsp, kno, mpi_integer, &
- & mpi_comm_opa, ierr )
- CALL mpi_allgather( iobsig, kno, mpi_integer, &
- & iobsi, kno, mpi_integer, &
- & mpi_comm_opa, ierr )
- CALL mpi_allgather( iobsjg, kno, mpi_integer, &
- & iobsj, kno, mpi_integer, &
- & mpi_comm_opa, ierr )
- !-----------------------------------------------------------------------
- ! Find the processor with observations from the lowest processor
- ! number among processors holding the observation.
- !-----------------------------------------------------------------------
- kobsp(:) = -1
- num_sus_obs = 0
- DO ji = 1, kno
- DO jj = 1, size
- IF ( ( kobsp(ji) == -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
- kobsp(ji) = iobsp(ji,jj)
- iobsip = iobsi(ji,jj)
- iobsjp = iobsj(ji,jj)
- ENDIF
- IF ( ( kobsp(ji) /= -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
- IF ( ( iobsip /= iobsi(ji,jj) ) .OR. &
- & ( iobsjp /= iobsj(ji,jj) ) ) THEN
- IF ( ( kobsp(ji) < 1000000 ) .AND. &
- & ( iobsp(ji,jj) < 1000000 ) ) THEN
- num_sus_obs=num_sus_obs+1
- ENDIF
- ENDIF
- IF ( mppmap(iobsip,iobsjp) /= ( kobsp(ji)+1 ) ) THEN
- IF ( ( iobsi(ji,jj) /= -1 ) .AND. &
- & ( iobsj(ji,jj) /= -1 ) ) THEN
- IF ((mppmap(iobsi(ji,jj),iobsj(ji,jj)) == (iobsp(ji,jj)+1))&
- & .OR. ( iobsp(ji,jj) < kobsp(ji) ) ) THEN
- kobsp(ji) = iobsp(ji,jj)
- iobsip = iobsi(ji,jj)
- iobsjp = iobsj(ji,jj)
- ENDIF
- ENDIF
- ENDIF
- ENDIF
- END DO
- END DO
- IF (lwp) WRITE(numout,*) 'Number of suspicious observations: ',num_sus_obs
- DEALLOCATE( iobsj )
- DEALLOCATE( iobsi )
- DEALLOCATE( iobsp )
- #else
- ! no MPI: empty routine
- #endif
- !
- END SUBROUTINE obs_mpp_find_obs_proc
- SUBROUTINE obs_mpp_sum_integers( kvalsin, kvalsout, kno )
- !!----------------------------------------------------------------------
- !! *** ROUTINE obs_mpp_sum_integers ***
- !!
- !! ** Purpose : Sum an integer array.
- !!
- !! ** Method : MPI all reduce.
- !!
- !! ** Action : This does only work for MPI.
- !! It does not work for SHMEM.
- !!
- !! References : http://www.mpi-forum.org
- !!----------------------------------------------------------------------
- INTEGER , INTENT(in ) :: kno
- INTEGER, DIMENSION(kno), INTENT(in ) :: kvalsin
- INTEGER, DIMENSION(kno), INTENT( out) :: kvalsout
- !
- #if defined key_mpp_mpi
- !
- INTEGER :: ierr
- !
- INCLUDE 'mpif.h'
- !!----------------------------------------------------------------------
- !
- !-----------------------------------------------------------------------
- ! Call the MPI library to find the sum across processors
- !-----------------------------------------------------------------------
- CALL mpi_allreduce( kvalsin, kvalsout, kno, mpi_integer, &
- & mpi_sum, mpi_comm_opa, ierr )
- #else
- !-----------------------------------------------------------------------
- ! For no-MPP just return input values
- !-----------------------------------------------------------------------
- kvalsout(:) = kvalsin(:)
- #endif
- !
- END SUBROUTINE obs_mpp_sum_integers
   SUBROUTINE obs_mpp_sum_integer( kvalin, kvalout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integer  ***
      !!
      !! ** Purpose : Sum a single integer
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This does only work for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in ) ::   kvalin    ! Local value to be summed
      INTEGER, INTENT( out) ::   kvalout   ! Sum of kvalin over all processors
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr   ! MPI error status (not checked here)
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalin, kvalout, 1, mpi_integer,   &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalout = kvalin
#endif
      !
   END SUBROUTINE obs_mpp_sum_integer
   SUBROUTINE mpp_global_max( pval )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_global_max  ***
      !!
      !! ** Purpose : Get the maximum value across processors for a global
      !!              real array
      !!
      !! ** Method  : MPI allreduce
      !!
      !! ** Action  : This does only work for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      REAL(KIND=wp), DIMENSION(jpiglo,jpjglo), INTENT(inout) ::   pval   ! In: local field; out: element-wise max over all processors
      !
      INTEGER :: ierr   ! MPI error status (not checked here)
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      REAL(KIND=wp), DIMENSION(:,:), ALLOCATABLE ::   zcp   ! copy of pval used as the send buffer
      !!----------------------------------------------------------------------
      ! Copy data for input to MPI (send and receive buffers must differ)
      ALLOCATE( &
         & zcp(jpiglo,jpjglo) &
         & )
      zcp(:,:) = pval(:,:)
      ! Call the MPI library to find the global maximum
      ! (mpivar is a CPP macro for mpi_double_precision, see top of module)
      CALL mpi_allreduce( zcp, pval, jpiglo*jpjglo, mpivar, &
         &                mpi_max, mpi_comm_opa, ierr )
      DEALLOCATE( &
         & zcp &
         & )
#else
      ! no MPI: the local field already is the global maximum
#endif
      !
   END SUBROUTINE mpp_global_max
- SUBROUTINE mpp_alltoall_int( kno, kvalsin, kvalsout )
- !!----------------------------------------------------------------------
- !! *** ROUTINE mpp_allgatherv ***
- !!
- !! ** Purpose : all to all.
- !!
- !! ** Method : MPI alltoall
- !!
- !! ** Action : This does only work for MPI.
- !! It does not work for SHMEM.
- !!
- !! References : http://www.mpi-forum.org
- !!----------------------------------------------------------------------
- INTEGER , INTENT(in ) :: kno
- INTEGER, DIMENSION(kno*jpnij), INTENT(in ) :: kvalsin
- INTEGER, DIMENSION(kno*jpnij), INTENT( out) :: kvalsout
- !!
- INTEGER :: ierr
- !
- #if defined key_mpp_mpi
- !
- INCLUDE 'mpif.h'
- !-----------------------------------------------------------------------
- ! Call the MPI library to do the all to all operation of the data
- !-----------------------------------------------------------------------
- CALL mpi_alltoall( kvalsin, kno, mpi_integer, &
- & kvalsout, kno, mpi_integer, &
- & mpi_comm_opa, ierr )
- #else
- !-----------------------------------------------------------------------
- ! For no-MPP just return input values
- !-----------------------------------------------------------------------
- kvalsout = kvalsin
- #endif
- !
- END SUBROUTINE mpp_alltoall_int
- SUBROUTINE mpp_alltoallv_int( kvalsin, knoin , kinv , kvalsout, &
- & knoout, koutv )
- !!----------------------------------------------------------------------
- !! *** ROUTINE mpp_alltoallv_int ***
- !!
- !! ** Purpose : all to all (integer version).
- !!
- !! ** Method : MPI alltoall
- !!
- !! ** Action : This does only work for MPI.
- !! It does not work for SHMEM.
- !!
- !! References : http://www.mpi-forum.org
- !!----------------------------------------------------------------------
- INTEGER , INTENT(in) :: knoin
- INTEGER , INTENT(in) :: knoout
- INTEGER, DIMENSION(jpnij) :: kinv, koutv
- INTEGER, DIMENSION(knoin) , INTENT(in ) :: kvalsin
- INTEGER, DIMENSION(knoout), INTENT( out) :: kvalsout
- !!
- INTEGER :: ierr
- INTEGER :: jproc
- !
- #if defined key_mpp_mpi
- !
- INCLUDE 'mpif.h'
- INTEGER, DIMENSION(jpnij) :: irdsp, isdsp
- !-----------------------------------------------------------------------
- ! Compute displacements
- !-----------------------------------------------------------------------
- irdsp(1) = 0
- isdsp(1) = 0
- DO jproc = 2, jpnij
- isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
- irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
- END DO
- !-----------------------------------------------------------------------
- ! Call the MPI library to do the all to all operation of the data
- !-----------------------------------------------------------------------
- CALL mpi_alltoallv( kvalsin, kinv, isdsp, mpi_integer, &
- & kvalsout, koutv, irdsp, mpi_integer, &
- & mpi_comm_opa, ierr )
- #else
- !-----------------------------------------------------------------------
- ! For no-MPP just return input values
- !-----------------------------------------------------------------------
- kvalsout = kvalsin
- #endif
- !
- END SUBROUTINE mpp_alltoallv_int
- SUBROUTINE mpp_alltoallv_real( pvalsin, knoin , kinv , pvalsout, &
- & knoout, koutv )
- !!----------------------------------------------------------------------
- !! *** ROUTINE mpp_alltoallv_real ***
- !!
- !! ** Purpose : all to all (integer version).
- !!
- !! ** Method : MPI alltoall
- !!
- !! ** Action : This does only work for MPI.
- !! It does not work for SHMEM.
- !!
- !! References : http://www.mpi-forum.org
- !!----------------------------------------------------------------------
- INTEGER , INTENT(in ) :: knoin
- INTEGER , INTENT(in ) :: knoout
- INTEGER , DIMENSION(jpnij) :: kinv, koutv
- REAL(wp), DIMENSION(knoin) , INTENT(in ) :: pvalsin
- REAL(wp), DIMENSION(knoout), INTENT( out) :: pvalsout
- !!
- INTEGER :: ierr
- INTEGER :: jproc
- !
- #if defined key_mpp_mpi
- !
- INCLUDE 'mpif.h'
- INTEGER, DIMENSION(jpnij) :: irdsp, isdsp
- !!----------------------------------------------------------------------
- !
- !-----------------------------------------------------------------------
- ! Compute displacements
- !-----------------------------------------------------------------------
- irdsp(1) = 0
- isdsp(1) = 0
- DO jproc = 2, jpnij
- isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
- irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
- END DO
- !-----------------------------------------------------------------------
- ! Call the MPI library to do the all to all operation of the data
- !-----------------------------------------------------------------------
- CALL mpi_alltoallv( pvalsin, kinv, isdsp, mpivar, &
- & pvalsout, koutv, irdsp, mpivar, &
- & mpi_comm_opa, ierr )
- #else
- !-----------------------------------------------------------------------
- ! For no-MPP just return input values
- !-----------------------------------------------------------------------
- pvalsout = pvalsin
- #endif
- !
- END SUBROUTINE mpp_alltoallv_real
- !!======================================================================
- END MODULE obs_mpp