!
#define TRACEBACK write (gol,'("in ",a," (",a,", line",i5,")")') rname, __FILE__, __LINE__; call goErr
#define IF_NOTOK_RETURN(action) if (status/=0) then; TRACEBACK; action; return; end if
#define IF_ERROR_RETURN(action) if (status> 0) then; TRACEBACK; action; return; end if
!
#define IF_NOTOK_MPI(action) if (ierr/=MPI_SUCCESS) then; TRACEBACK; action; return; end if
!
#include "tm5.inc"
!
!----------------------------------------------------------------------------
!                                  TM5                                       !
!----------------------------------------------------------------------------
!BOP
!
! !MODULE: DOMAIN_DECOMP
!
! !DESCRIPTION: Define a distributed grid object and its methods.
!               The horizontal grid is decomposed across longitudes and latitudes.
!\\
!\\
! !INTERFACE:
!
MODULE DOMAIN_DECOMP
  !
  ! !USES:
  !
  use grid, only : TllGridInfo      ! type with lon/lat grid info
  use Go,   only : goErr, goPr, gol ! go = general objects
  use dims, only : okdebug
  use partools                      ! to include mpif.h, ierr, localComm, ...

  IMPLICIT NONE

  PRIVATE
  !
  ! !PUBLIC MEMBER FUNCTIONS:
  !
  public :: Init_DistGrid, Done_DistGrid    ! life cycle routines
  public :: Get_DistGrid                    ! get bounds & grids
  public :: Print_DistGrid                  ! print utility (debug)
  public :: GATHER, SCATTER, UPDATE_HALO    ! communication routines
  public :: GATHER_I_BAND, GATHER_J_BAND    ! communication routines
  public :: SCATTER_I_BAND, SCATTER_J_BAND  ! for distributed slices
  public :: UPDATE_HALO_JBAND               !
  public :: UPDATE_HALO_IBAND               !
  public :: REDUCE                          ! mimic MPI_REDUCE / MPI_ALLREDUCE
  public :: DIST_ARR_STAT                   ! basic statistics of a distributed array
  public :: testcomm                        ! test communication (debug)
  !
  ! !PUBLIC TYPES:
  !
  TYPE, PUBLIC :: DIST_GRID
     PRIVATE
     ! parameters for global domain
     integer           :: im_region    ! number of longitudes
     integer           :: jm_region    ! number of latitudes
     ! parameters for local domain
     integer           :: i_strt       ! begin local domain longitude index
     integer           :: i_stop       ! end   local domain longitude index
     integer           :: i_strt_halo  ! begin halo longitude index
     integer           :: i_stop_halo  ! end   halo longitude index
     integer           :: j_strt       ! begin local domain latitude index
     integer           :: j_stop       ! end   local domain latitude index
     integer           :: j_strt_halo  ! begin halo latitude index
     integer           :: j_stop_halo  ! end   halo latitude index
     type(TllGridInfo) :: lli          ! local  Lat-Lon grid info
     type(TllGridInfo) :: glbl_lli     ! global Lat-Lon grid info
     type(TllGridInfo) :: lli_z        ! global meridional grid info
     integer           :: neighbors(4) ! rank of (west, north, east, south) processes
     logical           :: has_south_pole     ! south pole is in local domain
     logical           :: has_north_pole     ! north pole is in local domain
     logical           :: has_south_boundary ! south boundary is in local domain
     logical           :: has_north_boundary ! north boundary is in local domain
     logical           :: has_west_boundary  ! west  boundary is in local domain
     logical           :: has_east_boundary  ! east  boundary is in local domain
     logical           :: is_periodic        ! covers [-180, 180]
     ! i_start, i_stop, j_start, j_stop of all PEs: (4,npes)
     integer, pointer  :: bounds(:,:)
  END TYPE DIST_GRID
  !
  ! !PRIVATE DATA MEMBERS:
  !
  character(len=*), parameter :: mname='Domain_Decomp_MOD_'
  !
  ! !INTERFACE:
  !
  INTERFACE Init_DistGrid
     MODULE PROCEDURE INIT_DISTGRID_FROM_REGION
     MODULE PROCEDURE INIT_DISTGRID_FROM_LLI
  END INTERFACE

  INTERFACE Update_halo        ! for arrays distributed across both I and J (1st and 2nd dim)
     MODULE PROCEDURE Update_halo_2d_r
     MODULE PROCEDURE Update_halo_3d_r
     MODULE PROCEDURE Update_halo_4d_r
     MODULE PROCEDURE Update_halo_2d_i
  END INTERFACE

  INTERFACE Update_halo_jband  ! for arrays distributed across I (1st dim)
     MODULE PROCEDURE Update_halo_jband_1d_r
     MODULE PROCEDURE Update_halo_jband_2d_r
     MODULE PROCEDURE Update_halo_jband_3d_r
     MODULE PROCEDURE Update_halo_jband_4d_r
  END INTERFACE

  INTERFACE Update_halo_iband  ! for arrays distributed across J (1st dim)
     MODULE PROCEDURE Update_halo_iband_1d_r
  END INTERFACE

  INTERFACE Gather
     MODULE PROCEDURE gather_2d_i
     MODULE PROCEDURE gather_2d_r
     MODULE PROCEDURE gather_3d_r
     MODULE PROCEDURE gather_4d_r
     MODULE PROCEDURE gather_5d_r
  END INTERFACE

  INTERFACE Scatter
     MODULE PROCEDURE scatter_2d_r
     MODULE PROCEDURE scatter_3d_r
     MODULE PROCEDURE scatter_4d_r
  END INTERFACE

  INTERFACE Gather_I_Band
     MODULE PROCEDURE gather_iband_1d_r
!     MODULE PROCEDURE gather_iband_2d_r
     MODULE PROCEDURE gather_iband_3d_r
  END INTERFACE

  INTERFACE Gather_J_Band
!     MODULE PROCEDURE gather_jband_1d_r
     MODULE PROCEDURE gather_jband_2d_r
     MODULE PROCEDURE gather_jband_3d_r
     MODULE PROCEDURE gather_jband_4d_r
  END INTERFACE

  INTERFACE Scatter_I_Band
     MODULE PROCEDURE scatter_iband_1d_r
     MODULE PROCEDURE scatter_iband_2d_r
  END INTERFACE

  INTERFACE Scatter_J_Band
     MODULE PROCEDURE scatter_jband_1d_r
     MODULE PROCEDURE scatter_jband_2d_r
     MODULE PROCEDURE scatter_jband_3d_r
     MODULE PROCEDURE scatter_jband_4d_r
  END INTERFACE

  INTERFACE Reduce
     MODULE PROCEDURE reduce_2d_r
     MODULE PROCEDURE reduce_3d_r
     MODULE PROCEDURE reduce_4d_r
  END INTERFACE

  INTERFACE Dist_Arr_Stat
     MODULE PROCEDURE dist_arr_stat_2d_r
  END INTERFACE
  !
  ! !REVISION HISTORY:
  !   01 Nov 2011 - P. Le Sager - v0
  !   13 Feb 2013 - P. Le Sager - Remove deprecated MPI_GET_EXTENT and
  !                               MPI_TYPE_HVECTOR.
  !                             - Extra garbage clean.
  !   21 Jun 2013 - P. Le Sager - Replace all MPI_SEND with MPI_SSEND in all
  !                               scatter routines to avoid buffering.
  !
  ! !REMARKS:
  !   (1) There are two categories of subroutines here: one to define the
  !       distributed grid objects, the other for communication b/w processes
  !       (update_halo for one-to-one comm, and scatter/gather/reduce for
  !       collective comm).
  !       Communication routines are for data DECOMPOSED on the domain. For
  !       more general comm, see the partools module.
  !
  !   (2) GATHER & SCATTER :
  !       - global arrays have to really be global on root only, and can be
  !         (1,1,..) on other processes.
  !       - global arrays are without halo.
  !       - if not using MPI, gather and scatter just copy arrays, so it may be
  !         better to not call them to save memory in that case.
  !
  !   (3) Be careful when passing a slice (or a pointer to a slice) as argument:
  !
  !       Passing a subarray will *NOT* work most of the time, unless it is a
  !       slice on the last dimension(s). This will give erroneous results:
  !
  !          allocate( gbl_arr(-3:imr, 1:jmr ))
  !          call gather( dgrid, local_arr, gbl_arr(1:imr,:), halo, status)
  !
  !       but passing the full-size array will work:
  !
  !          allocate( gbl_arr(-3:imr, 1:jmr ))
  !          allocate( temp(1:imr,1:jmr) )
  !          call gather( dgrid, local_arr, temp, halo, status)
  !          gbl_arr(1:imr,:) = temp
  !
  !   (4) Scatter[Gather]_I[J]_band are communications between a row or column
  !       of processors and the root processor (optionally the row_root in a
  !       few cases).
  !EOP
  !------------------------------------------------------------------------

CONTAINS

  !--------------------------------------------------------------------------
  !                                  TM5                                     !
  !--------------------------------------------------------------------------
  !BOP
  !
  ! !IROUTINE: DISTGRID_RANGE
  !
  ! !DESCRIPTION: Give the range of indices covered by rank when using nprocs.
  !               This is used for one dimension, and thus called twice.
  !               Distribution is done evenly. E.g., it will distribute 11 boxes
  !
across 3 processes as 4,4,3 on each pe. !\\ !\\ ! !INTERFACE: ! SUBROUTINE DISTGRID_RANGE(ij, rank, nprocs, istart, iend) ! ! !INPUT PARAMETERS: ! integer, intent(in) :: ij ! defines range (1,..,ij) to be distributed integer, intent(in) :: rank ! current process, in (0,.., nprocs-1) integer, intent(in) :: nprocs ! total # of processes ! ! !OUTPUT PARAMETERS: ! integer, intent(out):: istart, iend ! index range covered by rank ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC integer :: iwork1, iwork2 iwork1 = ij/nprocs iwork2 = mod(ij,nprocs) istart = rank * iwork1 + 1 + min(rank, iwork2) iend = istart + iwork1 - 1 if (iwork2 > rank) iend = iend + 1 END SUBROUTINE DISTGRID_RANGE !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: INIT_DISTGRID_FROM_REGION ! ! !DESCRIPTION: initialize a distributed grid object for a TM5 region !\\ !\\ ! !INTERFACE: ! SUBROUTINE INIT_DISTGRID_FROM_REGION( DistGrid, region, rank, nplon, nplat, halo, status) ! ! !USES: ! use grid, only : init use dims, only : xbeg, xend, dx, im, xcyc use dims, only : ybeg, yend, dy, jm, touch_np, touch_sp ! ! !RETURN VALUE: ! type(dist_grid), intent(inout) :: DistGrid ! ! !INPUT PARAMETERS: ! integer, intent(in) :: region ! TM5 region number integer, intent(in) :: rank ! current process in (0,.., nprocs-1) integer, intent(in) :: nplon ! number of processes along longitude integer, intent(in) :: nplat ! number of processes along latitude integer, intent(in), optional :: halo ! halo size (default=0) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! ! (1) Ideally: to call with WIDTH=... but not the ! same halo is used for all data sets. Then, in many subroutines, halo has ! to be independent of the halo carried by the distributed grid. We could ! simplify things by using a halo of 2 for all distributed data, or ! disregard halo in the dist_grid, but we just keep it general (as is). ! ! (2) npe_lat/lon are also available thru partools, but may not have been ! initialized yet, so we keep the nplon/nplat inputs here. Could do ! without though... ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'init_distgrid' integer :: start, end, iwidth, i, iml, jml, iwork(2), lshape(4) real :: yb, xb, dlon, dlat character(len=39) :: name ! for pretty print integer, parameter :: maxrow=5 integer, parameter :: maxcol=5 integer :: i1, j1, j2, k, work character(len=3) :: str1 ! for virtual topology integer, allocatable :: dist_map(:,:) ! 2D process topology (np_lon, np_lat) integer, allocatable :: dist_mapw(:,:) ! 2D process topology with neighbors (np_lon+1, np_lat+1) integer :: rank_lon ! index of current process in azimuthal set (1,..,np_lon) integer :: rank_lat ! index of current process in meridional set (1,..,np_lat) !--------------------------------------------- ! 2D process topology, and mapping 1D <-> 2D !--------------------------------------------- ! besides the periodicity, topology is independent of the region allocate(dist_map(nplon, nplat)) npes = nplat*nplon dist_map = reshape( (/ (i,i=0,npes-1) /), shape=(/ nplon, nplat /)) ! store iwork = maxloc(dist_map, mask=dist_map.eq.rank) rank_lon = iwork(1) ! in [1..nplon] rank_lat = iwork(2) ! in [1..nplat] ! 
Neighbors = 2d process map with extra neighbors allocate(dist_mapw(0:nplon+1, 0:nplat+1)) dist_mapw = MPI_PROC_NULL dist_mapw(1:nplon,1:nplat) = dist_map ! East-West periodicity DistGrid%is_periodic =.false. if (xcyc(region)==1) then dist_mapw( 0,1:nplat) = dist_map(nplon,:) dist_mapw(nplon+1,1:nplat) = dist_map(1,:) DistGrid%is_periodic =.true. end if DistGrid%neighbors = (/ dist_mapw(rank_lon-1, rank_lat ), & ! west dist_mapw(rank_lon, rank_lat+1), & ! north dist_mapw(rank_lon+1, rank_lat ), & ! east dist_mapw(rank_lon, rank_lat-1) /) ! south !--------------------------------------------- ! fill in distributed grid info !--------------------------------------------- ! region boundaries DistGrid%has_south_boundary = (rank_lat == 1 ) DistGrid%has_north_boundary = (rank_lat == nplat) DistGrid%has_west_boundary = (rank_lon == 1 ) DistGrid%has_east_boundary = (rank_lon == nplon) ! poles DistGrid%has_south_pole = DistGrid%has_south_boundary .and. (touch_sp(region) == 1) DistGrid%has_north_pole = DistGrid%has_north_boundary .and. (touch_np(region) == 1) ! max halo that will be used in the code iwidth=0 if (present(halo)) iwidth=halo ! index ranges covered by current process call DistGrid_range(im(region), rank_lon-1, nplon, start, end) ! check we are within the limit, i.e. we must have #boxes >= halo and at least 1. if ( (end-start+1) < max(1,iwidth) ) then write(gol,*)" Too much processors in X (longitude) direction! ", nplon, iwidth, start, end call goErr status=1; TRACEBACK; return end if DistGrid%im_region = im(region) DistGrid%i_strt = start DistGrid%i_stop = end DistGrid%i_strt_halo = start-iwidth DistGrid%i_stop_halo = end+iwidth ! To think about it when/if needed: ! if (DistGrid%has_north_pole) DistGrid%i_stop_halo = end ! if (DistGrid%has_south_pole) DistGrid%i_strt_halo = start call DistGrid_range(jm(region), rank_lat-1, nplat, start, end) if ( (end-start+1) < max(1,iwidth) ) then write(gol,*)" Too much processors in Y (latitude) direction! ", nplat, iwidth, start, end call goErr status=1; TRACEBACK; return end if DistGrid%jm_region = jm(region) DistGrid%j_strt = start DistGrid%j_stop = end DistGrid%j_strt_halo = start-iwidth DistGrid%j_stop_halo = end+iwidth #ifdef parallel_cplng if ( modulo(im(region),nplon) /= 0) then write(gol,'("number of X processors ",i3," does not divide evenly the number of grid boxes",i3)') & nplon, im(region) ; call goErr status=1; TRACEBACK; return endif if ( modulo(jm(region),nplat) /= 0) then write(gol,'("number of Y processors ",i3," does not divide evenly the number of grid boxes ",i3)') & nplat, jm(region) ; call goErr status=1; TRACEBACK; return endif #endif !--------------------------------------------- ! geophysical grids !--------------------------------------------- ! 
grid spacing: dlon = real(xend(region)-xbeg(region))/im(region) dlat = real(yend(region)-ybeg(region))/jm(region) !------ local iml = DistGrid%i_stop - DistGrid%i_strt + 1 jml = DistGrid%j_stop - DistGrid%j_strt + 1 xb = xbeg(region) + ( DistGrid%i_strt - 0.5 ) * dlon yb = ybeg(region) + ( DistGrid%j_strt - 0.5 ) * dlat write(name, '("distributed grid on process ", i5.5)') rank call INIT( DistGrid%lli, xb, dlon, iml, yb, dlat, jml, status, name=name ) IF_NOTOK_RETURN(status=1) !------ global xb = xbeg(region) + 0.5 * dlon yb = ybeg(region) + 0.5 * dlat write(name, '("global grid on process ", i5.5)') rank call INIT( DistGrid%glbl_lli, xb, dlon, im(region), yb, dlat, jm(region), status, name=name ) IF_NOTOK_RETURN(status=1) !------ global meridional grid name = "merid " // trim(name) call INIT( DistGrid%lli_z, 0.0, 360., 1, yb, dlat, jm(region), status, name=name ) IF_NOTOK_RETURN(status=1) !--------------------------------------------- ! store shapes info of all PE on all PE (used only on root so far, but may ! become useful if we gather/scatter to/from non-root in the future) !--------------------------------------------- #ifdef MPI allocate(DistGrid%bounds(4,0:npes-1)) lshape = (/ DistGrid%i_strt, DistGrid%i_stop, DistGrid%j_strt, DistGrid%j_stop /) call MPI_ALLGATHER(lshape, 4, MPI_INTEGER, DistGrid%bounds, 4, MPI_INTEGER, localComm, ierr) #endif !--------------------------------------------- ! PRINT (Debug) 2D process topology !--------------------------------------------- if(okdebug)then write(gol,*) '------------- world view ----------------------' ; call goPr write(gol, '("process #", i5, " out of ", i5)') myid, npes ; call goPr write(gol, '("2D rank = [",i4,", ",i4,"]")') rank_lon, rank_lat ; call goPr i1=min(maxcol,nplon) str1='...' if (i1==nplon) str1='' work=nplat/2 j1=min(work, maxrow) j2=max(nplat-maxrow+1, work+1) do k=nplat,j2,-1 write(gol,*) dist_map(1:i1,k),str1 ; call goPr enddo if ((j2-1) /= j1) write(gol,*)" ..." ; call goPr do k=j1,1,-1 write(gol,*) dist_map(1:i1,k),str1 ; call goPr enddo write(gol,*) '' write(gol,*) '-----------------------------------------------' ; call goPr end if ! Done deallocate(dist_map) deallocate(dist_mapw) status = 0 END SUBROUTINE INIT_DISTGRID_FROM_REGION !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: INIT_DISTGRID_FROM_LLI ! ! !DESCRIPTION: initialize a distributed grid object from a lli object !\\ !\\ ! !INTERFACE: ! SUBROUTINE INIT_DISTGRID_FROM_LLI( DistGrid, lli, rank, halo, status) ! ! !USES: ! use grid, only : init, assignment(=) ! ! !INPUT/OUTPUT PARAMETERS: ! type(dist_grid), intent(inout) :: DistGrid ! ! !INPUT PARAMETERS: ! type(TllGridInfo), intent(in) :: lli integer, intent(in) :: rank ! current process in (0,.., nprocs-1) integer, optional, intent(in) :: halo ! halo size (default=0) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 18 Nov 2013 - Ph. Le Sager - v0, adapted from init_distgrid_from_region ! ! !REMARKS: ! ! (1) See INIT_DISTGRID_FROM_REGION for comment about halo input. ! (2) Uses npe_lat/lon from partools module : TM5_MPI_Init2 must have been ! called before. !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'init_distgrid_from_lli' integer :: start, end, iwidth, i, iml, jml, iwork(2), lshape(4) real :: yb, xb, dlon, dlat character(len=39) :: name integer :: nplon ! 
number of processes along longitude integer :: nplat ! number of processes along latitude ! for pretty print integer, parameter :: maxrow=5 integer, parameter :: maxcol=5 integer :: i1, j1, j2, k, work character(len=3) :: str1 ! for virtual topology integer, allocatable :: dist_map(:,:) ! 2D process topology (np_lon, np_lat) integer, allocatable :: dist_mapw(:,:) ! 2D process topology with neighbors (np_lon+1, np_lat+1) integer :: rank_lon ! index of current process in azimuthal set (1,..,np_lon) integer :: rank_lat ! index of current process in meridional set (1,..,np_lat) !--------------------------------------------- ! test for valid input !--------------------------------------------- if (.not.(associated( lli%lon_deg ))) then write(gol,*)" The LLI object has not been initialized. " ; call goErr write(gol,*)" A distributed grid for MPI cannot be build." ; call goErr status=1; TRACEBACK; return end if nplat = npe_lat nplon = npe_lon !--------------------------------------------- ! 2D process topology, and mapping 1D <-> 2D !--------------------------------------------- ! besides the periodicity, topology is independent of the region allocate(dist_map(nplon, nplat)) npes = nplat*nplon dist_map = reshape( (/ (i,i=0,npes-1) /), shape=(/ nplon, nplat /)) ! store iwork = maxloc(dist_map, mask=dist_map.eq.rank) rank_lon = iwork(1) ! in [1..nplon] rank_lat = iwork(2) ! in [1..nplat] ! Neighbors = 2d process map with extra neighbors allocate(dist_mapw(0:nplon+1, 0:nplat+1)) dist_mapw = MPI_PROC_NULL dist_mapw(1:nplon,1:nplat) = dist_map ! East-West periodicity DistGrid%is_periodic =.false. if ( lli%dlon_deg*lli%im == 360. ) then dist_mapw( 0,1:nplat) = dist_map(nplon,:) dist_mapw(nplon+1,1:nplat) = dist_map(1,:) DistGrid%is_periodic =.true. end if DistGrid%neighbors = (/ dist_mapw(rank_lon-1, rank_lat ), & ! west dist_mapw(rank_lon, rank_lat+1), & ! north dist_mapw(rank_lon+1, rank_lat ), & ! east dist_mapw(rank_lon, rank_lat-1) /) ! south !--------------------------------------------- ! fill in distributed grid info !--------------------------------------------- ! region boundaries DistGrid%has_south_boundary = (rank_lat == 1 ) DistGrid%has_north_boundary = (rank_lat == nplat) DistGrid%has_west_boundary = (rank_lon == 1 ) DistGrid%has_east_boundary = (rank_lon == nplon) ! poles DistGrid%has_south_pole = DistGrid%has_south_boundary .and. (lli%blat_deg(0) == -90.0) DistGrid%has_north_pole = DistGrid%has_north_boundary .and. (lli%blat_deg(lli%jm) == 90.0) ! max halo that will be used in the code iwidth=0 if (present(halo)) iwidth=halo ! index ranges covered by current process call DistGrid_range(lli%im, rank_lon-1, nplon, start, end) ! check we are within the limit, i.e. we must have #boxes >= halo and at least 1. if ( (end-start+1) < max(1,iwidth) ) then write(gol,*)" Too much processors in X (longitude) direction:", nplon, iwidth, start, end call goErr status=1; TRACEBACK; return end if DistGrid%im_region = lli%im DistGrid%i_strt = start DistGrid%i_stop = end DistGrid%i_strt_halo = start-iwidth DistGrid%i_stop_halo = end+iwidth ! To think about it when/if needed: ! if (DistGrid%has_north_pole) DistGrid%i_stop_halo = end ! 
if (DistGrid%has_south_pole) DistGrid%i_strt_halo = start call DistGrid_range(lli%jm, rank_lat-1, nplat, start, end) if ( (end-start+1) < max(1,iwidth) ) then write(gol,*)" Too much processors in Y (latitude) direction:", nplat, iwidth, start, end call goErr status=1; TRACEBACK; return end if DistGrid%jm_region = lli%jm DistGrid%j_strt = start DistGrid%j_stop = end DistGrid%j_strt_halo = start-iwidth DistGrid%j_stop_halo = end+iwidth !--------------------------------------------- ! geophysical grids !--------------------------------------------- !------ tile iml = DistGrid%i_stop - DistGrid%i_strt + 1 jml = DistGrid%j_stop - DistGrid%j_strt + 1 xb = lli%lon_deg(1) + ( DistGrid%i_strt - 1 ) * lli%dlon_deg yb = lli%lat_deg(1) + ( DistGrid%j_strt - 1 ) * lli%dlat_deg write(name, '("distributed grid on process ", i5.5)') rank call INIT( DistGrid%lli, xb, lli%dlon_deg, iml, yb, lli%dlat_deg, jml, status, name=name ) IF_NOTOK_RETURN(status=1) !------ world DistGrid%glbl_lli = lli !------ world meridional grid name = "merid " call INIT( DistGrid%lli_z, 0.0, 360., 1, yb, lli%dlat_deg, lli%jm, status, name=name ) IF_NOTOK_RETURN(status=1) !--------------------------------------------- ! store shapes info of all PE on all PE (used only on root so far, but may ! become useful if we gather/scatter to/from non-root in the future) !--------------------------------------------- #ifdef MPI allocate(DistGrid%bounds(4,0:npes-1)) lshape = (/ DistGrid%i_strt, DistGrid%i_stop, DistGrid%j_strt, DistGrid%j_stop /) call MPI_ALLGATHER(lshape, 4, MPI_INTEGER, DistGrid%bounds, 4, MPI_INTEGER, localComm, ierr) #endif !--------------------------------------------- ! PRINT (Debug) 2D process topology !--------------------------------------------- if(okdebug)then write(gol,*) '------------- world view ----------------------' ; call goPr write(gol, '("process #", i5, " out of ", i5)') myid, npes ; call goPr write(gol, '("2D rank = [",i4,", ",i4,"]")') rank_lon, rank_lat ; call goPr i1=min(maxcol,nplon) str1='...' if (i1==nplon) str1='' work=nplat/2 j1=min(work, maxrow) j2=max(nplat-maxrow+1, work+1) do k=nplat,j2,-1 write(gol,*) dist_map(1:i1,k),str1 ; call goPr enddo if ((j2-1) /= j1) write(gol,*)" ..." ; call goPr do k=j1,1,-1 write(gol,*) dist_map(1:i1,k),str1 ; call goPr enddo write(gol,*) '' write(gol,*) '-----------------------------------------------' ; call goPr end if ! Done deallocate(dist_map) deallocate(dist_mapw) status = 0 END SUBROUTINE INIT_DISTGRID_FROM_LLI !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: DONE_DISTGRID ! ! !DESCRIPTION: deallocate distributed grid object elements !\\ !\\ ! !INTERFACE: ! SUBROUTINE DONE_DISTGRID( DistGrid, STATUS ) ! ! !USES: ! use grid, only : done ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(inout) :: DistGrid ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'Done_Distgrid' call done(DistGrid%lli, status) IF_NOTOK_RETURN(status=1) call done(DistGrid%lli_z, status) IF_NOTOK_RETURN(status=1) call done(DistGrid%glbl_lli, status) IF_NOTOK_RETURN(status=1) if (associated(DistGrid%bounds)) deallocate(DistGrid%bounds) status=0 END SUBROUTINE DONE_DISTGRID !EOC !-------------------------------------------------------------------------- ! TM5 ! 
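!--------------------------------------------------------------------------
! Illustrative sketch (not part of the original code) of the life cycle of a
! DIST_GRID object as seen from a caller. The names "dgrid", "region",
! "local" and "global" are placeholders assumed for this example; "myid",
! "npe_lon" and "npe_lat" are provided by the partools module.
!
!    type(dist_grid)   :: dgrid
!    integer           :: i1, i2, j1, j2, imr, jmr, status
!    real, allocatable :: local(:,:), global(:,:)
!
!    call Init_DistGrid( dgrid, region, myid, npe_lon, npe_lat, halo=2, status=status )
!    call Get_DistGrid ( dgrid, i_strt=i1, i_stop=i2, j_strt=j1, j_stop=j2, &
!                        i_wrld=imr, j_wrld=jmr )
!
!    allocate( local(i1-2:i2+2, j1-2:j2+2) )       ! local domain plus halo of 2
!    allocate( global(imr,jmr) )                   ! global domain (meaningful on root)
!    ...
!    call Update_halo  ( dgrid, local, 2, status )         ! refresh halo cells
!    call Gather       ( dgrid, local, global, 2, status ) ! collect interiors on root
!    call Done_DistGrid( dgrid, status )
!--------------------------------------------------------------------------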
!-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GET_DISTGRID ! ! !DESCRIPTION: provide quick access to object elements (bounds and grids), ! while keeping them private. !\\ !\\ ! !INTERFACE: ! SUBROUTINE GET_DISTGRID(DistGrid, & i_strt, i_stop, i_strt_halo, i_stop_halo, & j_strt, j_stop, j_strt_halo, j_stop_halo, & i_wrld, j_wrld, & hasSouthPole, hasNorthPole, & hasSouthBorder, hasNorthBorder, & hasEastBorder, hasWestBorder, & lli, lli_z, global_lli ) ! ! !USES: ! use grid, only : assignment(=) ! to copy lli ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, optional :: i_strt, i_stop, i_strt_halo, i_stop_halo integer, optional :: j_strt, j_stop, j_strt_halo, j_stop_halo integer, optional :: i_wrld, j_wrld logical, optional :: hasSouthPole, hasNorthPole logical, optional :: hasSouthBorder, hasNorthBorder logical, optional :: hasWestBorder, hasEastBorder type(TllGridInfo), optional :: lli ! local Lat-Lon grid info type(TllGridInfo), optional :: global_lli ! global Lat-Lon grid info type(TllGridInfo), optional :: lli_z ! global zonal grid info ! ! !REMARKS: You must "call DONE(lli, stat)" if you request lli, once you ! are done(!) with it, to avoid memory leak. ! !EOP !------------------------------------------------------------------------ !BOC if (present(i_strt)) i_strt = DistGrid%i_strt if (present(i_stop)) i_stop = DistGrid%i_stop if (present(i_strt_halo)) i_strt_halo = DistGrid%i_strt_halo if (present(i_stop_halo)) i_stop_halo = DistGrid%i_stop_halo if (present(j_strt)) j_strt = DistGrid%j_strt if (present(j_stop)) j_stop = DistGrid%j_stop if (present(j_strt_halo)) j_strt_halo = DistGrid%j_strt_halo if (present(j_stop_halo)) j_stop_halo = DistGrid%j_stop_halo if (present(i_wrld)) i_wrld = DistGrid%im_region if (present(j_wrld)) j_wrld = DistGrid%jm_region if (present(hasSouthPole)) hasSouthPole = DistGrid%has_south_pole if (present(hasNorthPole)) hasNorthPole = DistGrid%has_north_pole if (present(hasSouthBorder)) hasSouthBorder = DistGrid%has_south_boundary if (present(hasNorthBorder)) hasNorthBorder = DistGrid%has_north_boundary if (present(hasEastBorder )) hasEastBorder = DistGrid%has_east_boundary if (present(hasWestBorder )) hasWestBorder = DistGrid%has_west_boundary if (present(lli)) lli = DistGrid%lli if (present(global_lli)) global_lli = DistGrid%glbl_lli if (present(lli_z)) lli_z = DistGrid%lli_z END SUBROUTINE GET_DISTGRID !EOC #ifdef MPI /* MPI TYPES */ !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GET_HALO_TYPE ! ! !DESCRIPTION: Returns derived MPI types needed for halo communications, ! and the start indices for the send & receive communications. ! Four of each are returned, one for each side of the domain, ! in the following order: West, North, East, South. !\\ !\\ ! !INTERFACE: ! SUBROUTINE GET_HALO_TYPE( DistGrid, arr_shape, halo, datatype, srtype, ijsr, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: arr_shape(:) ! shape of local array integer, intent(in) :: halo ! halo size integer, intent(in) :: datatype ! basic MPI datatype ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: srtype(4) ! derived MPI datatypes for send/recv integer, intent(out) :: ijsr(4,4) ! start indices for send/recv integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! 
(1) Not tested on all imaginable cases, but should work with any size ! GE 2, and any of the basic numerical MPI_xxx datatype ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'get_halo_type' integer :: nsslice, weslice, nstype, wetype ! MPI derived datatypes integer :: n, m, hstride ! sizes to build datatypes integer :: ndims, sz, i0, j0, i1, j1 integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, vstride sz = size(arr_shape) ! collapse third and above dimensions ndims = 1 if (sz > 2) ndims = product(arr_shape(3:)) ! strides CALL MPI_TYPE_GET_EXTENT( datatype, lb, sizeoftype, ierr) IF_NOTOK_MPI(status=1) hstride = arr_shape(1) vstride = arr_shape(1)*arr_shape(2)*sizeoftype ! size of data slice to carry n = DistGrid%I_STOP - DistGrid%I_STRT + 1 m = DistGrid%J_STOP - DistGrid%J_STRT + 1 ! Type for North and South halo regions ! -------------------------------------- ! halo lines of lenght N, separated by hstride call MPI_TYPE_VECTOR (halo, n, hstride, datatype, nsslice, ierr) IF_NOTOK_MPI(status=1) ! stack 3rd (and above) dimension if any if (ndims == 1) then nstype = nsslice else ! note : also works with ndims=1 call MPI_TYPE_CREATE_HVECTOR (ndims, 1, vstride, nsslice, nstype, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(nsslice, ierr) IF_NOTOK_MPI(status=1) end if call MPI_TYPE_COMMIT (nstype, ierr) IF_NOTOK_MPI(status=1) ! Type for West and East halo regions ! ------------------------------------ ! M lines of lenght 'halo', separated by hstride call MPI_TYPE_VECTOR (m, halo, hstride, datatype, weslice, ierr) IF_NOTOK_MPI(status=1) ! stack 3rd (and above) dimension if (ndims == 1) then wetype = weslice else ! note : also works with ndims=1 call MPI_TYPE_CREATE_HVECTOR (ndims, 1, vstride, weslice, wetype, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(weslice, ierr) IF_NOTOK_MPI(status=1) end if call MPI_TYPE_COMMIT (wetype, ierr) IF_NOTOK_MPI(status=1) ! Buffers anchors ! ---------------- ! Assuming that neighbors are stored in (West, North, East, South) order i0 = DistGrid%i_strt j0 = DistGrid%j_strt i1 = DistGrid%i_stop j1 = DistGrid%j_stop srtype = (/ wetype, nstype, wetype, nstype /) ijsr(:,1) = (/ i0, i0, i1+1-halo, i0 /) ! i-start location of buffer to send ijsr(:,2) = (/ j0, j1-halo+1, j0, j0 /) ! j-start location of buffer to send ijsr(:,3) = (/ i0-halo, i0, i1+1, i0 /) ! i-start location of buffer to receive ijsr(:,4) = (/ j0, j1+1, j0, j0-halo /) ! j-start location of buffer to receive status=0 END SUBROUTINE GET_HALO_TYPE !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GET_INTERIOR_TYPE ! ! !DESCRIPTION: Returns derived MPI types that describe the interior domains ! needed for collective communications. !\\ !\\ ! !INTERFACE: ! SUBROUTINE GET_INTERIOR_TYPE( DistGrid, shp, datatype, linterior, ginterior, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: shp(:) ! shape of local array integer, intent(in) :: datatype ! basic MPI datatype ! ! !OUTPUT PARAMETERS: ! ! derived MPI datatypes describing distributed interior domains: integer, intent(out) :: ginterior(npes-1) ! within global array (used by root, as many as NPES-1) integer, intent(out) :: linterior ! within local array (used by non-root) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! 
(1) input must be checked before by calling CHECK_DIST_ARR first ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'get_interior_type' integer :: gslice, lslice ! intermediate datatypes integer :: n, m ! sizes to build datatypes integer :: hlstride, hgstride ! strides to build datatypes integer :: stack, sz, klm integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, vlstride, vgstride ! init : number of dimensions, default value sz = size(shp) ginterior = MPI_DATATYPE_NULL linterior = MPI_DATATYPE_NULL ! collapse third and above dimensions stack = 1 if (sz > 2) stack = product(shp(3:)) ! size of data slice to carry n = DistGrid%I_STOP - DistGrid%I_STRT + 1 m = DistGrid%J_STOP - DistGrid%J_STRT + 1 CALL MPI_TYPE_GET_EXTENT( datatype, lb, sizeoftype, ierr) IF_NOTOK_MPI(status=1) ! horizontal global stride (in data) hgstride = DistGrid%im_region ! vertical global stride (in byte) vgstride = DistGrid%im_region * DistGrid%jm_region * sizeoftype ! local strides (may be different than n and n*m because of halo) hlstride = shp(1) ! in data vlstride = shp(1)*shp(2)*sizeoftype ! in byte if ( isRoot ) then do klm=1,npes-1 ! horizontal chunk is M lines of lenght N, separated by a global ! stride n = DistGrid%bounds(2,klm) - DistGrid%bounds(1,klm) + 1 m = DistGrid%bounds(4,klm) - DistGrid%bounds(3,klm) + 1 call MPI_TYPE_VECTOR (m, n, hgstride, datatype, gslice, ierr) IF_NOTOK_MPI(status=1) ! stack 3rd and above dimensions if (stack == 1) then ginterior(klm) = gslice else ! note : also works with stack=1 call MPI_TYPE_CREATE_HVECTOR(stack, 1, vgstride, gslice, ginterior(klm), ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(gslice, ierr) IF_NOTOK_MPI(status=1) end if call MPI_TYPE_COMMIT (ginterior(klm), ierr) IF_NOTOK_MPI(status=1) end do else ! local interior is basically M lines of lenght N, separated by a local ! stride call MPI_TYPE_VECTOR (m, n, hlstride, datatype, lslice, ierr) IF_NOTOK_MPI(status=1) ! stack 3rd and above dimensions if (stack == 1) then linterior = lslice else ! note : also works with stack=1 call MPI_TYPE_CREATE_HVECTOR (stack, 1, vlstride, lslice, linterior, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(lslice, ierr) IF_NOTOK_MPI(status=1) end if call MPI_TYPE_COMMIT (linterior, ierr) IF_NOTOK_MPI(status=1) end if status=0 END SUBROUTINE GET_INTERIOR_TYPE !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: FREE_DERIVED_TYPE ! ! !DESCRIPTION: free all MPI derived datatypes in a vector !\\ !\\ ! !INTERFACE: ! SUBROUTINE FREE_DERIVED_TYPE( datatype ) ! ! !INPUT/OUTPUT PARAMETERS: ! integer, intent(inout) :: datatype(:) ! set of derived MPI datatypes ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC integer :: i, j integer :: res(size(datatype)) ! hold unique elements integer :: k ! number of unique elements ! Get list of unique handle(s) ! ---------------------------- k = 1 res(1) = 1 outer: do i=2,size(datatype) ! look for a match do j=1,k if (datatype(res(j)) == datatype(i)) cycle outer ! match end do ! no match : add it to the list k = k + 1 res(k) = i end do outer ! Free handle(s) ! 
--------------------------- do i=1,k if (datatype(res(i)) /= MPI_DATATYPE_NULL) & call MPI_TYPE_FREE(datatype(res(i)), ierr) end do END SUBROUTINE FREE_DERIVED_TYPE !EOC #endif /* MPI TYPES */ !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: CHECK_DIST_ARR ! ! !DESCRIPTION: Check that the shape of a local array correspond to an array ! distributed on current process. This check is done on the ! first 2 dimensions only, along which the domain ! decomposition is done. ! ! Optionally: check shape of a global array. If arrays are 3D ! or more, the 3rd and above dimensions of local and global ! arrays are also compared. (This becomes "2D or more" for I- ! and J-Slices.) !\\ !\\ ! !INTERFACE: ! SUBROUTINE CHECK_DIST_ARR( DistGrid, shp, halo, status, glbl_shp, jband, iband, has_global ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: shp(:) ! shape of local array integer, intent(in) :: halo ! halo size ! integer, intent(in), optional :: glbl_shp(:) ! shape of global array logical, intent(in), optional :: jband, iband ! is it a zonal or meridional slice? logical, intent(in), optional :: has_global ! current proc hold global array (default is root) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: i-band refers to a unique i value, i.e. a meridional dataset. ! j-band refers to a unique j value, i.e. a zonal dataset. ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'check_dist_arr ' integer :: n, m, sz integer, allocatable :: glbsz(:) logical :: x_jband, x_iband, hold_global status = 0 ! check inputs x_jband=.false. x_iband=.false. if(present(iband))x_iband=iband if(present(jband))x_jband=jband if(x_iband.and.x_jband)then write (gol,*) "CHECK_DIST_ARR: cannot have **both** I- and J-Slices" ; call goErr TRACEBACK; status=1; return end if ! by default global array is expected on root hold_global = isRoot if (present(has_global)) hold_global=has_global ! required size w/o halo n = DistGrid%I_STOP - DistGrid%I_STRT + 1 m = DistGrid%J_STOP - DistGrid%J_STRT + 1 ! 
check shape of array if (x_iband) then if (shp(1) /= m+2*halo) then write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr write (gol,'(" local array : ", i4)') shp(1) ; call goErr write (gol,'(" should be : ", i4)') m+2*halo ; call goErr write (gol,'(" with J-stop : ", i4)') DistGrid%J_STOP ; call goErr write (gol,'(" J-start: ", i4)') DistGrid%J_STRT ; call goErr TRACEBACK; status=1; return end if else if (x_jband) then if (shp(1) /= n+2*halo) then write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr write (gol,'(" local array : ",2i4)') shp(1) ; call goErr write (gol,'(" should be : ",2i4)') n+2*halo ; call goErr write (gol,'(" with J-stop : ", i4)') DistGrid%I_STOP ; call goErr write (gol,'(" J-start: ", i4)') DistGrid%I_STRT ; call goErr TRACEBACK; status=1; return end if else if ((shp(1) /= n+2*halo).or.(shp(2) /= m+2*halo)) then write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr write (gol,'(" local array : ",2i4)') shp(1:2) ; call goErr write (gol,'(" should be : ",2i4)') n+2*halo, m+2*halo ; call goErr write (gol,'(" w/ I/J-stop : ", i4)') DistGrid%I_STOP, DistGrid%J_STOP ; call goErr write (gol,'(" I/J-start: ", i4)') DistGrid%I_STRT, DistGrid%J_STRT ; call goErr TRACEBACK; status=1; return end if end if ! check shape of global array on root if (present(glbl_shp) .and. hold_global) then sz = size(shp) if (sz /= size(glbl_shp)) then write (gol,'("CHECK_DIST_ARR : global and local arrays have different rank:")') ; call goErr write (gol,'(" local rank : ", i4)') sz ; call goErr write (gol,'(" global rank : ", i4)') size(glbl_shp) ; call goErr TRACEBACK; status=1; return end if allocate(glbsz(sz)) glbsz = shp if (x_iband) then glbsz(1) = DistGrid%jm_region else if (x_jband) then glbsz(1) = DistGrid%im_region else glbsz(1:2) = (/ DistGrid%im_region, DistGrid%jm_region /) end if if ( any (glbl_shp /= glbsz) ) then write (gol,'("CHECK_DIST_ARR : global array has wrong shape:")') ; call goErr write (gol,*) " shape(global) : ", glbl_shp ; call goErr write (gol,*) " im [and/or] jm_region/-extra dims- : ", glbsz ; call goErr TRACEBACK; status=1; return end if deallocate(glbsz) end if END SUBROUTINE CHECK_DIST_ARR !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_IBAND_1D_R ! ! !DESCRIPTION: update halo cells of a vector distributed along latitudes !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_IBAND_1D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%j_strt-halo:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 7 Jan 2016 - Ph. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_iband_1d_r' integer :: j0, j1 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,4), req(4) integer :: k, sz(1), tag_snd(2), tag_rcv(2) integer :: ijsr(2,2), nghbr(2) ! check input if ( halo == 0 ) return sz = shape(dist_array) j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! degenerate case if (npe_lat==1) return if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, iband=.true. ) IF_NOTOK_RETURN(status=1) end if ! Buffers anchors ! ---------------- ijsr(:,1) = (/ j1-halo+1, j0 /) ! 
j-start location of north and south buffers to send ijsr(:,2) = (/ j1+1, j0-halo /) ! j-start location of north and south buffers to receive ! Communicate ! ---------------- ! only south and north tag_snd = (/2,4/) tag_rcv = (/4,2/) nghbr = (/2,4/) neigh : do k=1,2 call MPI_ISEND( dist_array( ijsr(k,1)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array( ijsr(k,2)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr) end do neigh call MPI_WAITALL(4, req, stat, ierr) IF_NOTOK_MPI(status=1) #endif status = 0 END SUBROUTINE UPDATE_HALO_IBAND_1D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_JBAN_1D_R ! ! !DESCRIPTION: update halo cells of a decomposed zonal vector !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_JBAND_1D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 20 Feb 2012 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_jband_1d_r' integer :: i0, i1 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,4), req(4) integer :: k, sz(1), tag_snd(2), tag_rcv(2) integer :: ijsr(2,2), nghbr(2) ! check input if ( halo == 0 ) return sz = shape(dist_array) i0 = DistGrid%i_strt i1 = DistGrid%i_stop ! degenerate case if (npe_lon==1) then if (DistGrid%is_periodic) then dist_array(i0-halo:i0-1) = dist_array(i1-halo+1:i1) dist_array(i1+1:i1+halo) = dist_array(i0:i0+halo-1) end if return end if if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. ) IF_NOTOK_RETURN(status=1) end if ! Buffers anchors ! ---------------- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive ! Communicate ! ---------------- ! only east and west tag_snd = (/1,3/) tag_rcv = (/3,1/) nghbr = (/1,3/) neigh : do k=1,2 call MPI_ISEND( dist_array( ijsr(k,1)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array( ijsr(k,2)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr) end do neigh call MPI_WAITALL(4, req, stat, ierr) IF_NOTOK_MPI(status=1) #else if ((halo/=0).and.DistGrid%is_periodic) then i0 = DistGrid%i_strt i1 = DistGrid%i_stop dist_array(i0-halo:i0-1) = dist_array(i1-halo+1:i1) dist_array(i1+1:i1+halo) = dist_array(i0:i0+halo-1) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_JBAND_1D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_JBAND_2D_R ! ! !DESCRIPTION: update halo cells of a decomposed zonal 2d slice !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_JBAND_2D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 20 Feb 2012 - P. Le Sager - v0 ! 
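!
! !REMARKS:
!   Illustrative usage sketch (an assumption, not from the original code):
!   for a zonal slice "zslice" with "lm" levels, distributed along
!   longitudes with a halo of 1, a caller would do something like
!
!      call Get_DistGrid( dgrid, i_strt=i1, i_stop=i2 )
!      allocate( zslice(i1-1:i2+1, lm) )
!      ...
!      call Update_halo_jband( dgrid, zslice, 1, status )
!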
!EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_jband_2d_r' integer :: i0, i1 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype integer :: k, sz(2), tag_snd(2), tag_rcv(2) integer :: ijsr(2,2), nghbr(2) status = 0 ! check input if ( halo == 0 ) return sz = shape(dist_array) i0 = DistGrid%i_strt i1 = DistGrid%i_stop ! degenerate case if (npe_lon==1) then if (DistGrid%is_periodic) then dist_array(i0-halo:i0-1,:) = dist_array(i1-halo+1:i1,:) dist_array(i1+1:i1+halo,:) = dist_array(i0:i0+halo-1,:) end if return end if if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. ) IF_NOTOK_RETURN(status=1) end if ! Buffers anchors ! ---------------- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive ! pack data !---------- call MPI_TYPE_VECTOR (sz(2), halo, sz(1), my_real, wetype, ierr) call MPI_TYPE_COMMIT (wetype, ierr) ! Communicate ! ---------------- tag_snd = (/1,3/) tag_rcv = (/3,1/) nghbr = (/1,3/) neigh : do k=1,2 ! only east and west call MPI_ISEND( dist_array(ijsr(k,1),1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array(ijsr(k,2),1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr) end do neigh call MPI_WAITALL(4, req, stat, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(wetype, ierr) #else if ((halo/=0).and.DistGrid%is_periodic) then i0 = DistGrid%i_strt i1 = DistGrid%i_stop dist_array(i0-halo:i0-1,:) = dist_array(i1-halo+1:i1,:) dist_array(i1+1:i1+halo,:) = dist_array(i0:i0+halo-1,:) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_JBAND_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_JBAND_3D_R ! ! !DESCRIPTION: update halo cells of a decomposed zonal 3d slice !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_JBAND_3D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:,:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 20 Feb 2012 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_jband_3d_r' integer :: i0, i1 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype integer :: k, sz(3), tag_snd(2), tag_rcv(2) integer :: ijsr(2,2), nghbr(2) status = 0 ! check input if ( halo == 0 ) return sz = shape(dist_array) i0 = DistGrid%i_strt i1 = DistGrid%i_stop ! degenerate case if (npe_lon==1) then if (DistGrid%is_periodic) then dist_array(i0-halo:i0-1,:,:) = dist_array(i1-halo+1:i1,:,:) dist_array(i1+1:i1+halo,:,:) = dist_array(i0:i0+halo-1,:,:) end if return end if if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. ) IF_NOTOK_RETURN(status=1) end if ! Buffers anchors ! ---------------- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive ! pack data !---------- call MPI_TYPE_VECTOR (sz(2)*sz(3), halo, sz(1), my_real, wetype, ierr) call MPI_TYPE_COMMIT (wetype, ierr) ! Communicate ! 
---------------- tag_snd = (/1,3/) tag_rcv = (/3,1/) nghbr = (/1,3/) neigh : do k=1,2 ! only east and west call MPI_ISEND( dist_array(ijsr(k,1),1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array(ijsr(k,2),1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr) end do neigh call MPI_WAITALL(4, req, stat, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(wetype, ierr) #else if ((halo/=0).and.DistGrid%is_periodic) then i0 = DistGrid%i_strt i1 = DistGrid%i_stop dist_array(i0-halo:i0-1,:,:) = dist_array(i1-halo+1:i1,:,:) dist_array(i1+1:i1+halo,:,:) = dist_array(i0:i0+halo-1,:,:) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_JBAND_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_JBAND_4D_R ! ! !DESCRIPTION: update halo cells of a decomposed zonal 4d slice !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_JBAND_4D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:,:,:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 20 Feb 2012 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_jband_4d_r' integer :: i0, i1 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype integer :: k, sz(4), tag_snd(2), tag_rcv(2) integer :: ijsr(2,2), nghbr(2) status = 0 ! check input if ( halo == 0 ) return sz = shape(dist_array) i0 = DistGrid%i_strt i1 = DistGrid%i_stop ! degenerate case if (npe_lon==1) then if (DistGrid%is_periodic) then dist_array(i0-halo:i0-1,:,:,:) = dist_array(i1-halo+1:i1,:,:,:) dist_array(i1+1:i1+halo,:,:,:) = dist_array(i0:i0+halo-1,:,:,:) end if return end if if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. ) IF_NOTOK_RETURN(status=1) end if ! Buffers anchors ! ---------------- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive ! pack data !---------- call MPI_TYPE_VECTOR (sz(2)*sz(3)*sz(4), halo, sz(1), my_real, wetype, ierr) call MPI_TYPE_COMMIT (wetype, ierr) ! Communicate ! ---------------- tag_snd = (/1,3/) tag_rcv = (/3,1/) nghbr = (/1,3/) neigh : do k=1,2 ! only east and west call MPI_ISEND( dist_array(ijsr(k,1),1,1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array(ijsr(k,2),1,1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr) end do neigh call MPI_WAITALL(4, req, stat, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(wetype, ierr) #else if ((halo/=0).and.DistGrid%is_periodic) then i0 = DistGrid%i_strt i1 = DistGrid%i_stop dist_array(i0-halo:i0-1,:,:,:) = dist_array(i1-halo+1:i1,:,:,:) dist_array(i1+1:i1+halo,:,:,:) = dist_array(i0:i0+halo-1,:,:,:) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_JBAND_4D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_2D_R ! ! !DESCRIPTION: update halo cells of a distributed 2D real array !\\ !\\ ! !INTERFACE: ! 
SUBROUTINE UPDATE_HALO_2D_R( DistGrid, dist_array, halo, status, i_only, j_only ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) logical, optional, intent(in) :: i_only ! update East & West halo only logical, optional, intent(in) :: j_only ! update North & South halo only ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_2d_r' integer :: i1, i2, j1, j2 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,8), req(8) integer :: k, sz(2), tag_snd(4), tag_rcv(4) integer :: srtype(4), ijsr(4,4) ! check input if ( halo == 0 ) return sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status ) IF_NOTOK_RETURN(status=1) end if ! get types and I/J start CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status ) IF_NOTOK_RETURN(status=1) ! Communicate tag_snd = (/1,2,3,4/) tag_rcv = (/3,4,1,2/) neigh : do k=1,4 call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), & tag_rcv(k), localComm, req(k+4), ierr) end do neigh call MPI_WAITALL(8, req, stat, ierr) IF_NOTOK_MPI(status=1) call FREE_DERIVED_TYPE( srtype ) #else if ((halo/=0).and.DistGrid%is_periodic) then i1 = DistGrid%i_strt i2 = DistGrid%i_stop j1 = DistGrid%j_strt j2 = DistGrid%j_stop dist_array(i1-halo:i1-1,j1:j2) = dist_array(i2-halo+1:i2,j1:j2) dist_array(i2+1:i2+halo,j1:j2) = dist_array(i1:i1+halo-1,j1:j2) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_2D_I ! ! !DESCRIPTION: update halo cells of a distributed 2D integer array !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_2D_I( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo integer, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! (1) not tested yet, but the version for 'real' has been... ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_2D_i' integer :: i1, i2, j1, j2 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(2) integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4) ! check input if ( halo == 0 ) return sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status ) IF_NOTOK_RETURN(status=1) end if ! get types and I/J start CALL GET_HALO_TYPE( distGrid, sz, halo, MPI_INTEGER, srtype, ijsr, status ) IF_NOTOK_RETURN(status=1) ! 
Communicate tag_snd = (/1,2,3,4/) tag_rcv = (/3,4,1,2/) neigh : do k=1,4 call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, req(k), ierr) call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), & tag_rcv(k), localComm, req(k+4), ierr) end do neigh call MPI_WAITALL(8, req, stat, ierr) call FREE_DERIVED_TYPE( srtype ) #else if ((halo/=0).and.DistGrid%is_periodic) then i1 = DistGrid%i_strt i2 = DistGrid%i_stop j1 = DistGrid%j_strt j2 = DistGrid%j_stop dist_array(i1-halo:i1-1,j1:j2) = dist_array(i2-halo+1:i2,j1:j2) dist_array(i2+1:i2+halo,j1:j2) = dist_array(i1:i1+halo-1,j1:j2) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_2D_I !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_3D_R ! ! !DESCRIPTION: update halo cells of a distributed 3D real array !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_3D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_3d_r' integer :: i1, i2, j1, j2 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(3) integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4) ! check input if ( halo == 0 ) return sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status ) IF_NOTOK_RETURN(status=1) end if ! get types and I/J start CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status ) IF_NOTOK_RETURN(status=1) ! Communicate tag_snd = (/1,2,3,4/) tag_rcv = (/3,4,1,2/) neigh : do k=1,4 call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2), 1), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, & req(k), ierr) call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4), 1), 1, srtype(k), DistGrid%neighbors(k), tag_rcv(k), localComm, & req(k+4), ierr) end do neigh call MPI_WAITALL(8, req, stat, ierr) IF_NOTOK_MPI(status=1) call FREE_DERIVED_TYPE( srtype ) #else if ((halo/=0).and.DistGrid%is_periodic) then i1 = DistGrid%i_strt i2 = DistGrid%i_stop j1 = DistGrid%j_strt j2 = DistGrid%j_stop dist_array(i1-halo:i1-1,j1:j2,:) = dist_array(i2-halo+1:i2,j1:j2,:) dist_array(i2+1:i2+halo,j1:j2,:) = dist_array(i1:i1+halo-1,j1:j2,:) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: UPDATE_HALO_4D_R ! ! !DESCRIPTION: update halo cells of a distributed 4D real array !\\ !\\ ! !INTERFACE: ! SUBROUTINE UPDATE_HALO_4D_R( DistGrid, dist_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! 
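!
! !REMARKS:
!   Illustrative usage sketch (an assumption, not from the original code):
!   for a 4D field allocated with a halo of 2 on its first two (decomposed)
!   dimensions, e.g. "field(i1-2:i2+2, j1-2:j2+2, lm, ntrace)", the call is
!
!      call Update_halo( dgrid, field, 2, status )
!
!   Only the West/North/East/South halo strips of the first two dimensions
!   are exchanged; the extra dimensions are covered by the derived MPI
!   datatypes built in GET_HALO_TYPE.
!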
!EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'update_halo_4D_r' integer :: i1, i2, j1, j2 #ifdef MPI integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(4) integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4) ! check input if ( halo == 0 ) return sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status ) IF_NOTOK_RETURN(status=1) end if ! get types and I/J start CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status ) IF_NOTOK_RETURN(status=1) ! Communicate tag_snd = (/1,2,3,4/) tag_rcv = (/3,4,1,2/) neigh : do k=1,4 call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2), 1, 1), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), & localComm, req(k), ierr) call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4), 1, 1), 1, srtype(k), DistGrid%neighbors(k), tag_rcv(k), & localComm, req(k+4), ierr) end do neigh call MPI_WAITALL(8, req, stat, ierr) IF_NOTOK_MPI(status=1) call FREE_DERIVED_TYPE( srtype ) #else if ((halo/=0).and.DistGrid%is_periodic) then i1 = DistGrid%i_strt i2 = DistGrid%i_stop j1 = DistGrid%j_strt j2 = DistGrid%j_stop dist_array(i1-halo:i1-1,j1:j2,:,:) = dist_array(i2-halo+1:i2,j1:j2,:,:) dist_array(i2+1:i2+halo,j1:j2,:,:) = dist_array(i1:i1+halo-1,j1:j2,:,:) end if #endif status = 0 END SUBROUTINE UPDATE_HALO_4D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_2D_R ! ! !DESCRIPTION: gather local 2D arrays into a global 2D array !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_2D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! (1) I have not use mpi_gatherv because of varying *receiving* size ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_2d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop ) status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(2) status=0 ! check input, get derived types !------------------------------- sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- GATHER ------------- if ( isRoot ) then ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop glbl_array(i0:i1,j0:j1) = dist_array(i0:i1,j0:j1) ! get remaining chunks from other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_RECV( glbl_array(i,j), 1, ginterior(klm), klm, 1, & localComm, stat, ierr) end do call FREE_DERIVED_TYPE( ginterior ) else call MPI_SEND( dist_array(i0,j0), 1, linterior, root, 1, localComm, ierr) call MPI_TYPE_FREE(linterior, ierr) end if #endif END SUBROUTINE GATHER_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! 
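!--------------------------------------------------------------------------
! Illustrative sketch (an assumption, not from the original code): as noted
! in remark (2) of the module header, the global array passed to GATHER only
! needs its true global shape on root; a (1,1) dummy suffices elsewhere.
! "dgrid", "local" and "global" are placeholder names.
!
!    call Get_DistGrid( dgrid, i_wrld=imr, j_wrld=jmr )
!    if ( isRoot ) then
!       allocate( global(imr,jmr) )
!    else
!       allocate( global(1,1) )      ! not referenced on non-root processes
!    end if
!    call Gather( dgrid, local, global, halo, status )
!--------------------------------------------------------------------------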
!-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_2D_I ! ! !DESCRIPTION: gather local 2D arrays into a global 2D array (integer version) !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_2D_I( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo integer, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: glbl_array(:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_2d_i' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop ) status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(2) status=0 ! check input, get derived types !------------------------------- sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, MPI_INTEGER, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- GATHER ------------- if ( isRoot ) then ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop glbl_array(i0:i1,j0:j1) = dist_array(i0:i1,j0:j1) ! get remaining chunks from other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_RECV( glbl_array(i,j), 1, ginterior(klm), klm, 1, & localComm, stat, ierr) end do call FREE_DERIVED_TYPE( ginterior ) else call MPI_SEND( dist_array(i0,j0), 1, linterior, root, 1, localComm, ierr) call MPI_TYPE_FREE(linterior, ierr) end if #endif END SUBROUTINE GATHER_2D_I !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_3D_R ! ! !DESCRIPTION: gather local 3D arrays into a global 3D array !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_3D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:) ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_3d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, : ) status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(3) status=0 ! ------- Check input & get derived types sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- GATHER ------------- if ( isRoot ) then ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop glbl_array(i0:i1,j0:j1,:) = dist_array(i0:i1,j0:j1,:) ! 
get remaining chunks from other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_RECV( glbl_array(i,j,1), 1, ginterior(klm), klm, 1, & localComm, stat, ierr) end do call FREE_DERIVED_TYPE( ginterior ) else call MPI_SEND( dist_array(i0,j0,1), 1, linterior, root, 1, localComm, ierr) call MPI_TYPE_FREE(linterior, ierr) end if #endif END SUBROUTINE GATHER_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_4D_R ! ! !DESCRIPTION: gather local 4D arrays into a global 4D array !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_4D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:) ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! (1) global array has to really be global on root only ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_4d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :, :) status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(4) status=0 ! ------- Check input & get derived types sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- GATHER ------------- if ( isRoot ) then ! RECV ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop glbl_array(i0:i1,j0:j1,:,:) = dist_array(i0:i1,j0:j1,:,:) ! send remaining chunks to other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_RECV( glbl_array(i,j,1,1), 1, ginterior(klm), klm, 1, & localComm, stat, ierr) end do call FREE_DERIVED_TYPE( ginterior ) else !SEND call MPI_SEND( dist_array(i0,j0,1,1), 1, linterior, root, 1, localComm, ierr) call MPI_TYPE_FREE(linterior, ierr) end if #endif END SUBROUTINE GATHER_4D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_5D_R ! ! !DESCRIPTION: gather local 5D arrays into a global 5D array !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_5D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:,:) ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:,:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! (1) global array has to really be global on root only ! 
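!  (2) A minimal calling sketch (hypothetical names dgrid, rm5 and glb5;
!      the extra dimensions lm, ntrac and nsets are placeholders):
!
!        if ( isRoot ) then
!           allocate( glb5(im_glb, jm_glb, lm, ntrac, nsets) )
!        else
!           allocate( glb5(1,1,1,1,1) )
!        end if
!        call Gather( dgrid, rm5, glb5, 0, status )
!        IF_NOTOK_RETURN(status=1)
!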
!EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_5d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :,:,:) status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(5) status=0 ! ------- Check input & get derived types sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- GATHER ------------- if ( isRoot ) then ! RECV ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop glbl_array(i0:i1,j0:j1,:,:,:) = dist_array(i0:i1,j0:j1,:,:,:) ! send remaining chunks to other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_RECV( glbl_array(i,j,1,1,1), 1, ginterior(klm), klm, 1, & localComm, stat, ierr) end do call FREE_DERIVED_TYPE( ginterior ) else !SEND call MPI_SEND( dist_array(i0,j0,1,1,1), 1, linterior, root, 1, localComm, ierr) call MPI_TYPE_FREE(linterior, ierr) end if #endif END SUBROUTINE GATHER_5D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: SCATTER_2D_R ! ! !DESCRIPTION: scatter a 2D global real array !\\ !\\ ! !INTERFACE: ! SUBROUTINE SCATTER_2D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: glbl_array(:,:) integer, intent(in) :: halo ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering ! ! !REMARKS: exactly the same as GATHER_2D_R, but inverting send/recv ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'scatter_2d_r' #ifndef MPI dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop ) = glbl_array status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(2) status=0 ! ------- Check input & get derived types sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- SCATTER ------------- if ( isRoot ) then ! send ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop dist_array(i0:i1,j0:j1) = glbl_array(i0:i1,j0:j1) ! 
send remaining chunks to other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_SSEND( glbl_array(i,j), 1, ginterior(klm), klm, klm, localComm, ierr) IF_NOTOK_MPI(status=1) end do call FREE_DERIVED_TYPE( ginterior ) else call MPI_COMM_RANK(localComm, klm, ierr) IF_NOTOK_MPI(status=1) call MPI_RECV( dist_array(i0,j0), 1, linterior, root, klm, localComm, stat, ierr) IF_NOTOK_MPI(status=1) call MPI_TYPE_FREE(linterior, ierr) IF_NOTOK_MPI(status=1) end if #endif END SUBROUTINE SCATTER_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: SCATTER_3D_R ! ! !DESCRIPTION: scatter 3D global real array !\\ !\\ ! !INTERFACE: ! SUBROUTINE SCATTER_3D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: glbl_array(:,:,:) ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering ! ! !REMARKS: global array has to really be global on root only ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'scatter_3d_r' #ifndef MPI dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :) = glbl_array status = 0 #else integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1) integer :: i, j, klm, i0, j0, i1, j1, sz(3) status=0 ! ------- Check input & get derived types sz = shape(dist_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array)) IF_NOTOK_RETURN(status=1) end if CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status ) IF_NOTOK_RETURN(status=1) i0 = DistGrid%i_strt j0 = DistGrid%j_strt ! ------- SCATTER ------------- if ( isRoot ) then ! send ! get first chunk locally i1 = DistGrid%i_stop j1 = DistGrid%j_stop dist_array(i0:i1,j0:j1,:) = glbl_array(i0:i1,j0:j1,:) ! send remaining chunks to other pes do klm=1,npes-1 i = DistGrid%bounds(1,klm) j = DistGrid%bounds(3,klm) call MPI_SSEND( glbl_array(i,j,1), 1, ginterior(klm), klm, 1, localComm, ierr) end do call FREE_DERIVED_TYPE( ginterior ) else call MPI_RECV( dist_array(i0,j0,1), 1, linterior, root, 1, localComm, stat, ierr) call MPI_TYPE_FREE(linterior, ierr) end if #endif END SUBROUTINE SCATTER_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: SCATTER_4D_R ! ! !DESCRIPTION: scatter 4D real array !\\ !\\ ! !INTERFACE: ! SUBROUTINE SCATTER_4D_R( DistGrid, dist_array, glbl_array, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: glbl_array(:,:,:,:) ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering ! 
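! !REMARKS:
!  (1) global array has to really be global on root only
!  (2) hypothetical calling sketch (dgrid, lcl4 and glb4 are placeholder
!      names); the decomposed target is allocated without halo here:
!
!        call Scatter( dgrid, lcl4, glb4, 0, status )
!        IF_NOTOK_RETURN(status=1)
!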
!EOP
!------------------------------------------------------------------------
!BOC

    character(len=*), parameter :: rname = mname//'scatter_4d_r'

#ifndef MPI

    dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop,:,:) = glbl_array
    status = 0

#else

    integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
    integer :: i, j, klm, i0, j0, i1, j1, sz(4)

    status=0

    ! ------- Check input & get derived types

    sz = shape(dist_array)

    if(okdebug)then
       CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
       IF_NOTOK_RETURN(status=1)
    end if

    CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
    IF_NOTOK_RETURN(status=1)

    i0 = DistGrid%i_strt
    j0 = DistGrid%j_strt

    ! ------- SCATTER -------------

    if ( isRoot ) then ! send

       ! get first chunk locally
       i1 = DistGrid%i_stop
       j1 = DistGrid%j_stop
       dist_array(i0:i1,j0:j1,:,:) = glbl_array(i0:i1,j0:j1,:,:)

       ! send remaining chunks to other pes
       do klm=1,npes-1
          i = DistGrid%bounds(1,klm)
          j = DistGrid%bounds(3,klm)
          call MPI_SSEND( glbl_array(i,j,1,1), 1, ginterior(klm), klm, 1, localComm, ierr)
       end do

       call FREE_DERIVED_TYPE( ginterior )

    else

       call MPI_RECV( dist_array(i0,j0,1,1), 1, linterior, root, 1, localComm, stat, ierr)
       call MPI_TYPE_FREE(linterior, ierr)

    end if

#endif

  END SUBROUTINE SCATTER_4D_R
!EOC
!--------------------------------------------------------------------------
! TM5 !
!--------------------------------------------------------------------------
!BOP
!
! !IROUTINE: SCATTER_IBAND_1D_R
!
! !DESCRIPTION: scatter a meridional real vector (1D) from root
!\\
!\\
! !INTERFACE:
!
  SUBROUTINE SCATTER_IBAND_1D_R( DistGrid, dist_array, glbl_array, status, iref )
!
! !INPUT PARAMETERS:
!
    type(dist_grid), intent(in)   :: DistGrid
    real, intent(in)              :: glbl_array(:)
    integer, optional, intent(in) :: iref  ! to find targets, default=1
!
! !OUTPUT PARAMETERS:
!
    real, intent(out)    :: dist_array(DistGrid%j_strt:)
    integer, intent(out) :: status
!
! !REVISION HISTORY:
!   01 Nov 2011 - P. Le Sager - v0
!   21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
!
! !REMARKS: 1D version, along J index, of scatter_2d_r
!
!EOP
!------------------------------------------------------------------------
!BOC

    character(len=*), parameter :: rname = mname//'scatter_iband_1d_r'

#ifndef MPI

    dist_array( DistGrid%j_strt:DistGrid%j_stop ) = glbl_array
    status = 0

#else

    integer :: stat(MPI_STATUS_SIZE)
    integer :: x_iref, n, klm, i0, j0, i1, j1, sz(1), i0t, j0t, i1t, j1t

    status=0

    ! ------- Check inputs

    x_iref=1
    if(present(iref)) x_iref=iref

    sz = shape(dist_array)

    i0 = DistGrid%i_strt
    i1 = DistGrid%i_stop
    j0 = DistGrid%j_strt
    j1 = DistGrid%j_stop

    ! ------- SEND/RECV -------------

    if(okdebug)then
       CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, shape(glbl_array), iband=.true.)
       IF_NOTOK_RETURN(status=1)
    end if

    if ( isRoot ) then

       ! local case
       if((x_iref>=i0).and.(x_iref<=i1)) dist_array(j0:j1) = glbl_array(j0:j1)

       ! send remaining chunks to other pes
       do klm=1,npes-1

          i0t = DistGrid%bounds(1,klm)
          i1t = DistGrid%bounds(2,klm)
          j0t = DistGrid%bounds(3,klm)
          j1t = DistGrid%bounds(4,klm)

          ! is klm a target processor?
          if((x_iref<i0t).or.(x_iref>i1t))cycle

          n=j1t-j0t+1
          call MPI_SSEND( glbl_array(j0t), n, my_real, klm, 1, localComm, ierr)

       end do

    else

       ! are we on a target processor?
       if((x_iref<i0).or.(x_iref>i1))return

       n=j1-j0+1
       call MPI_RECV( dist_array(j0), n, my_real, root, 1, localComm, stat, ierr)

    end if

#endif

  END SUBROUTINE SCATTER_IBAND_1D_R
!EOC
!--------------------------------------------------------------------------
! TM5 !
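!
!  Usage sketch for the meridional band scatter above (illustrative only;
!  dgrid, glb_col, lcl_col and jm_glb are placeholder names). A global
!  column held on root ends up on the processes whose i-range contains
!  iref:
!
!    if ( isRoot ) then
!       allocate( glb_col(jm_glb) )   ! filled on root
!    else
!       allocate( glb_col(1) )
!    end if
!    allocate( lcl_col(j1:j2) )
!    call Scatter_I_Band( dgrid, lcl_col, glb_col, status, iref=1 )
!    IF_NOTOK_RETURN(status=1)
!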
!--------------------------------------------------------------------------
!BOP
!
! !IROUTINE: SCATTER_JBAND_1D_R
!
! !DESCRIPTION: scatter a zonal real vector (1D) from root
!\\
!\\
! !INTERFACE:
!
  SUBROUTINE SCATTER_JBAND_1D_R( DistGrid, dist_array, glbl_array, status, jref )
!
! !INPUT PARAMETERS:
!
    type(dist_grid), intent(in)   :: DistGrid
    real, intent(in)              :: glbl_array(:)
    integer, optional, intent(in) :: jref  ! to find targets, default=1
!
! !OUTPUT PARAMETERS:
!
    real, intent(out)    :: dist_array(DistGrid%i_strt:)
    integer, intent(out) :: status
!
! !REVISION HISTORY:
!   01 Nov 2011 - P. Le Sager - v0
!   21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
!
! !REMARKS: 1D version, along I index, of scatter_2d_r
!
!EOP
!------------------------------------------------------------------------
!BOC

    character(len=*), parameter :: rname = mname//'scatter_jband_1d_r'

#ifndef MPI

    dist_array( DistGrid%i_strt:DistGrid%i_stop ) = glbl_array
    status = 0

#else

    integer :: stat(MPI_STATUS_SIZE)
    integer :: x_jref, n, klm, i0, j0, i1, j1, sz(1), i0t, j0t, i1t, j1t

    status=0

    ! ------- Check inputs

    x_jref=1
    if(present(jref)) x_jref=jref

    sz = shape(dist_array)

    if(okdebug)then
       CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, shape(glbl_array), jband=.true.)
       IF_NOTOK_RETURN(status=1)
    end if

    i0 = DistGrid%i_strt
    i1 = DistGrid%i_stop
    j0 = DistGrid%j_strt
    j1 = DistGrid%j_stop

    ! ------- SEND/RECV -------------

    if ( isRoot ) then

       ! local case
       if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1) = glbl_array(i0:i1)

       ! send remaining chunks to other pes
       do klm=1,npes-1

          i0t = DistGrid%bounds(1,klm)
          i1t = DistGrid%bounds(2,klm)
          j0t = DistGrid%bounds(3,klm)
          j1t = DistGrid%bounds(4,klm)

          ! is klm a target processor?
          if((x_jref<j0t).or.(x_jref>j1t))cycle

          n=i1t-i0t+1
          call MPI_SSEND( glbl_array(i0t), n, my_real, klm, 1, localComm, ierr)

       end do

    else

       ! are we on a target processor?
       if((x_jref<j0).or.(x_jref>j1))return

       n=i1-i0+1
       call MPI_RECV( dist_array(i0), n, my_real, root, 1, localComm, stat, ierr)

    end if

#endif

  END SUBROUTINE SCATTER_JBAND_1D_R
!EOC
!--------------------------------------------------------------------------
! TM5 !
!--------------------------------------------------------------------------
!BOP
!
! !IROUTINE: SCATTER_JBAND_2D_R
!
! !DESCRIPTION: scatter a zonal slice (2D) from root
!\\
!\\
! !INTERFACE:
!
  SUBROUTINE SCATTER_JBAND_2D_R( DistGrid, dist_array, glbl_array, status, jref )
!
! !INPUT PARAMETERS:
!
    type(dist_grid), intent(in)   :: DistGrid
    real, intent(in)              :: glbl_array(:,:)
    integer, optional, intent(in) :: jref  ! to find targets, default=1
!
! !OUTPUT PARAMETERS:
!
    real, intent(out)    :: dist_array(DistGrid%i_strt:,:)
    integer, intent(out) :: status
!
! !REVISION HISTORY:
!   01 Nov 2011 - P. Le Sager - v0
!   21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
!
! !REMARKS: 2D version, along I index, of scatter_3d_r
!
!EOP
!------------------------------------------------------------------------
!BOC

    character(len=*), parameter :: rname = mname//'scatter_jband_2d_r'

#ifndef MPI

    dist_array( DistGrid%i_strt:DistGrid%i_stop,: ) = glbl_array
    status = 0

#else

    integer :: stat(MPI_STATUS_SIZE), subarr
    integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
    integer :: x_jref, n, klm, sz(2), gsz(2)

    status=0

    ! ------- Check inputs

    x_jref=1
    if(present(jref)) x_jref=jref

    sz = shape(dist_array)
    gsz = shape(glbl_array)

    if(okdebug)then
       CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true.)
       IF_NOTOK_RETURN(status=1)
    end if

    i0 = DistGrid%i_strt
    i1 = DistGrid%i_stop
    j0 = DistGrid%j_strt
    j1 = DistGrid%j_stop

    !
------- SEND/RECV ------------- if ( isRoot ) then !local case if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:) = glbl_array(i0:i1,:) ! send remaining chunks to other pes do klm=1,npes-1 i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! if klm is a target processor, pack and send if((x_jrefj1t))cycle n=i1t-i0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_SSEND( glbl_array(i0t,1), 1, subarr, klm, 1, localComm, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a target processor? if((x_jrefj1))return n=i1-i0+1 call MPI_RECV( dist_array(i0,1), n*sz(2), my_real, root, 1, localComm, stat, ierr) end if #endif END SUBROUTINE SCATTER_JBAND_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: SCATTER_IBAND_2D_R ! ! !DESCRIPTION: scatter a meridional real array (2D) from root !\\ !\\ ! !INTERFACE: ! SUBROUTINE SCATTER_IBAND_2D_R( DistGrid, dist_array, glbl_array, status, iref ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: glbl_array(:,:) integer, optional, intent(in) :: iref ! to find targets, default=1 ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: dist_array(DistGrid%j_strt:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering ! ! !REMARKS: 2D version, along J index, of scatter_3d_r ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'scatter_iband_2d_r' #ifndef MPI dist_array( DistGrid%j_strt:DistGrid%j_stop,: ) = glbl_array status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: x_iref, n, klm, sz(2), gsz(2) status=0 ! ------- Check inputs x_iref=1 if(present(iref)) x_iref=iref sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.) IF_NOTOK_RETURN(status=1) end if i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! ------- SEND/RECV ------------- if ( isRoot ) then ! local case if((x_iref>=i0).and.(x_iref<=i1)) dist_array(j0:j1,:) = glbl_array(j0:j1,:) ! send remaining chunks to other pes do klm=1,npes-1 i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! is klm a target processor? if((x_irefi1t))cycle n=j1t-j0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_SSEND( glbl_array(j0t,1), 1, subarr, klm, 1, localComm, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a target processor? if((x_irefi1))return n=j1-j0+1 call MPI_RECV( dist_array(j0,1), n*sz(2), my_real, root, 1, localComm, stat, ierr) end if #endif END SUBROUTINE SCATTER_IBAND_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: SCATTER_JBAND_3D_R ! ! !DESCRIPTION: scatter a zonal slice (2D) from root !\\ !\\ ! !INTERFACE: ! SUBROUTINE SCATTER_JBAND_3D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom ) ! ! !INPUT PARAMETERS: ! 
type(dist_grid), intent(in) :: DistGrid real, intent(in) :: glbl_array(:,:,:) integer, optional, intent(in) :: jref ! to find targets, default=1 logical, optional, intent(in) :: rowcom ! to scatter from row_root instead of global root ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: dist_array(DistGrid%i_strt:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! ! !REMARKS: 2D version, along I index, of scatter_3d_r ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'scatter_jband_3d_r' #ifndef MPI dist_array( DistGrid%i_strt:DistGrid%i_stop,:,: ) = glbl_array status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: x_jref, n, klm, sz(3), gsz(3), slab, tgt_root integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride logical :: selectRoot status=0 ! ------- Check inputs x_jref=1 if(present(jref)) x_jref=jref i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! by default scatter from global root selectRoot = isRoot tgt_root = root if (present(rowcom)) then if (rowcom) then selectRoot = isRowRoot.and.(x_jref>=j0).and.(x_jref<=j1) tgt_root = RowRootId endif endif sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot) IF_NOTOK_RETURN(status=1) end if ! ------- SEND/RECV ------------- if ( selectRoot ) then !local case if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:,:) = glbl_array(i0:i1,:,:) ! send remaining chunks to other pes do klm=0,npes-1 if (klm==myid) cycle ! skip local case i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! if klm is a target processor, pack and send if((x_jrefj1t))cycle n=i1t-i0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr) CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr) stride = gsz(1)*gsz(2)*sizeoftype call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr) call MPI_TYPE_FREE (slab, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_SSEND( glbl_array(i0t,1,1), 1, subarr, klm, 1, localComm, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a target processor? if((x_jrefj1))return n=i1-i0+1 call MPI_RECV( dist_array(i0,1,1), n*sz(2)*sz(3), my_real, tgt_root, 1, localComm, stat, ierr) end if #endif END SUBROUTINE SCATTER_JBAND_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: SCATTER_JBAND_4D_R ! ! !DESCRIPTION: scatter a zonal slice (4D) from root !\\ !\\ ! !INTERFACE: ! SUBROUTINE SCATTER_JBAND_4D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: glbl_array(:,:,:,:) integer, optional, intent(in) :: jref ! to find targets, default=1 logical, optional, intent(in) :: rowcom ! to scatter from row_root instead of global root ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: dist_array(DistGrid%i_strt:,:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 17 Feb 2015 - Ph. Le Sager - v0 ! ! !REMARKS: 2D version, along I index, of scatter_4d_r ! 
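!           A hypothetical calling sketch, scattering a zonal 4D slab from
!           the row root instead of the global root (dgrid, lcl4, glb4 and
!           j_ref are placeholder names):
!
!             call Scatter_J_Band( dgrid, lcl4, glb4, status, jref=j_ref, rowcom=.true. )
!             IF_NOTOK_RETURN(status=1)
!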
!EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'scatter_jband_4d_r' #ifndef MPI dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:,: ) = glbl_array status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: x_jref, n, klm, sz(4), gsz(4), slab, tgt_root integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride logical :: selectRoot status=0 ! ------- Check inputs x_jref=1 if(present(jref)) x_jref=jref i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! by default scatter from global root selectRoot = isRoot tgt_root = root if (present(rowcom)) then if (rowcom) then selectRoot = isRowRoot.and.(x_jref>=j0).and.(x_jref<=j1) tgt_root = RowRootId endif endif sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot) IF_NOTOK_RETURN(status=1) end if ! ------- SEND/RECV ------------- if ( selectRoot ) then !local case if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:,:,:) = glbl_array(i0:i1,:,:,:) ! send remaining chunks to other pes do klm=0,npes-1 if (klm==myid) cycle ! skip local case i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! if klm is a target processor, pack and send if((x_jrefj1t))cycle n=i1t-i0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr) CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr) stride = gsz(1)*gsz(2)*sizeoftype call MPI_TYPE_CREATE_HVECTOR (sz(3)*sz(4), 1, stride, slab, subarr, ierr) call MPI_TYPE_FREE (slab, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_SSEND( glbl_array(i0t,1,1,1), 1, subarr, klm, 1, localComm, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a target processor? if((x_jrefj1))return n=i1-i0+1 call MPI_RECV( dist_array(i0,1,1,1), n*sz(2)*sz(3)*sz(4), my_real, tgt_root, 1, localComm, stat, ierr) end if #endif END SUBROUTINE SCATTER_JBAND_4D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_JBAND_2D_R ! ! !DESCRIPTION: gather a zonal slice (2D) on root. For 2D arrays, with first ! dimension distributed along I (making it a J-band), and the ! other dimension is *not* distributed along J. For example: ! [i1:i2, lev], or [i1:i2, trac] !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_JBAND_2D_R( DistGrid, dist_array, glbl_array, status, jref ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: dist_array(DistGrid%i_strt:,:) integer, intent(in) :: jref ! to find sources ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_jband_2d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,: ) status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: n, klm, sz(2), gsz(2) status=0 ! ------- Check inputs sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true.) 
IF_NOTOK_RETURN(status=1) end if i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! ------- SEND/RECV ------------- if ( isRoot ) then ! local case if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:) = dist_array(i0:i1,:) ! receive remaining chunks from other pes do klm=1, npes-1 i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! if klm is a source processor, receive from it if((jrefj1t))cycle n=i1t-i0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_RECV( glbl_array(i0t,1), 1, subarr, klm, jref, localComm, stat, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a src processor? if((jrefj1))return n=i1-i0+1 call MPI_SEND( dist_array(i0,1), n*sz(2), my_real, root, jref, localComm, ierr) end if #endif END SUBROUTINE GATHER_JBAND_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_JBAND_3D_R ! ! !DESCRIPTION: Gather a zonal slab (3D) on root or rowroot(jref) [i.e. the ! root of the row of procs]. ! Local arrays [i1:i2, a:b, c:d] are gathered into the root ! proc as [1:im, 1:b-a+1, 1:d-c+1]. Caller has to ensure that at least ! **ALL** the ROW procs call this routine, plus root if needed. ! No halo possible yet. !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_JBAND_3D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: dist_array(DistGrid%i_strt:,:,:) integer, intent(in) :: jref ! to find sources (defines the row we want) logical, optional, intent(in) :: rowcom ! to gather on row_root instead of global root ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: use in budget for gathering fluxes, advect_cfl ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_jband_3d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:) status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: n, klm, sz(3), gsz(3), slab, tgt_root integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride logical :: selectRoot status=0 ! ------- Check inputs i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! by default gather into global root selectRoot = isRoot tgt_root = root if (present(rowcom)) then if (rowcom) then selectRoot = isRowRoot.and.(jref>=j0).and.(jref<=j1) tgt_root = RowRootId endif endif sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot) IF_NOTOK_RETURN(status=1) end if ! ------- SEND/RECV ------------- if ( selectRoot ) then ! local case if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:,:) = dist_array(i0:i1,:,:) ! receive remaining chunks from other pes do klm=0,npes-1 if (klm==myid) cycle ! skip local case i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! 
if klm is a source processor, receive from it if((jrefj1t))cycle n=i1t-i0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr) CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr) stride = gsz(1)*gsz(2)*sizeoftype call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr) call MPI_TYPE_FREE (slab, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_RECV( glbl_array(i0t,1,1), 1, subarr, klm, jref, localComm, stat, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a src processor? if((jrefj1))return n=i1-i0+1 call MPI_SEND( dist_array(i0,1,1), n*sz(2)*sz(3), my_real, tgt_root, jref, localComm, ierr) end if #endif END SUBROUTINE GATHER_JBAND_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_JBAND_4D_R ! ! !DESCRIPTION: Gather a zonal slab (4D) on root or rowroot(jref) [i.e. the ! root of the row of procs]. ! Local arrays [i1:i2, a:b, c:d, e:f] are gathered into the root ! proc as [1:im, 1:b-a+1, 1:d-c+1, 1:f-e+1]. Caller has to ! ensure that at least **ALL** the ROW procs call this routine, ! plus root if needed. ! No halo possible yet. !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_JBAND_4D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: dist_array(DistGrid%i_strt:,:,:,:) integer, intent(in) :: jref ! to find sources (defines the row we want) logical, optional, intent(in) :: rowcom ! to gather on row_root instead of global root ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:,:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 17 Feb 2015 - Ph. Le Sager - v0 ! ! !REMARKS: use in advectx ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_jband_4d_r' #ifndef MPI glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:,:) status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: n, klm, sz(4), gsz(4), slab, tgt_root, stack integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride logical :: selectRoot status=0 ! ------- Check inputs i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! by default gather into global root selectRoot = isRoot tgt_root = root if (present(rowcom)) then if (rowcom) then selectRoot = isRowRoot.and.(jref>=j0).and.(jref<=j1) tgt_root = RowRootId endif endif sz = shape(dist_array) gsz = shape(glbl_array) stack = sz(3)*sz(4) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot) IF_NOTOK_RETURN(status=1) end if ! ------- SEND/RECV ------------- if ( selectRoot ) then ! local case if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:,:,:) = dist_array(i0:i1,:,:,:) ! receive remaining chunks from other pes do klm=0,npes-1 if (klm==myid) cycle ! skip local case i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! 
if klm is a source processor, receive from it if((jrefj1t))cycle n=i1t-i0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr) CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr) stride = gsz(1)*gsz(2)*sizeoftype call MPI_TYPE_CREATE_HVECTOR (stack, 1, stride, slab, subarr, ierr) call MPI_TYPE_FREE (slab, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_RECV( glbl_array(i0t,1,1,1), 1, subarr, klm, jref, localComm, stat, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a src processor? if((jrefj1))return n=i1-i0+1 call MPI_SEND( dist_array(i0,1,1,1), n*sz(2)*sz(3)*sz(4), my_real, tgt_root, jref, localComm, ierr) end if #endif END SUBROUTINE GATHER_JBAND_4D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_IBAND_1D_R ! ! !DESCRIPTION: gather a meridional (with dimension distributed along J) ! vector on root. For example: [j1:j2] !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_IBAND_1D_R( DistGrid, dist_array, glbl_array, status, iref ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: dist_array(DistGrid%j_strt:) integer, intent(in) :: iref ! to define source processors ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: glbl_array(:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! - all processors with an i-range containing IREF are used. ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_iband_1d_r' #ifndef MPI glbl_array = dist_array( DistGrid%j_strt:DistGrid%j_stop ) status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: n, klm, sz(1), gsz(1) status=0 ! ------- Check inputs sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.) ! write(gol,*)"lbound m",lbound(dist_array); call goPr ! write(gol,*)"ubound m",ubound(dist_array); call goPr IF_NOTOK_RETURN(status=1) end if i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! ------- SEND/RECV ------------- if ( isRoot ) then ! local case if((iref>=i0).and.(iref<=i1)) glbl_array(j0:j1) = dist_array(j0:j1) ! receive remaining chunks from other pes do klm=1, npes-1 i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! if klm is a source processor, receive from it if((irefi1t))cycle n=j1t-j0t+1 call MPI_RECV( glbl_array(j0t), n, my_real, klm, iref, localComm, stat, ierr) end do else ! are we on a src processor? if((irefi1)) return n=j1-j0+1 call MPI_SEND( dist_array(j0), n, my_real, root, iref, localComm, ierr) end if #endif END SUBROUTINE GATHER_IBAND_1D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: GATHER_IBAND_3D_R ! ! !DESCRIPTION: gather a meridional slice (3D) on root. For arrays like: ! [j1:j2, lev, trac], that is without a distributed I dim. !\\ !\\ ! !INTERFACE: ! SUBROUTINE GATHER_IBAND_3D_R( DistGrid, dist_array, glbl_array, status, iref ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid real, intent(in) :: dist_array(DistGrid%j_strt:,:,:) integer, intent(in) :: iref ! to find sources ! ! !OUTPUT PARAMETERS: ! 
real, intent(out) :: glbl_array(:,:,:) integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: use in budget for gathering fluxes ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'gather_iband_3d_r' #ifndef MPI glbl_array = dist_array( DistGrid%j_strt:DistGrid%j_stop,:,:) status = 0 #else integer :: stat(MPI_STATUS_SIZE), subarr integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t integer :: n, klm, sz(3), gsz(3), slab integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride status=0 ! ------- Check inputs sz = shape(dist_array) gsz = shape(glbl_array) if(okdebug)then CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.) IF_NOTOK_RETURN(status=1) end if i0 = DistGrid%i_strt i1 = DistGrid%i_stop j0 = DistGrid%j_strt j1 = DistGrid%j_stop ! ------- SEND/RECV ------------- if ( isRoot ) then ! local case if((iref>=i0).and.(iref<=i1)) glbl_array(j0:j1,:,:) = dist_array(j0:j1,:,:) ! receive remaining chunks from other pes do klm=1,npes-1 i0t = DistGrid%bounds(1,klm) i1t = DistGrid%bounds(2,klm) j0t = DistGrid%bounds(3,klm) j1t = DistGrid%bounds(4,klm) ! if klm is a source processor, receive from it if((irefi1t))cycle n=j1t-j0t+1 call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr) CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr) stride = gsz(1)*gsz(2)*sizeoftype call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr) call MPI_TYPE_FREE (slab, ierr) call MPI_TYPE_COMMIT (subarr, ierr) call MPI_RECV( glbl_array(j0t,1,1), 1, subarr, klm, iref, localComm, stat, ierr) call MPI_TYPE_FREE (subarr, ierr) end do else ! are we on a src processor? if((irefi1))return n=j1-j0+1 call MPI_SEND( dist_array(j0,1,1), n*sz(2)*sz(3), my_real, root, iref, localComm, ierr) end if #endif END SUBROUTINE GATHER_IBAND_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: REDUCE_2D_R ! ! !DESCRIPTION: Global reduction. Data are gathered on root, where OP is ! then done, instead of OPing on each proc and then calling ! MPI_REDUCE. This ensures bitwise agreement with serial code ! results in case of SUM. !\\ !\\ ! !INTERFACE: ! SUBROUTINE REDUCE_2D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM' logical, intent(in), optional :: all ! mimic mpi_allreduce instead of mpi_reduce logical, intent(in), optional :: debug ! print additional information: location of Min/Max ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: resultat integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'REDUCE_2D_R' logical :: x_debug integer :: shp(2) real, allocatable :: glbl_array(:,:) x_debug=.false. 
if(present(debug)) x_debug=debug #ifdef MPI if (isRoot) then allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region )) else allocate( glbl_array(1,1) ) end if call gather(DistGrid, dist_array, glbl_array, halo, status) IF_NOTOK_RETURN(status=1) if (isRoot) then select case( op ) case('sum', 'Sum', 'SUM') resultat = sum(glbl_array) case('max', 'Max', 'MAX') resultat = maxval(glbl_array) if(x_debug) then shp=maxloc(glbl_array) write(gol,*) rname //": Max location =", shp; call goPr end if case('min', 'Min', 'MIN') resultat = minval(glbl_array) if(x_debug) then shp=minloc(glbl_array) write(gol,*) rname //": Min location =", shp; call goPr end if case default write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1 IF_NOTOK_RETURN(status=1) end select end if if (present(all)) then if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr) end if deallocate(glbl_array) #else select case( op ) case('sum', 'Sum', 'SUM') resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region)) case('max', 'Max', 'MAX') resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region)) if(x_debug) then shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region)) write(gol,*) rname //": Max location =", shp; call goPr end if case('min', 'Min', 'MIN') resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region)) if(x_debug) then shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region)) write(gol,*) rname //": Min location =", shp; call goPr end if case default write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1 IF_NOTOK_RETURN(status=1) end select #endif status=0 END SUBROUTINE REDUCE_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: REDUCE_3D_R ! ! !DESCRIPTION: same as REDUCE_2D_R, for 3D arrays. !\\ !\\ ! !INTERFACE: ! SUBROUTINE REDUCE_3D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:) character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM' logical, intent(in), optional :: all ! mimic MPI_ALLREDUCE instead of MPI_REDUCE logical, intent(in), optional :: debug ! print additional information: location of Min/Max ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: resultat integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'REDUCE_3D_R' integer :: shp(3) logical :: x_debug real, allocatable :: glbl_array(:,:,:) x_debug=.false. 
if(present(debug)) x_debug=debug #ifdef MPI shp = shape( dist_array ) if (isRoot) then allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region, shp(3)) ) else allocate( glbl_array(1,1,1) ) end if call gather(DistGrid, dist_array, glbl_array, halo, status) IF_NOTOK_RETURN(status=1) if (isRoot) then select case( op ) case('sum', 'Sum', 'SUM') resultat = sum(glbl_array) case('max', 'Max', 'MAX') resultat = maxval(glbl_array) if(x_debug) then shp=maxloc(glbl_array) write(gol,*) rname //": Max location =", shp; call goPr end if case('min', 'Min', 'MIN') resultat = minval(glbl_array) if(x_debug) then shp=minloc(glbl_array) write(gol,*) rname //": Min location =", shp; call goPr end if case default write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1 IF_NOTOK_RETURN(status=1) end select end if if (present(all)) then if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr) end if #else select case( op ) case('sum', 'Sum', 'SUM') resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:)) case('max', 'Max', 'MAX') resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:)) if(x_debug) then shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:)) write(gol,*) rname //": Max location =", shp; call goPr end if case('min', 'Min', 'MIN') resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:)) if(x_debug) then shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:)) write(gol,*) rname //": Min location =", shp; call goPr end if case default write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1 IF_NOTOK_RETURN(status=1) end select #endif status=0 END SUBROUTINE REDUCE_3D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: REDUCE_4D_R ! ! !DESCRIPTION: same as REDUCE_2D_R, for 4D arrays. !\\ !\\ ! !INTERFACE: ! SUBROUTINE REDUCE_4D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:) character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM' logical, intent(in), optional :: all ! mimic MPI_ALLREDUCE instead of MPI_REDUCE logical, intent(in), optional :: debug ! print additional information: location of Min/Max ! ! !OUTPUT PARAMETERS: ! real, intent(out) :: resultat integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'reduce_4d_r' integer :: shp(4) logical :: x_debug real, allocatable :: glbl_array(:,:,:,:) x_debug=.false. 
if(present(debug)) x_debug=debug #ifdef MPI shp = shape( dist_array ) if (isRoot) then allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region, shp(3), shp(4)) ) else allocate( glbl_array(1,1,1,1) ) end if call gather(DistGrid, dist_array, glbl_array, halo, status) IF_NOTOK_RETURN(status=1) if (isRoot) then select case( op ) case('sum', 'Sum', 'SUM') resultat = sum(glbl_array) case('max', 'Max', 'MAX') resultat = maxval(glbl_array) if(x_debug) then shp=maxloc(glbl_array) write(gol,*) rname //": Max location =", shp; call goPr end if case('min', 'Min', 'MIN') resultat = minval(glbl_array) if(x_debug) then shp=minloc(glbl_array) write(gol,*) rname //": Min location =", shp; call goPr end if case default write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1 IF_NOTOK_RETURN(status=1) end select end if if (present(all)) then if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr) end if #else select case( op ) case('sum', 'Sum', 'SUM') resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:)) case('max', 'Max', 'MAX') resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:)) if(x_debug) then shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:)) write(gol,*) rname //": Max location =", shp; call goPr end if case('min', 'Min', 'MIN') resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:)) if(x_debug) then shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:)) write(gol,*) rname //": Min location =", shp; call goPr end if case default write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1 IF_NOTOK_RETURN(status=1) end select #endif status=0 END SUBROUTINE REDUCE_4D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: DIST_ARR_STAT_2D_R ! ! !DESCRIPTION: calculate and print the MIN, MEAN, MAX of a 2D distributed ! real field. This is basically a wrapper around several calls ! to REDUCE. ! ! *** SHOULD BE CALLED ONLY FOR DEBUGGING PURPOSES *** !\\ !\\ ! !INTERFACE: ! SUBROUTINE DIST_ARR_STAT_2D_R( DistGrid, name, arr, halo, status) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid character(len=*), intent(in) :: name ! verbose helper integer, intent(in) :: halo real, intent(in) :: arr(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:) ! ! !OUTPUT PARAMETERS: ! integer, intent(out):: status ! ! !REVISION HISTORY: ! 7 Mar 2012 - P. Le Sager - adapted for lon-lat MPI domain ! decomposition, from DD_FIELD_STATISTICS in ! both DIFFUSION.F90 and DRY_DEPOSITION.F90 ! !REMARKS: ! (1) FIXME : does not compute the mean of only non-zero data anymore !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname=mname//'dist_arr_stat_2d_r' integer :: ntel_non_zero, nx, ny real :: maxf, minf, meanf, mean_non_zero call reduce( DistGrid, arr, halo, 'MAX', maxf, status) IF_NOTOK_RETURN(status=1) call reduce( DistGrid, arr, halo, 'MIN', minf, status) IF_NOTOK_RETURN(status=1) call reduce( DistGrid, arr, halo, 'SUM', meanf, status) IF_NOTOK_RETURN(status=1) if(isRoot) then nx = DistGrid%im_region ny = DistGrid%jm_region meanf = meanf / ( nx*ny ) write(gol,'(a10,3(a5,1x,1pe10.3))') name,' max=', maxf,' min=',minf,'mean=',meanf!,'mn0',mean_non_zero call goPr end if status=0 END SUBROUTINE DIST_ARR_STAT_2D_R !EOC !-------------------------------------------------------------------------- ! TM5 ! 
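!
!  Usage sketch (illustrative only; dgrid, field, my_halo and total are
!  placeholder names). REDUCE gathers on root and applies the operation
!  there, which keeps the SUM bitwise identical to the serial result;
!  with all=.true. the result is broadcast to every process:
!
!    call Reduce( dgrid, field, my_halo, 'SUM', total, status, all=.true. )
!    IF_NOTOK_RETURN(status=1)
!
!    ! debugging only: print MIN/MEAN/MAX of the distributed field
!    call Dist_Arr_Stat( dgrid, 'field', field, my_halo, status )
!    IF_NOTOK_RETURN(status=1)
!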
!-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: PRINT_DISTGRID ! ! !DESCRIPTION: utility that prints information about a distributed grid !\\ !\\ ! !INTERFACE: ! SUBROUTINE PRINT_DISTGRID ( DistGrid ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! !EOP !------------------------------------------------------------------------ !BOC integer, parameter :: maxrow=5 integer, parameter :: maxcol=5 integer :: sz1(1), i1 ! header write(gol,*) "========== Start Distributed Grid ==================="; call goPr ! dist_grid write(gol,*) "im_region :" , DistGrid%im_region ; call goPr write(gol,*) "jm_region :" , DistGrid%jm_region ; call goPr write(gol,*) "i_strt :" , DistGrid%i_strt ; call goPr write(gol,*) "i_stop :" , DistGrid%i_stop ; call goPr write(gol,*) "i_strt_halo :" , DistGrid%i_strt_halo ; call goPr write(gol,*) "i_stop_halo :" , DistGrid%i_stop_halo ; call goPr write(gol,*) "j_strt :" , DistGrid%j_strt ; call goPr write(gol,*) "j_stop :" , DistGrid%j_stop ; call goPr write(gol,*) "j_strt_halo :" , DistGrid%j_strt_halo ; call goPr write(gol,*) "j_stop_halo :" , DistGrid%j_stop_halo ; call goPr write(gol,*) "has_north_pole:" , DistGrid%has_north_pole ; call goPr write(gol,*) "has_south_pole:" , DistGrid%has_south_pole ; call goPr ! physical grid write(gol,*) '------------- physical grid -------------------' ; call goPr write(gol,*) "llgrid name:", trim(DistGrid%lli%name) ; call goPr write(gol,*) "R[m] :", DistGrid%lli%R ; call goPr write(gol,*) "dlon[deg] :", DistGrid%lli%dlon_deg ; call goPr write(gol,*) "dlat[deg] :", DistGrid%lli%dlat_deg ; call goPr write(gol,*) "im :", DistGrid%lli%im ; call goPr write(gol,*) "jm :", DistGrid%lli%jm ; call goPr if (associated(DistGrid%lli%lon_deg)) then sz1 = shape(DistGrid%lli%lon_deg) i1 = min(maxcol,sz1(1)) write(gol,*) "lon = ",DistGrid%lli%lon_deg(1:i1); call goPr endif if (associated(DistGrid%lli%lat_deg)) then sz1=shape(DistGrid%lli%lat_deg) i1=min(maxrow,sz1(1)) write(gol,*) "lat = ",DistGrid%lli%lat_deg(1:i1); call goPr endif ! footer write(gol,*) "========== End Distributed Grid ===================" ; call goPr END SUBROUTINE PRINT_DISTGRID !EOC !-------------------------------------------------------------------------- ! TM5 ! !-------------------------------------------------------------------------- !BOP ! ! !IROUTINE: TESTCOMM ! ! !DESCRIPTION: check if the communications are working as expected. ! Currently checked: ! - GATHER, SCATTER, UPDATE_HALO (2D, 3D, 4D) ! - SCATTER_I_BAND, SCATTER_J_BAND (1D, 2D) !\\ !\\ ! !INTERFACE: ! SUBROUTINE TESTCOMM( DistGrid, halo, status ) ! ! !INPUT PARAMETERS: ! type(dist_grid), intent(in) :: DistGrid integer, intent(in) :: halo ! ! !OUTPUT PARAMETERS: ! integer, intent(out) :: status ! ! !REVISION HISTORY: ! 01 Nov 2011 - P. Le Sager - v0 ! ! !REMARKS: ! (1) to run with different halo sizes ! (2) note that will not catch some errors in halo_update if using too few processors ! !EOP !------------------------------------------------------------------------ !BOC character(len=*), parameter :: rname = mname//'testcomm' ! real, allocatable :: src1(:), tgt1(:), res1(:) ! real, allocatable :: src2(:,:), tgt2(:,:), res2(:,:) ! real, allocatable :: larray2a(:,:), larray2b(:,:), glb2a(:,:), glb2b(:,:) ! real, allocatable :: larray3a(:,:,:), larray3b(:,:,:), glb3a(:,:,:), glb3b(:,:,:) ! real, allocatable :: larray4a(:,:,:,:), larray4b(:,:,:,:) ! 
real, allocatable :: glb4a(:,:,:,:), glb4b(:,:,:,:) ! integer :: i0, j0, i1, j1, x_halo, k, levels, l, trac, iref(2), jref(2) ! logical :: south, north, west, east, test ! character(len=*), parameter :: form='(f4.0)' ! levels=17 ! trac=5 ! ! General ! call Get_DistGrid( DistGrid, I_STRT=i0, I_STOP=i1, J_STRT=j0, J_STOP=j1, & ! hasEastBorder=east, hasWestBorder=west, & ! hasSouthBorder=south, hasNorthBorder=north ) ! x_halo=halo ! status=0 ! if(isRoot) print*, "========= TESTING COMM FOR HALO=",x_HALO ! call par_barrier() ! ! *************************** SCATTER BAND *************************** ! iref=(/ 1, DistGrid%im_region/) ! to test i_band along west and east border ! jref=(/ 1, DistGrid%jm_region/) ! to test j_band along south and north border ! if(isRoot) then ! allocate(src1(DistGrid%im_region)) ! else ! allocate(src1(1)) ! end if ! allocate(tgt1(i0:i1), res1(i0:i1)) ! if (isRoot) src1 = (/(k, k=1,DistGrid%im_region)/) ! res1 = (/(k, k=i0,i1)/) ! do trac=1,2 ! tgt1=0 ! call scatter_j_band(distgrid, tgt1, src1, status, jref=jref(trac)) ! IF_NOTOK_RETURN(status=1) ! test=((trac==1).and.south).or.((trac==2).and.north) ! ! diff should be 0 along borders only ! if (maxval(abs(res1-tgt1)) /= 0.) then ! if(test) then ! print*, "test scatter_J_band 1D FAILED at border:" ! !print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1) ! status=1 ! !else ! Expected only if tgt1 has inout attribute in scatter routine ! ! print*, "test scatter_J_band 1D PASSED inside:" ! ! print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1) ! end if ! else ! if(test) then ! print*, "test scatter_J_band 1D PASSED at border" ! !print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1) ! !else ! Expected only if tgt1 has inout attribute in scatter routine ! ! print*, "test scatter_J_band 1D FAILED inside" ! ! print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1) ! ! status=1 ! end if ! end if ! IF_NOTOK_RETURN(status=1) ! end do ! deallocate(src1, tgt1, res1) ! if(isRoot) then ! allocate(src1(DistGrid%jm_region)) ! else ! allocate(src1(1)) ! end if ! allocate(tgt1(j0:j1), res1(j0:j1)) ! if (isRoot) src1 = (/(k, k=1,DistGrid%jm_region)/) ! res1 = (/(k, k=j0,j1)/) ! do trac=1,2 ! tgt1=0 ! call scatter_i_band(distgrid, tgt1, src1, status, iref=iref(trac)) ! IF_NOTOK_RETURN(status=1) ! test=((trac==1).and.west).or.((trac==2).and.east) ! ! diff should be 0 along borders only ! if (maxval(abs(res1-tgt1)) /= 0.) then ! if(test) then ! print*, "test scatter_I_band 1D FAILED at border" ! status=1 ! !else ! ! print*, "test scatter_I_band 1D PASSED inside" ! end if ! else ! if(test) then ! print*, "test scatter_I_band 1D PASSED at border" ! !else ! ! print*, "test scatter_I_band 1D FAILED inside" ! ! status=1 ! end if ! end if ! IF_NOTOK_RETURN(status=1) ! end do ! deallocate(src1, tgt1, res1) ! ! ---------------- 2D ! if(isRoot) then ! allocate(src2(DistGrid%im_region, levels)) ! else ! allocate(src2(1,1)) ! end if ! allocate(tgt2(i0:i1,levels), res2(i0:i1,levels)) ! do l=1,levels ! if (isRoot) src2(:,l) = (/(k, k=1,DistGrid%im_region)/) * l ! res2(:,l) = (/(k, k=i0,i1)/)* l ! end do ! do trac=1,2 ! tgt2=0 ! call scatter_j_band(distgrid, tgt2, src2, status, jref=jref(trac)) ! IF_NOTOK_RETURN(status=1) ! test=((trac==1).and.south).or.((trac==2).and.north) ! ! diff should be 0 along borders only ! if (maxval(abs(res2-tgt2)) /= 0.) then ! if(test) then ! print*, "test scatter_J_band 2D FAILED at border" ! status=1 ! !else ! ! print*, "test scatter_J_band 2D PASSED inside" ! end if ! else ! if(test) then ! 
    !          print*, "test scatter_J_band 2D PASSED at border"
    !       !else
    !       !   print*, "test scatter_J_band 2D FAILED inside"
    !       !   status=1
    !       end if
    !    end if
    !    IF_NOTOK_RETURN(status=1)
    ! end do
    ! deallocate(src2, tgt2, res2)
    ! if(isRoot) then
    !    allocate(src2(DistGrid%jm_region,levels))
    ! else
    !    allocate(src2(1,1))
    ! end if
    ! allocate(tgt2(j0:j1,levels), res2(j0:j1,levels))
    ! do l=1,levels
    !    if (isRoot) src2(:,l) = (/(k, k=1,DistGrid%jm_region)/)*l
    !    res2(:,l) = (/(k, k=j0,j1)/)*l
    ! enddo
    ! do trac=1,2
    !    tgt2=0
    !    call scatter_i_band(distgrid, tgt2, src2, status, iref=iref(trac))
    !    IF_NOTOK_RETURN(status=1)
    !    test=((trac==1).and.west).or.((trac==2).and.east)
    !    ! diff should be 0 along borders only
    !    if (maxval(abs(res2-tgt2)) /= 0.) then
    !       if(test) then
    !          print*, "test scatter_I_band 2D FAILED at border"
    !          status=1
    !       !else
    !       !   print*, "test scatter_I_band 2D PASSED inside"
    !       end if
    !    else
    !       if(test) then
    !          print*, "test scatter_I_band 2D PASSED at border"
    !       !else
    !       !   print*, "test scatter_I_band 2D FAILED inside"
    !       !   status=1
    !       end if
    !    end if
    !    IF_NOTOK_RETURN(status=1)
    ! end do
    ! deallocate(src2, tgt2, res2)
    ! ! *************************** GATHER/SCATTER ***************************
    ! !----------------------
    ! ! Allocate 2D
    ! !----------------------
    ! allocate( larray2a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
    ! allocate( larray2b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
    ! allocate( glb2a(DistGrid%im_region, DistGrid%jm_region) )
    ! ! in halo, 0, elsewhere myid
    ! larray2a=0.
    ! larray2a(i0:i1,j0:j1)=real(myid)
    ! ! glb2b is global array, used in root only
    ! if (isRoot) then
    !    allocate( glb2b( DistGrid%im_region, DistGrid%jm_region) )
    ! else
    !    allocate( glb2b(1,1) )
    ! end if
    ! !----------------------
    ! ! test GATHER_2D_R
    ! !----------------------
    ! glb2b=0.
    ! larray2b=0.
    ! ! gather
    ! call gather( DistGrid, larray2a, glb2b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! ! broadcast result
    ! if (isRoot) glb2a = glb2b
    ! #ifdef MPI
    ! call MPI_bcast(glb2a, size(glb2a), my_real, root, localComm, ierr)
    ! #endif
    ! larray2b(i0:i1,j0:j1) = glb2a(i0:i1,j0:j1)
    ! larray2b = larray2a-larray2b
    ! ! diff should be 0
    ! if (maxval(abs(larray2b)) /= 0.) then
    !    print*, "test gather 2d FAILED"
    !    status=1
    ! else
    !    print*, "test gather 2d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! call par_barrier()
    ! !----------------------
    ! ! test SCATTER_2D_R
    ! !----------------------
    ! larray2b=0.
    ! call scatter( DistGrid, larray2b, glb2b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! larray2b=larray2a-larray2b
    ! ! diff should be 0
    ! if (maxval(abs(larray2b)) /= 0.) then
    !    print*, "test scatter 2d FAILED"
    !    status=1
    ! else
    !    print*, "test scatter 2d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! ! CLEANUP
    ! deallocate(larray2a,larray2b,glb2a,glb2b)
    ! call par_barrier()
    ! !----------------------
    ! ! Allocate 3D
    ! !----------------------
    ! allocate( larray3a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
    ! allocate( larray3b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
    ! allocate( glb3a( DistGrid%im_region, DistGrid%jm_region, levels) )
    ! ! in halo, 0, elsewhere myid*level
    ! larray3a=0.
    ! do k=1,levels
    !    larray3a(i0:i1,j0:j1,k)=real(myid*k)
    ! end do
    ! ! glb3b is global array, used in root only
    ! if (isRoot) then
    !    allocate( glb3b( DistGrid%im_region, DistGrid%jm_region, levels) )
    ! else
    !    allocate( glb3b(1,1,1) )
    ! end if
    ! !----------------------
    ! ! test GATHER_3D_R
    ! !----------------------
    ! glb3b=0.
    ! larray3b=0.
    ! ! gather
    ! call gather( DistGrid, larray3a, glb3b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! ! broadcast result
    ! if (isRoot) glb3a = glb3b
    ! #ifdef MPI
    ! call MPI_bcast(glb3a, size(glb3a), my_real, root, localComm, ierr)
    ! #endif
    ! larray3b(i0:i1,j0:j1,:) = glb3a(i0:i1,j0:j1,:)
    ! larray3b = larray3a-larray3b
    ! ! diff should be 0
    ! if (maxval(abs(larray3b)) /= 0.) then
    !    print*, "test gather 3d FAILED"
    !    status=1
    ! else
    !    print*, "test gather 3d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! call par_barrier()
    ! !----------------------
    ! ! test SCATTER_3D_R
    ! !----------------------
    ! larray3b=0.
    ! call scatter( DistGrid, larray3b, glb3b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! larray3b=larray3a-larray3b
    ! ! diff should be 0
    ! if (maxval(abs(larray3b)) /= 0.) then
    !    print*, "test scatter 3d FAILED"
    !    status=1
    ! else
    !    print*, "test scatter 3d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! ! CLEANUP
    ! deallocate(larray3a,larray3b,glb3a,glb3b)
    ! call par_barrier()
    ! !----------------------
    ! ! Allocate 4D
    ! !----------------------
    ! allocate( larray4a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
    ! allocate( larray4b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
    ! allocate( glb4a( DistGrid%im_region, DistGrid%jm_region, levels, trac) )
    ! ! in halo, 0, elsewhere (myid+1)*level*trac
    ! larray4a=0.
    ! do l=1,trac
    !    do k=1,levels
    !       larray4a(i0:i1,j0:j1,k,l)=real((myid+1)*k*l)
    !    end do
    ! end do
    ! ! glb4b is global array, used in root only
    ! if (isRoot) then
    !    allocate( glb4b( DistGrid%im_region, DistGrid%jm_region, levels, trac) )
    ! else
    !    allocate( glb4b(1,1,1,1) )
    ! end if
    ! !----------------------
    ! ! test GATHER_4D_R
    ! !----------------------
    ! glb4b=0.
    ! larray4b=0.
    ! ! gather
    ! call gather( DistGrid, larray4a, glb4b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! ! broadcast result
    ! if (isRoot) glb4a = glb4b
    ! #ifdef MPI
    ! call MPI_bcast(glb4a, size(glb4a), my_real, root, localComm, ierr)
    ! #endif
    ! larray4b(i0:i1,j0:j1,:,:) = glb4a(i0:i1,j0:j1,:,:)
    ! larray4b = larray4a-larray4b
    ! ! diff should be 0
    ! if (maxval(abs(larray4b)) /= 0.) then
    !    print*, "test gather 4d FAILED"
    !    status=1
    ! else
    !    print*, "test gather 4d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! call par_barrier()
    ! !----------------------
    ! ! test SCATTER_4D_R
    ! !----------------------
    ! larray4b=0.
    ! call scatter( DistGrid, larray4b, glb4b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! larray4b=larray4a-larray4b
    ! ! diff should be 0
    ! if (maxval(abs(larray4b)) /= 0.) then
    !    print*, "test scatter 4d FAILED"
    !    status=1
    ! else
    !    print*, "test scatter 4d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! ! CLEANUP
    ! deallocate(larray4a,larray4b,glb4a,glb4b)
    ! call par_barrier()
    ! ! *************************************** HALO ***************************
    ! !----------------------
    ! ! test UPDATE_HALO_2D_R
    ! !----------------------
    ! ! Allocate 2D
    ! allocate( larray2a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
    ! allocate( larray2b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
    ! ! array to update : in halo set to 0, elsewhere to myid
    ! larray2b=0.
    ! larray2b(i0:i1,j0:j1)=real(myid)
    ! ! test array : fill halo with neighbors rank
    ! larray2a=0.
    ! larray2a( i0-x_halo:i0-1, j0:j1 ) = DistGrid%neighbors(1)  ! west halo
    ! larray2a( i1+1:i1+x_halo, j0:j1 ) = DistGrid%neighbors(3)  ! east halo
    ! larray2a( i0:i1, j1+1:j1+x_halo ) = DistGrid%neighbors(2)  ! north halo
    ! larray2a( i0:i1, j0-x_halo:j0-1 ) = DistGrid%neighbors(4)  ! south halo
    ! larray2a(i0:i1,j0:j1)=real(myid)
    ! where (larray2a == MPI_PROC_NULL) larray2a=0.
    ! ! update
    ! CALL UPDATE_HALO( DISTGRID, larray2b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! if (isRoot.and.(x_halo==1)) then   ! 32 values per line look good for 2x2 processes and halo=1
    !    print*, "----------------------------"
    !    print '(32F4.0)', larray2a
    !    print*, "----------------------------"
    !    print '(32F4.0)', larray2b
    !    print*, "----------------------------"
    ! end if
    ! ! compare (diff should be 0)
    ! larray2b=larray2a-larray2b
    ! if (maxval(abs(larray2b)) /= 0.) then
    !    print*, "test update_halo 2d FAILED"
    !    status=1
    ! else
    !    print*, "test update_halo 2d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! ! CLEANUP
    ! deallocate(larray2a,larray2b)
    ! call par_barrier()
    ! !----------------------
    ! ! test UPDATE_HALO_3D_R
    ! !----------------------
    ! ! Allocate 3D
    ! allocate( larray3a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
    ! allocate( larray3b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
    ! ! array to update : in halo set to 0, elsewhere to myid
    ! larray3b=0.
    ! larray3b(i0:i1,j0:j1,:)=real(myid)
    ! ! test array : fill halo with neighbors rank
    ! larray3a=0.
    ! larray3a( i0-x_halo:i0-1, j0:j1, : ) = DistGrid%neighbors(1)  ! west halo
    ! larray3a( i1+1:i1+x_halo, j0:j1, : ) = DistGrid%neighbors(3)  ! east halo
    ! larray3a( i0:i1, j1+1:j1+x_halo, : ) = DistGrid%neighbors(2)  ! north halo
    ! larray3a( i0:i1, j0-x_halo:j0-1, : ) = DistGrid%neighbors(4)  ! south halo
    ! larray3a(i0:i1,j0:j1,:)=real(myid)                            ! interior
    ! where (larray3a == MPI_PROC_NULL) larray3a=0.                 ! if no neighbor
    ! ! update
    ! CALL UPDATE_HALO( DISTGRID, larray3b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! ! compare (diff should be 0)
    ! larray3b=larray3a-larray3b
    ! if (maxval(abs(larray3b)) /= 0.) then
    !    print*, "test update_halo 3d FAILED"
    !    status=1
    ! else
    !    print*, "test update_halo 3d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! ! CLEANUP
    ! deallocate(larray3a,larray3b)
    ! call par_barrier()
    ! !----------------------
    ! ! test UPDATE_HALO_4D_R
    ! !----------------------
    ! ! Allocate 4D
    ! allocate( larray4a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
    ! allocate( larray4b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
    ! ! array to update : in halo set to 0, elsewhere to myid
    ! larray4b=0.
    ! larray4b(i0:i1,j0:j1,:,:)=real(myid)
    ! ! test array : fill halo with neighbors rank
    ! larray4a=0.
    ! larray4a( i0-x_halo:i0-1, j0:j1, :,: ) = DistGrid%neighbors(1)  ! west halo
    ! larray4a( i1+1:i1+x_halo, j0:j1, :,: ) = DistGrid%neighbors(3)  ! east halo
    ! larray4a( i0:i1, j1+1:j1+x_halo, :,: ) = DistGrid%neighbors(2)  ! north halo
    ! larray4a( i0:i1, j0-x_halo:j0-1, :,: ) = DistGrid%neighbors(4)  ! south halo
    ! larray4a(i0:i1,j0:j1,:,:)=real(myid)                            ! interior
    ! where (larray4a == MPI_PROC_NULL) larray4a=0.                   ! if no neighbor
    ! ! update
    ! CALL UPDATE_HALO( DISTGRID, larray4b, x_halo, status)
    ! IF_NOTOK_RETURN(status=1)
    ! ! compare (diff should be 0)
    ! larray4b=larray4a-larray4b
    ! if (maxval(abs(larray4b)) /= 0.) then
    !    print*, "test update_halo 4d FAILED"
    !    status=1
    ! else
    !    print*, "test update_halo 4d PASSED"
    ! end if
    ! IF_NOTOK_RETURN(status=1)
    ! ! CLEANUP
    ! deallocate(larray4a,larray4b)
    ! call par_barrier()

    status=0

  END SUBROUTINE TESTCOMM
!EOC

END MODULE DOMAIN_DECOMP
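!--------------------------------------------------------------------------
! Illustrative driver (kept as a comment): a minimal sketch of how the
! TESTCOMM self-test above could be exercised for several halo widths, as
! suggested in its remarks. The dist_grid object is assumed to have been
! initialized elsewhere (e.g. with Init_DistGrid); the subroutine name
! "run_comm_selftest" and the range of halo widths are illustrative only.
!
!   subroutine run_comm_selftest( dgrid, status )
!     use domain_decomp, only : dist_grid, Get_DistGrid, Print_DistGrid, testcomm
!     type(dist_grid), intent(in) :: dgrid
!     integer, intent(out)        :: status
!     integer :: halo, i0, i1, j0, j1
!     logical :: east, west, south, north
!
!     ! print the decomposition and retrieve local bounds and border flags
!     ! (not used further in this sketch)
!     call Print_DistGrid( dgrid )
!     call Get_DistGrid( dgrid, I_STRT=i0, I_STOP=i1, J_STRT=j0, J_STOP=j1, &
!                        hasEastBorder=east,  hasWestBorder=west,           &
!                        hasSouthBorder=south, hasNorthBorder=north )
!
!     ! run the communication checks for a couple of halo widths
!     do halo = 1, 2
!        call testcomm( dgrid, halo, status )
!        if (status /= 0) return
!     end do
!   end subroutine run_comm_selftest
!--------------------------------------------------------------------------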