MODULE obs_mpp
   !!======================================================================
   !!                       ***  MODULE obs_mpp  ***
   !! Observation diagnostics: Various MPP support routines
   !!======================================================================
   !! History :  2.0  !  2006-03  (K. Mogensen)  Original code
   !!             -   !  2006-05  (K. Mogensen)  Reformatted
   !!             -   !  2008-01  (K. Mogensen)  add mpp_global_max
   !!----------------------------------------------------------------------
# define mpivar mpi_double_precision
   !!----------------------------------------------------------------------
   !! obs_mpp_bcast_integer : Broadcast an integer array from one processor to all processors
   !! obs_mpp_max_integer   : Find the maximum of each element of an integer array across processors
   !! obs_mpp_find_obs_proc : Find the processor that should hold each observation
   !! obs_mpp_sum_integers  : Sum an integer array over all processors
   !! obs_mpp_sum_integer   : Sum an integer over all processors
   !!----------------------------------------------------------------------
   USE dom_oce, ONLY : nproc, mig, mjg   ! Ocean space and time domain variables
   USE mpp_map, ONLY : mppmap
   USE in_out_manager
#if defined key_mpp_mpi
   USE lib_mpp, ONLY : mpi_comm_opa      ! MPP library
#endif

   IMPLICIT NONE
   PRIVATE

   PUBLIC obs_mpp_bcast_integer, &  !: Broadcast an integer array from one proc to all procs
      &   obs_mpp_max_integer,   &  !: Find the maximum of an integer array across processors
      &   obs_mpp_find_obs_proc, &  !: Find the processor that should hold each observation
      &   obs_mpp_sum_integers,  &  !: Sum an integer array over all processors
      &   obs_mpp_sum_integer,   &  !: Sum an integer over all processors
      &   mpp_alltoall_int,      &
      &   mpp_alltoallv_int,     &
      &   mpp_alltoallv_real,    &
      &   mpp_global_max

   !!----------------------------------------------------------------------
   !! NEMO/OPA 3.3 , NEMO Consortium (2010)
   !! $Id: obs_mpp.F90 2513 2010-12-23 16:01:47Z smasson $
   !! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------

CONTAINS

   SUBROUTINE obs_mpp_bcast_integer( kvals, kno, kroot )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_bcast_integer  ***
      !!
      !! ** Purpose : Send the array kvals from processor kroot to all processors
      !!
      !! ** Method  : MPI broadcast
      !!
      !! ** Action  : This works only with MPI.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in the array
      INTEGER                , INTENT(in   ) ::   kroot   ! Processor that sends the data
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! Array to send on kroot, to receive on the others
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      ! Call the MPI library to broadcast the data
      CALL mpi_bcast( kvals, kno, mpi_integer, &
         &            kroot, mpi_comm_opa, ierr )
#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE obs_mpp_bcast_integer
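
   !! Usage sketch (illustrative only; "iflags" is a hypothetical caller array):
   !!    INTEGER, DIMENSION(5) :: iflags
   !!    IF ( nproc == 0 ) iflags(:) = (/ 1, 2, 3, 4, 5 /)
   !!    CALL obs_mpp_bcast_integer( iflags, 5, 0 )
   !!    ! after the call every processor holds the 5 values set on processor 0
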
   SUBROUTINE obs_mpp_max_integer( kvals, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_max_integer  ***
      !!
      !! ** Purpose : Find the maximum of each element of an integer array
      !!              across processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in the array
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! Local values on entry, global maxima on exit
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      INTEGER, DIMENSION(kno) :: ivals
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      ! Call the MPI library to find the maximum across processors
      CALL mpi_allreduce( kvals, ivals, kno, mpi_integer, &
         &                mpi_max, mpi_comm_opa, ierr )
      kvals(:) = ivals(:)
#else
      ! no MPI: empty routine
#endif
   END SUBROUTINE obs_mpp_max_integer
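
   !! Usage sketch (illustrative only; "ilevs" is a hypothetical caller array):
   !!    INTEGER, DIMENSION(3) :: ilevs   ! per-processor values
   !!    CALL obs_mpp_max_integer( ilevs, 3 )
   !!    ! ilevs(k) now holds the maximum of element k over all processors
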
   SUBROUTINE obs_mpp_find_obs_proc( kobsp, kobsi, kobsj, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_find_obs_proc  ***
      !!
      !! ** Purpose : From the array kobsp containing the results of the
      !!              grid search on each processor, return a decision on
      !!              which processor should hold each observation.
      !!
      !! ** Method  : A temporary 2D array holding all the decisions is
      !!              constructed using mpi_allgather on each processor.
      !!              If more than one processor has found the observation,
      !!              the processor holding the observation in its inner
      !!              domain gets it.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno), INTENT(in   ) ::   kobsi, kobsj
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kobsp
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ji
      INTEGER :: jj
      INTEGER :: size
      INTEGER :: ierr
      INTEGER :: iobsip
      INTEGER :: iobsjp
      INTEGER :: num_sus_obs
      INTEGER, DIMENSION(kno) :: iobsig, iobsjg
      INTEGER, ALLOCATABLE, DIMENSION(:,:) :: iobsp, iobsi, iobsj
      !!
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !-----------------------------------------------------------------------
      ! Get the number of processors
      !-----------------------------------------------------------------------
      CALL mpi_comm_size( mpi_comm_opa, size, ierr )
      !-----------------------------------------------------------------------
      ! Convert local grid points to global grid points
      !-----------------------------------------------------------------------
      DO ji = 1, kno
         IF ( ( kobsi(ji) >= 1 ) .AND. ( kobsi(ji) <= jpi ) .AND. &
            & ( kobsj(ji) >= 1 ) .AND. ( kobsj(ji) <= jpj ) ) THEN
            iobsig(ji) = mig( kobsi(ji) )
            iobsjg(ji) = mjg( kobsj(ji) )
         ELSE
            iobsig(ji) = -1
            iobsjg(ji) = -1
         ENDIF
      END DO
      !-----------------------------------------------------------------------
      ! Get the decisions from all processors
      !-----------------------------------------------------------------------
      ALLOCATE( iobsp(kno,size) )
      ALLOCATE( iobsi(kno,size) )
      ALLOCATE( iobsj(kno,size) )
      CALL mpi_allgather( kobsp,  kno, mpi_integer, &
         &                iobsp,  kno, mpi_integer, &
         &                mpi_comm_opa, ierr )
      CALL mpi_allgather( iobsig, kno, mpi_integer, &
         &                iobsi,  kno, mpi_integer, &
         &                mpi_comm_opa, ierr )
      CALL mpi_allgather( iobsjg, kno, mpi_integer, &
         &                iobsj,  kno, mpi_integer, &
         &                mpi_comm_opa, ierr )
      !-----------------------------------------------------------------------
      ! Among the processors that found an observation, pick the owner:
      ! the inner-domain owner when known, otherwise the lowest processor
      ! number.
      !-----------------------------------------------------------------------
      kobsp(:) = -1
      num_sus_obs = 0
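      !-----------------------------------------------------------------------
      ! Decision loop (summary): for each observation ji,
      !  (1) claim it for the first (lowest-rank) processor that found it;
      !  (2) if two processors report it at different global grid points and
      !      both owners are valid (< 1000000), count it as suspicious;
      !  (3) hand it over to a candidate that owns the grid point in its
      !      inner domain (mppmap) or that has a lower processor number.
      !-----------------------------------------------------------------------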
      DO ji = 1, kno
         DO jj = 1, size
            IF ( ( kobsp(ji) == -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
               kobsp(ji) = iobsp(ji,jj)
               iobsip    = iobsi(ji,jj)
               iobsjp    = iobsj(ji,jj)
            ENDIF
            IF ( ( kobsp(ji) /= -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
               IF ( ( iobsip /= iobsi(ji,jj) ) .OR. &
                  & ( iobsjp /= iobsj(ji,jj) ) ) THEN
                  IF ( ( kobsp(ji)    < 1000000 ) .AND. &
                     & ( iobsp(ji,jj) < 1000000 ) ) THEN
                     num_sus_obs = num_sus_obs + 1
                  ENDIF
               ENDIF
               IF ( mppmap(iobsip,iobsjp) /= ( kobsp(ji)+1 ) ) THEN
                  IF ( ( iobsi(ji,jj) /= -1 ) .AND. &
                     & ( iobsj(ji,jj) /= -1 ) ) THEN
                     IF ( ( mppmap(iobsi(ji,jj),iobsj(ji,jj)) == ( iobsp(ji,jj)+1 ) ) &
                        & .OR. ( iobsp(ji,jj) < kobsp(ji) ) ) THEN
                        kobsp(ji) = iobsp(ji,jj)
                        iobsip    = iobsi(ji,jj)
                        iobsjp    = iobsj(ji,jj)
                     ENDIF
                  ENDIF
               ENDIF
            ENDIF
         END DO
      END DO
      IF (lwp) WRITE(numout,*) 'Number of suspicious observations: ', num_sus_obs
      DEALLOCATE( iobsj )
      DEALLOCATE( iobsi )
      DEALLOCATE( iobsp )
#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE obs_mpp_find_obs_proc

   SUBROUTINE obs_mpp_sum_integers( kvalsin, kvalsout, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integers  ***
      !!
      !! ** Purpose : Sum an integer array element-wise over all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno), INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(kno), INTENT(  out) ::   kvalsout
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalsin, kvalsout, kno, mpi_integer, &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return the input values
      !-----------------------------------------------------------------------
      kvalsout(:) = kvalsin(:)
#endif
      !
   END SUBROUTINE obs_mpp_sum_integers
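
   !! Usage sketch (illustrative only; "icnt" and "itot" are hypothetical):
   !!    INTEGER, DIMENSION(2) :: icnt, itot
   !!    icnt(:) = (/ 10, 20 /)   ! local contribution of this processor
   !!    CALL obs_mpp_sum_integers( icnt, itot, 2 )
   !!    ! itot(k) = sum of icnt(k) over all processors
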
   SUBROUTINE obs_mpp_sum_integer( kvalin, kvalout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integer  ***
      !!
      !! ** Purpose : Sum a single integer over all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in   ) ::   kvalin
      INTEGER, INTENT(  out) ::   kvalout
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalin, kvalout, 1, mpi_integer, &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return the input value
      !-----------------------------------------------------------------------
      kvalout = kvalin
#endif
      !
   END SUBROUTINE obs_mpp_sum_integer

   SUBROUTINE mpp_global_max( pval )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_global_max  ***
      !!
      !! ** Purpose : Get the maximum value across processors for a global
      !!              real array.
      !!
      !! ** Method  : MPI allreduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      REAL(KIND=wp), DIMENSION(jpiglo,jpjglo), INTENT(inout) ::   pval
      !
      INTEGER :: ierr
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      REAL(KIND=wp), DIMENSION(:,:), ALLOCATABLE :: zcp
      !!----------------------------------------------------------------------
      ! Copy the data for input to MPI
      ALLOCATE( zcp(jpiglo,jpjglo) )
      zcp(:,:) = pval(:,:)
      ! Call the MPI library to find the maximum across processors
      CALL mpi_allreduce( zcp, pval, jpiglo*jpjglo, mpivar, &
         &                mpi_max, mpi_comm_opa, ierr )
      DEALLOCATE( zcp )
#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE mpp_global_max
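
   !! Usage sketch (illustrative only; "zglo" is a hypothetical global field):
   !!    REAL(wp), DIMENSION(jpiglo,jpjglo) :: zglo
   !!    CALL mpp_global_max( zglo )
   !!    ! each element of zglo now holds its maximum over all processors
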
   SUBROUTINE mpp_alltoall_int( kno, kvalsin, kvalsout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoall_int  ***
      !!
      !! ** Purpose : All-to-all exchange of an integer array.
      !!
      !! ** Method  : MPI alltoall
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                      , INTENT(in   ) ::   kno   ! Number of elements per processor
      INTEGER, DIMENSION(kno*jpnij), INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(kno*jpnij), INTENT(  out) ::   kvalsout
      !!
      INTEGER :: ierr
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoall( kvalsin,  kno, mpi_integer, &
         &               kvalsout, kno, mpi_integer, &
         &               mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return the input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
      !
   END SUBROUTINE mpp_alltoall_int
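
   !! Data layout sketch (illustrative): with jpnij = 3 and kno = 2,
   !!    kvalsin = (/ a1,a2,  b1,b2,  c1,c2 /)
   !! sends a* to processor 1, b* to processor 2 and c* to processor 3;
   !! kvalsout collects the kno values received from each processor in
   !! rank order.
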
   SUBROUTINE mpp_alltoallv_int( kvalsin, knoin , kinv , kvalsout, &
      &                          knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_int  ***
      !!
      !! ** Purpose : All-to-all exchange with variable counts (integer version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                   , INTENT(in   ) ::   knoin         ! Total number of elements to send
      INTEGER                   , INTENT(in   ) ::   knoout        ! Total number of elements to receive
      INTEGER, DIMENSION(jpnij)                 ::   kinv, koutv   ! Send/receive counts per processor
      INTEGER, DIMENSION(knoin) , INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(knoout), INTENT(  out) ::   kvalsout
      !!
      INTEGER :: ierr
      INTEGER :: jproc
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) :: irdsp, isdsp
      !-----------------------------------------------------------------------
      ! Compute the send and receive displacements
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
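      !-----------------------------------------------------------------------
      ! Worked example (illustrative): with jpnij = 3 and kinv = (/ 2, 1, 3 /),
      ! the send displacements are isdsp = (/ 0, 2, 3 /): the block destined
      ! for processor jproc starts kinv(1)+...+kinv(jproc-1) elements into
      ! kvalsin.  irdsp is built the same way from koutv for the receive side.
      !-----------------------------------------------------------------------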
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( kvalsin,  kinv,  isdsp, mpi_integer, &
         &                kvalsout, koutv, irdsp, mpi_integer, &
         &                mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return the input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
      !
   END SUBROUTINE mpp_alltoallv_int

   SUBROUTINE mpp_alltoallv_real( pvalsin, knoin , kinv , pvalsout, &
      &                           knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_real  ***
      !!
      !! ** Purpose : All-to-all exchange with variable counts (real version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                    , INTENT(in   ) ::   knoin         ! Total number of elements to send
      INTEGER                    , INTENT(in   ) ::   knoout        ! Total number of elements to receive
      INTEGER , DIMENSION(jpnij)                 ::   kinv, koutv   ! Send/receive counts per processor
      REAL(wp), DIMENSION(knoin) , INTENT(in   ) ::   pvalsin
      REAL(wp), DIMENSION(knoout), INTENT(  out) ::   pvalsout
      !!
      INTEGER :: ierr
      INTEGER :: jproc
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) :: irdsp, isdsp
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Compute the send and receive displacements
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( pvalsin,  kinv,  isdsp, mpivar, &
         &                pvalsout, koutv, irdsp, mpivar, &
         &                mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return the input values
      !-----------------------------------------------------------------------
      pvalsout = pvalsin
#endif
      !
   END SUBROUTINE mpp_alltoallv_real

   !!======================================================================
END MODULE obs_mpp