!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Math and Computer Science Division, Argonne National Laboratory !
!-----------------------------------------------------------------------
! CVS twocmp.seqUnvn.F90,v 1.6 2007-12-19 17:13:17 rloy Exp
! CVS MCT_2_8_0
!BOP -------------------------------------------------------------------
!
! !ROUTINE: twocomponentUneven.sequential
!
! !DESCRIPTION: Provide a simple example of using MCT to connect two
! components. In this case the models are running sequentially but the
! second model is only running on 1 processor.
!
! !INTERFACE:
!
  16. program twosequn
  17. !
  18. ! !USES:
  19. !
  20. !--- Get only the things needed from MCT
  21. use m_MCTWorld,only: MCTWorld_init => init
  22. use m_GlobalSegMap,only: GlobalSegMap
  23. use m_GlobalSegMap,only: MCT_GSMap_init => init
  24. use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize
  25. use m_AttrVect,only : AttrVect
  26. use m_AttrVect,only : MCT_AtrVt_init => init
  27. use m_AttrVect,only : MCT_AtrVt_zero => zero
  28. use m_AttrVect,only : MCT_AtrVt_lsize => lsize
  29. use m_AttrVect,only : MCT_AtrVt_indexRA => indexRA
  30. use m_AttrVect,only : MCT_AtrVt_importRA => importRAttr
  31. use m_Rearranger,only: Rearranger
  32. use m_Rearranger,only: MCT_Rearranger_init => init
  33. use m_Rearranger,only: MCT_Rearrange => Rearrange
  34. implicit none
  35. include 'mpif.h'
  36. integer,parameter :: ngx = 6 ! points in x-direction
  37. integer,parameter :: ngy = 4 ! points in y-direction
  38. integer ier,world_group,model2_group,myrank2,myrank3
  39. integer,dimension(:),pointer :: myids,mycomms,peloc2
  40. integer,dimension(:,:),pointer :: GlobalId
  41. integer :: comm1,comm2,asize,mysize,i,myproc
  42. integer :: commsize
  43. integer,dimension(1) :: start1,length1,ranks
  44. integer,dimension(:),allocatable :: start2,length2
  45. !-----------------------------------------------------------------------
  46. ! The Main program.
  47. ! We are implementing a single-executable, sequential-execution system.
  48. ! Because its sequential, communication occurs through the main using
  49. ! arguments. The second component is only running on 1 processor
  50. type(GlobalSegMap) :: GSmap1,GSmap2
  51. type(AttrVect) :: av1,av2
  52. type(Rearranger) :: Rearr
  53. call MPI_init(ier)
  54. call mpi_comm_size(MPI_COMM_WORLD, mysize,ier)
  55. if(mysize .gt. 12) then
  56. write(6,*)"Must run on less than 12 processors"
  57. stop
  58. endif
  59. call mpi_comm_rank(MPI_COMM_WORLD, myproc,ier)
  60. ! the first model is running on all the processors so give
  61. ! it a dubplicate of MPI_COMM_WORLD for its communicator
  62. call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier)
  63. ! the second model is only running on one processor
  64. ! so use mpi_groups methods to define its communicator
  65. call mpi_comm_group(MPI_COMM_WORLD,world_group,ier)
  66. ! need a communicator that only has the first processor
  67. ranks(1)=0
  68. ! define the group
  69. call mpi_group_incl(world_group,1,ranks,model2_group,ier)
  70. ! now define the communicator
  71. ! first initialize it
  72. comm2=MPI_COMM_NULL
  73. call mpi_comm_create(MPI_COMM_WORLD,model2_group,comm2,ier)
  74. ! don't need the groups anymore
  75. call mpi_group_free(world_group,ier)
  76. call mpi_group_free(model2_group,ier)
  77. ! allocate arrays for the ids and comms
  78. allocate(myids(2),mycomms(2))
  79. ! Set the arrays to their values.
  80. myids(1)=1
  81. myids(2)=2
  82. mycomms(1)=comm1
  83. mycomms(2)=comm2
  84. ! now call the initm_ version of MCTWorld_init
  85. call MCTWorld_init(2,MPI_COMM_WORLD,mycomms,myids)
  86. ! first gsmap is the grid decomposed in one dimension
  87. ! there is 1 segment per processor
  88. length1(1)= (ngx * ngy)/mysize
  89. start1(1)= myproc * length1(1) + 1
  90. write(6,*)'gsmap1', myproc,length1(1),start1(1)
  91. call MCT_GSMap_init(GSMap1,start1,length1,0,comm1,1)
  92. ! second gsmap is the grid on one processor
  93. ! for GSMap init to work, the size of the start and length arrays
  94. ! must equal the number of local segments. So I must allocate
  95. ! size zero arrays on the other processors.
  96. if(myproc .eq. 0) then
  97. allocate(start2(1),length2(1))
  98. length2(1) = ngx*ngy
  99. start2(1) = 1
  100. else
  101. allocate(start2(0),length2(0))
  102. endif
  103. call MCT_GSMap_init(GSMap2,start2,length2,0,comm1,2)
  104. write(6,*)'gsmap2', myproc,GSMap2%ngseg,GSmap2%gsize,GSmap2%start(1), &
  105. GSmap2%pe_loc(1),GSmap2%length(1)
  106. ! initialize an Av on each GSMap
  107. call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap1,comm1))
  108. ! Use comm1 because lsize of GSMap2 on comm1 will return 0 on non-root processors.
  109. ! We need av2 to be full-sized on proc 0 and 0 size on other processors.
  110. call MCT_AtrVt_init(av2,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap2,comm1))
  111. ! create a rearranger. Use the communicator which contains all processors
  112. ! involved in the rearrangement, comm1
  113. call MCT_Rearranger_init(GSMap1,GSMap2,comm1,Rearr)
  114. !-------------end of initialization steps
  115. ! Start up model1 which fills av1 with data.
  116. call model1(comm1,av1)
  117. ! print out Av data
  118. do i=1,MCT_AtrVt_lsize(av1)
  119. write(6,*) "model 1 data", myproc,i,av1%rAttr(1,i),av1%rAttr(2,i)
  120. enddo
  121. ! rearrange data from model1 so that model2 can use it.
  122. call MCT_Rearrange(av1,av2,Rearr)
  123. ! pass data to model2 (which will print it out)
  124. ! model2 should only run on one processor.
  125. if(myproc .eq. 0) then
  126. call model2(comm2,av2)
  127. endif
  128. ! all done
  129. call MPI_Barrier(MPI_COMM_WORLD,ier)
  130. if (myproc==0) write(6,*) 'All Done'
  131. call mpi_finalize(ier)
  132. contains
  133. !-----------------------------------------------------------------------
  134. !-----------------------------------------------------------------------
  135. ! !ROUTINE:
  136. subroutine model1(comm1,mod1av) ! the first model
  137. implicit none
  138. integer :: comm1,mysize,ier,asize,myproc
  139. integer :: fieldindx,avsize,i
  140. integer,dimension(1) :: start,length
  141. real,pointer :: testarray(:)
  142. type(GlobalSegMap) :: GSmap
  143. type(AttrVect) :: mod1av
  144. !---------------------------
  145. ! find local rank and size
  146. call mpi_comm_size(comm1,mysize,ier)
  147. call mpi_comm_rank(comm1,myproc,ier)
  148. write(6,*)"model1 myproc,mysize",myproc,mysize
  149. avsize = MCT_AtrVt_lsize(mod1av)
  150. write(6,*)"model 1 myproc, av size", myproc,avsize
  151. ! Fill Av with some data
  152. ! fill first attribute the direct way
  153. fieldindx = MCT_AtrVt_indexRA(mod1av,"field1")
  154. do i=1,avsize
  155. mod1av%rAttr(fieldindx,i) = float(i+ 20*myproc)
  156. enddo
  157. ! fill second attribute using Av import function
  158. allocate(testarray(avsize))
  159. do i=1,avsize
  160. testarray(i)= cos((float(i+ 20*myproc)/24.) * 3.14)
  161. enddo
  162. call MCT_AtrVt_importRA(mod1av,"field2",testarray)
  163. end subroutine model1
  164. !-----------------------------------------------------------------------
  165. !-----------------------------------------------------------------------
  166. ! !ROUTINE:
  167. subroutine model2(comm2,mod2av)
  168. implicit none
  169. integer :: comm2,mysize,ier,asize,myproc
  170. integer :: i
  171. type(AttrVect) :: mod2av
  172. !---------------------------
  173. ! find local rank and size
  174. call mpi_comm_size(comm2,mysize,ier)
  175. call mpi_comm_rank(comm2,myproc,ier)
  176. write(6,*)"model2 myproc,mysize",myproc,mysize
  177. asize = MCT_AtrVt_lsize(mod2av)
  178. write(6,*)"model 2 myproc, av size", myproc,asize
  179. ! print out Av data
  180. do i=1,asize
  181. write(6,*) "model 2 data after", myproc,i,mod2av%rAttr(1,i),mod2av%rAttr(2,i)
  182. enddo
  183. end subroutine model2
  184. end