MODULE lib_mpp
   !!======================================================================
   !!                       ***  MODULE  lib_mpp  ***
   !! Ocean numerics:  massively parallel processing library
   !!=====================================================================
   !! History :  OPA  !  1994  (M. Guyon, J. Escobar, M. Imbard)  Original code
   !!            7.0  !  1997  (A.M. Treguier)  SHMEM additions
   !!            8.0  !  1998  (M. Imbard, J. Escobar, L. Colombet)  SHMEM and MPI
   !!                 !  1998  (J.M. Molines)  Open boundary conditions
   !!  NEMO      1.0  !  2003  (J.-M. Molines, G. Madec)  F90, free form
   !!                 !  2003  (J.M. Molines)  add mpp_ini_north(_3d,_2d)
   !!             -   !  2004  (R. Bourdalle Badie)  isend option in mpi
   !!                 !  2004  (J.M. Molines)  minloc, maxloc
   !!             -   !  2005  (G. Madec, S. Masson)  npolj=5,6 F-point & ice cases
   !!             -   !  2005  (R. Redler)  Replacement of MPI_COMM_WORLD except for MPI_Abort
   !!             -   !  2005  (R. Benshila, G. Madec)  add extra halo case
   !!             -   !  2008  (R. Benshila)  add mpp_ini_ice
   !!            3.2  !  2009  (R. Benshila)  SHMEM suppression, north fold in lbc_nfd
   !!            3.2  !  2009  (O. Marti)  add mpp_ini_znl
   !!            4.0  !  2011  (G. Madec)  move ctl_ routines from in_out_manager
   !!            3.5  !  2012  (S. Mocavero, I. Epicoco)  add 'mpp_lnk_bdy_3d', 'mpp_lnk_obc_3d',
   !!                          'mpp_lnk_bdy_2d' and 'mpp_lnk_obc_2d' routines and update
   !!                          the mppobc routine to optimize the BDY and OBC communications
   !!            3.5  !  2013  (C. Ethe, G. Madec)  message passing arrays as local variables
   !!            3.5  !  2013  (S. Mocavero, I. Epicoco - CMCC)  north fold optimizations
   !!            3.6  !  2015  (O. Tintó and M. Castrillo - BSC)  add 'mpp_lnk_2d_multiple', 'mpp_lbc_north_2d_multiple', 'mpp_max_multiple'
   !!----------------------------------------------------------------------
   !!----------------------------------------------------------------------
   !!   ctl_stop   : print an error message and stop the run
   !!   ctl_warn   : print a warning message
   !!   ctl_opn    : open a file and check that the required file is available
   !!   ctl_nam    : print information when an error occurs while reading a namelist
   !!   get_unit   : give the index of an unused logical unit
   !!----------------------------------------------------------------------
#if defined key_mpp_mpi
   !!----------------------------------------------------------------------
   !!   'key_mpp_mpi'             MPI massively parallel processing library
   !!----------------------------------------------------------------------
   !!   lib_mpp_alloc     : allocate mpp arrays
   !!   mynode            : identify the processor unit
   !!   mpp_lnk           : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d)
   !!   mpp_lnk_3d_gather : message passing management for two 3D arrays
   !!   mpp_lnk_e         : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)
   !!   mpp_lnk_icb       : interface for message passing of 2d arrays with extra halo for icebergs (mpp_lnk_2d_icb)
   !!   mpprecv           : receive a message from a given processor
   !!   mppsend           : send a message to a given processor
   !!   mppscatter        : scatter an array from one processor to the others
   !!   mppgather         : gather an array on one processor from the others
   !!   mpp_min           : generic interface for mppmin_int , mppmin_a_int , mppmin_real, mppmin_a_real
   !!   mpp_max           : generic interface for mppmax_int , mppmax_a_int , mppmax_real, mppmax_a_real
   !!   mpp_sum           : generic interface for mppsum_int , mppsum_a_int , mppsum_real, mppsum_a_real
   !!   mpp_minloc        : location of the global minimum of a field
   !!   mpp_maxloc        : location of the global maximum of a field
   !!   mppsync           : synchronize all processors
   !!   mppstop           : stop all processors
   !!   mpp_ini_north     : initialisation of north fold
   !!   mpp_lbc_north     : north fold processors gathering
   !!   mpp_lbc_north_e   : variant of mpp_lbc_north for extra outer halo
   !!   mpp_lbc_north_icb : variant of mpp_lbc_north for extra outer halo with icebergs
   !!----------------------------------------------------------------------
   USE dom_oce          ! ocean space and time domain
   USE lbcnfd           ! north fold treatment
   USE in_out_manager   ! I/O manager
   USE wrk_nemo         ! work arrays

   IMPLICIT NONE
   PRIVATE

   PUBLIC   ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam
   PUBLIC   mynode, mppstop, mppsync, mpp_comm_free
   PUBLIC   mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e
   PUBLIC   mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc
   PUBLIC   mpp_max_multiple
   PUBLIC   mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e
   PUBLIC   mpp_lnk_2d_9, mpp_lnk_2d_multiple
   PUBLIC   mppscatter, mppgather
   PUBLIC   mpp_ini_ice, mpp_ini_znl
   PUBLIC   mppsize
   PUBLIC   mppsend, mpprecv   ! needed by TAM and ICB routines
   PUBLIC   mpp_lnk_bdy_2d, mpp_lnk_bdy_3d
   PUBLIC   mpp_lbc_north_icb, mpp_lnk_2d_icb
   PUBLIC   mpprank

   TYPE arrayptr
      REAL(wp), DIMENSION(:,:), POINTER ::   pt2d
   END TYPE arrayptr
   PUBLIC   arrayptr
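   !! arrayptr holds a pointer to one 2D field; an array of arrayptr elements is
   !! used by mpp_lnk_2d_multiple / mpp_lnk_2d_9 below to exchange the halos of
   !! several 2D arrays within a single set of messages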
   !! * Interfaces
   !! define a generic interface for these routines as they are sometimes
   !! called with scalar arguments instead of array arguments, which causes
   !! problems for compilation on AIX, NEC and SGI systems. OK on COMPAQ.
   INTERFACE mpp_min
      MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real
   END INTERFACE
   INTERFACE mpp_max
      MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real
   END INTERFACE
   INTERFACE mpp_sum
      MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real,   &
         &             mppsum_realdd, mppsum_a_realdd
   END INTERFACE
   INTERFACE mpp_lbc_north
      MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d
   END INTERFACE
   INTERFACE mpp_minloc
      MODULE PROCEDURE mpp_minloc2d, mpp_minloc3d
   END INTERFACE
   INTERFACE mpp_maxloc
      MODULE PROCEDURE mpp_maxloc2d, mpp_maxloc3d
   END INTERFACE
   INTERFACE mpp_max_multiple
      MODULE PROCEDURE mppmax_real_multiple
   END INTERFACE
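   !! A minimal sketch of how these generic interfaces resolve (the variable
   !! names are illustrative only, not defined in this module): the call
   !! dispatches on the type and rank of the argument, e.g.
   !!      CALL mpp_sum( zarea )              ! scalar REAL(wp)  -> mppsum_real
   !!      CALL mpp_sum( zbuf, SIZE(zbuf) )   ! REAL(wp) array   -> mppsum_a_real
   !!      CALL mpp_max( itime )              ! scalar INTEGER   -> mppmax_int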
   !! ========================= !!
   !!  MPI  variable definition !!
   !! ========================= !!
!$AGRIF_DO_NOT_TREAT
   INCLUDE 'mpif.h'
!$AGRIF_END_DO_NOT_TREAT

   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .TRUE.    !: mpp flag
   INTEGER, PARAMETER         ::   nprocmax = 2**10   ! maximum number of processes (required to be a power of 2)
   INTEGER ::   mppsize   ! number of processes
   INTEGER ::   mpprank   ! process number  [ 0 - size-1 ]
!$AGRIF_DO_NOT_TREAT
   INTEGER, PUBLIC ::   mpi_comm_opa   ! opa local communicator
!$AGRIF_END_DO_NOT_TREAT

   INTEGER ::   MPI_SUMDD

   ! variables used in case of sea-ice
   INTEGER, PUBLIC ::   ncomm_ice       !: communicator made by the processors with sea-ice (public so that it can be freed in limthd)
   INTEGER ::   ngrp_iworld             ! group ID for the world processors (for rheology)
   INTEGER ::   ngrp_ice                ! group ID for the ice processors (for rheology)
   INTEGER ::   ndim_rank_ice           ! number of 'ice' processors
   INTEGER ::   n_ice_root              ! number (in the comm_ice) of proc 0 in the ice comm
   INTEGER, DIMENSION(:), ALLOCATABLE, SAVE ::   nrank_ice   ! dimension ndim_rank_ice

   ! variables used for zonal integration
   INTEGER, PUBLIC ::   ncomm_znl       !: communicator made by the processors on the same zonal average
   LOGICAL, PUBLIC ::   l_znl_root      ! true on the left-most processor of the same row
   INTEGER ::   ngrp_znl                ! group ID for the znl processors
   INTEGER ::   ndim_rank_znl           ! number of processors on the same zonal average
   INTEGER, DIMENSION(:), ALLOCATABLE, SAVE ::   nrank_znl   ! dimension ndim_rank_znl, ranks of the procs in the same znl domain

   ! North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM)
   INTEGER, PUBLIC ::   ngrp_world        ! group ID for the world processors
   INTEGER, PUBLIC ::   ngrp_opa          ! group ID for the opa processors
   INTEGER, PUBLIC ::   ngrp_north        ! group ID for the northern processors (to be folded)
   INTEGER, PUBLIC ::   ncomm_north       ! communicator made by the processors belonging to ngrp_north
   INTEGER, PUBLIC ::   ndim_rank_north   ! number of 'sea' processors in the northern line (can be /= jpni !)
   INTEGER, PUBLIC ::   njmppmax          ! value of njmpp for the processors of the northern line
   INTEGER, PUBLIC ::   north_root        ! number (in the comm_opa) of proc 0 in the northern comm
   INTEGER, DIMENSION(:), ALLOCATABLE, SAVE, PUBLIC ::   nrank_north   ! dimension ndim_rank_north

   ! Type of send : standard, buffered, immediate
   CHARACTER(len=1), PUBLIC ::   cn_mpi_send         ! type of mpi send/receive (S=standard, B=bsend, I=isend)
   LOGICAL         , PUBLIC ::   l_isend = .FALSE.   ! isend use indicator (T if cn_mpi_send='I')
   INTEGER         , PUBLIC ::   nn_buffer           ! size of the buffer in case of mpi_bsend
   REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE ::   tampon   ! buffer in case of bsend

   LOGICAL, PUBLIC ::   ln_nnogather                 ! namelist control of northfold comms
   LOGICAL, PUBLIC ::   l_north_nogather = .FALSE.   ! internal control of northfold comms
   INTEGER, PUBLIC ::   ityp
   !!----------------------------------------------------------------------
   !! NEMO/OPA 3.3 , NEMO Consortium (2010)
   !! $Id: lib_mpp.F90 4990 2014-12-15 16:42:49Z timgraham $
   !! Software governed by the CeCILL licence     (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------
CONTAINS

   FUNCTION mynode( ldtxt, ldname, kumnam_ref, kumnam_cfg, kumond, kstop, localComm )
      !!----------------------------------------------------------------------
      !!                  ***  routine mynode  ***
      !!
      !! ** Purpose :   Find the processor unit
      !!----------------------------------------------------------------------
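      !! A typical call, sketched here for illustration only (caller-side names
      !! such as cltxt, numnam_ref, numnam_cfg, numond and nstop are assumptions,
      !! not defined in this module):
      !!      narea = mynode( cltxt, 'output.namelist.dyn', numnam_ref,   &
      !!         &            numnam_cfg, numond, nstop )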
      CHARACTER(len=*), DIMENSION(:), INTENT(  out) ::   ldtxt
      CHARACTER(len=*)              , INTENT(in   ) ::   ldname
      INTEGER                       , INTENT(in   ) ::   kumnam_ref   ! logical unit for reference namelist
      INTEGER                       , INTENT(in   ) ::   kumnam_cfg   ! logical unit for configuration namelist
      INTEGER                       , INTENT(inout) ::   kumond       ! logical unit for namelist output
      INTEGER                       , INTENT(inout) ::   kstop        ! stop indicator
      INTEGER, OPTIONAL             , INTENT(in   ) ::   localComm
      !
      INTEGER ::   mynode, ierr, code, ji, ii, ios
      LOGICAL ::   mpi_was_called
      !
      NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij, ln_nnogather
      !!----------------------------------------------------------------------
      !
      ii = 1
      WRITE(ldtxt(ii),*)                                                          ;   ii = ii + 1
      WRITE(ldtxt(ii),*) 'mynode : mpi initialisation'                            ;   ii = ii + 1
      WRITE(ldtxt(ii),*) '~~~~~~ '                                                ;   ii = ii + 1
      !
      REWIND( kumnam_ref )   ! Namelist nammpp in reference namelist: mpi variables
      READ  ( kumnam_ref, nammpp, IOSTAT = ios, ERR = 901 )
901   IF( ios /= 0 )   CALL ctl_nam( ios, 'nammpp in reference namelist', lwp )
      REWIND( kumnam_cfg )   ! Namelist nammpp in configuration namelist: mpi variables
      READ  ( kumnam_cfg, nammpp, IOSTAT = ios, ERR = 902 )
902   IF( ios /= 0 )   CALL ctl_nam( ios, 'nammpp in configuration namelist', lwp )
      !                      ! control print
      WRITE(ldtxt(ii),*) '   Namelist nammpp'                                     ;   ii = ii + 1
      WRITE(ldtxt(ii),*) '      mpi send type                cn_mpi_send = ', cn_mpi_send   ;   ii = ii + 1
      WRITE(ldtxt(ii),*) '      size in bytes of exported buffer   nn_buffer = ', nn_buffer ;   ii = ii + 1
#if defined key_agrif
      IF( .NOT. Agrif_Root() ) THEN
         jpni  = Agrif_Parent(jpni )
         jpnj  = Agrif_Parent(jpnj )
         jpnij = Agrif_Parent(jpnij)
      ENDIF
#endif
      IF( jpnij < 1 ) THEN
         ! If jpnij is not specified in the namelist then we calculate it; this
         ! means that land-only subdomains will not be removed from the decomposition.
         jpnij = jpni * jpnj
      END IF
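      ! Illustrative example (values assumed): with jpni = 4 and jpnj = 2 the
      ! line above gives jpnij = 4 * 2 = 8, so all 8 subdomains are kept,
      ! including any that contain only land points.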
      IF( (jpni < 1) .OR. (jpnj < 1) ) THEN
         WRITE(ldtxt(ii),*) '      jpni, jpnj and jpnij will be calculated automatically'   ;   ii = ii + 1
      ELSE
         WRITE(ldtxt(ii),*) '      processor grid extent in i         jpni = ', jpni        ;   ii = ii + 1
         WRITE(ldtxt(ii),*) '      processor grid extent in j         jpnj = ', jpnj        ;   ii = ii + 1
         WRITE(ldtxt(ii),*) '      number of local domains           jpnij = ', jpnij       ;   ii = ii + 1
      END IF
      WRITE(ldtxt(ii),*) '      avoid use of mpi_allgather at the north fold   ln_nnogather = ', ln_nnogather   ;   ii = ii + 1

      CALL mpi_initialized( mpi_was_called, code )
      IF( code /= MPI_SUCCESS ) THEN
         DO ji = 1, SIZE(ldtxt)
            IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)   ! control print of mynode
         END DO
         WRITE(*, cform_err)
         WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
         CALL mpi_abort( mpi_comm_world, code, ierr )
      ENDIF

      IF( mpi_was_called ) THEN
         !
         SELECT CASE ( cn_mpi_send )
         CASE ( 'S' )                ! Standard mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Standard blocking mpi send (send)'           ;   ii = ii + 1
         CASE ( 'B' )                ! Buffer mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Buffer blocking mpi send (bsend)'            ;   ii = ii + 1
            IF( Agrif_Root() )   CALL mpi_init_opa( ldtxt, ii, ierr )
         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
            WRITE(ldtxt(ii),*) '           Immediate non-blocking send (isend)'         ;   ii = ii + 1
            l_isend = .TRUE.
         CASE DEFAULT
            WRITE(ldtxt(ii),cform_err)                                                  ;   ii = ii + 1
            WRITE(ldtxt(ii),*) '           bad value for cn_mpi_send = ', cn_mpi_send   ;   ii = ii + 1
            kstop = kstop + 1
         END SELECT
      ELSE IF ( PRESENT(localComm) .AND. .NOT. mpi_was_called ) THEN
         WRITE(ldtxt(ii),*) ' lib_mpp: You cannot provide a local communicator '        ;   ii = ii + 1
         WRITE(ldtxt(ii),*) '          without calling MPI_Init before ! '              ;   ii = ii + 1
         kstop = kstop + 1
      ELSE
         SELECT CASE ( cn_mpi_send )
         CASE ( 'S' )                ! Standard mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Standard blocking mpi send (send)'           ;   ii = ii + 1
            CALL mpi_init( ierr )
         CASE ( 'B' )                ! Buffer mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Buffer blocking mpi send (bsend)'            ;   ii = ii + 1
            IF( Agrif_Root() )   CALL mpi_init_opa( ldtxt, ii, ierr )
         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
            WRITE(ldtxt(ii),*) '           Immediate non-blocking send (isend)'         ;   ii = ii + 1
            l_isend = .TRUE.
            CALL mpi_init( ierr )
         CASE DEFAULT
            WRITE(ldtxt(ii),cform_err)                                                  ;   ii = ii + 1
            WRITE(ldtxt(ii),*) '           bad value for cn_mpi_send = ', cn_mpi_send   ;   ii = ii + 1
            kstop = kstop + 1
         END SELECT
         !
      ENDIF

      IF( PRESENT(localComm) ) THEN
         IF( Agrif_Root() ) THEN
            mpi_comm_opa = localComm
         ENDIF
      ELSE
         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code )
         IF( code /= MPI_SUCCESS ) THEN
            DO ji = 1, SIZE(ldtxt)
               IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)   ! control print of mynode
            END DO
            WRITE(*, cform_err)
            WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
            CALL mpi_abort( mpi_comm_world, code, ierr )
         ENDIF
      ENDIF
#if defined key_agrif
      IF( Agrif_Root() ) THEN
         CALL Agrif_MPI_Init(mpi_comm_opa)
      ELSE
         CALL Agrif_MPI_set_grid_comm(mpi_comm_opa)
      ENDIF
#endif
      CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr )
      CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr )
      mynode = mpprank

      IF( mynode == 0 ) THEN
         CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., 1 )
         WRITE(kumond, nammpp)
      ENDIF
      !
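      ! MPI_SUMDD below is a user-defined reduction operation built from
      ! DDPDD_MPI (double-double, compensated summation); it backs the
      ! mppsum_realdd / mppsum_a_realdd procedures of mpp_sum and is intended
      ! to improve the reproducibility of global sums.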
      CALL MPI_OP_CREATE( DDPDD_MPI, .TRUE., MPI_SUMDD, ierr )
      !
   END FUNCTION mynode


   SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_3d  ***
      !!
      !! ** Purpose :   Message passing management
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !! ** Action  :   ptab with updated values at its periphery
      !!
      !!----------------------------------------------------------------------
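      !! Illustrative calls (the field names are assumptions, not defined here):
      !!      CALL mpp_lnk_3d( ztem, 'T',  1._wp )   ! T-point field, sign kept at the north fold
      !!      CALL mpp_lnk_3d( zu  , 'U', -1._wp )   ! U-point field, sign change at the north fold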
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab      ! 3D array on which the boundary condition is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
      !                                                              ! = T , U , V , F , W points
      REAL(wp)                        , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
      !                                                              ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL      , INTENT(in   ) ::   cd_mpp    ! fill the overlap area only
      REAL(wp)        , OPTIONAL      , INTENT(in   ) ::   pval      ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jk, jl             ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   zt3ns, zt3sn   ! 3d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   zt3ew, zt3we   ! 3d for east-west & west-east
      !!----------------------------------------------------------------------
      ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2),   &
         &      zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2)  )
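      ! Buffer layout: index 1 of the last dimension holds the halo data to be
      ! sent, index 2 receives the neighbour's halo; jpreci and jprecj are the
      ! halo widths in i and j.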
      !
      IF( PRESENT( pval ) ) THEN   ;   zland = pval   ! set land value
      ELSE                         ;   zland = 0.e0   ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with existing values
         !
         ! WARNING ptab is defined only between nld and nle
         DO jk = 1, jpk
            DO jj = nlcj+1, jpj                 ! added line(s)   (inner only)
               ptab(nldi  :nlei  , jj          ,jk) = ptab(nldi:nlei,     nlej,jk)
               ptab(1     :nldi-1, jj          ,jk) = ptab(nldi     ,     nlej,jk)
               ptab(nlei+1:nlci  , jj          ,jk) = ptab(     nlei,     nlej,jk)
            END DO
            DO ji = nlci+1, jpi                 ! added column(s) (full)
               ptab(ji           ,nldj  :nlej  ,jk) = ptab(     nlei,nldj:nlej,jk)
               ptab(ji           ,1     :nldj-1,jk) = ptab(     nlei,nldj     ,jk)
               ptab(ji           ,nlej+1:jpj   ,jk) = ptab(     nlei,     nlej,jk)
            END DO
         END DO
         !
      ELSE                              ! standard close or cyclic treatment
         !
         !                                   ! East-West boundaries
         !                                   !* Cyclic east-west
         IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
            ptab( 1 ,:,:) = ptab(jpim1,:,:)
            ptab(jpi,:,:) = ptab(  2  ,:,:)
         ELSE                                !* closed
            IF( .NOT. cd_type == 'F' )   ptab(     1       :jpreci,:,:) = zland   ! west except F-point
                                         ptab(nlci-jpreci+1:jpi   ,:,:) = zland   ! east
         ENDIF
         !                                   ! North-South boundaries (always closed)
         IF( .NOT. cd_type == 'F' )   ptab(:,     1       :jprecj,:) = zland      ! south except F-point
                                      ptab(:,nlcj-jprecj+1:jpj   ,:) = zland      ! north
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:)
            zt3we(:,jl,:,1) = ptab(iihom +jl,:,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj * jpk
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )
         CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend)   CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci-jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = zt3we(:,jl,:,2)
            ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = zt3we(:,jl,:,2)
         END DO
      END SELECT

      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:)
            zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi * jpk
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )
         CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend)   CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj-jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab(:,jl      ,:) = zt3sn(:,jl,:,2)
            ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab(:,jl,:) = zt3sn(:,jl,:,2)
         END DO
      END SELECT

      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we )
      !
   END SUBROUTINE mpp_lnk_3d

   SUBROUTINE mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_multiple  ***
      !!
      !! ** Purpose :   Message passing management for multiple 2d arrays
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   num_fields
      TYPE( arrayptr ), DIMENSION(:) ::   pt2d_array
      CHARACTER(len=1), DIMENSION(:), INTENT(in   ) ::   type_array   ! define the nature of ptab array grid-points
      !                                                               ! = T , U , V , F , W and I points
      REAL(wp)        , DIMENSION(:), INTENT(in   ) ::   psgn_array   ! =-1 the sign change across the north fold boundary
      !                                                               ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL    , INTENT(in   ) ::   cd_mpp       ! fill the overlap area only
      REAL(wp)        , OPTIONAL    , INTENT(in   ) ::   pval         ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
      INTEGER  ::   ii                         ! multi-field loop index
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ns, zt2sn   ! 2d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ew, zt2we   ! 2d for east-west & west-east
      !!----------------------------------------------------------------------
      ALLOCATE( zt2ns(jpi,jprecj,2*num_fields), zt2sn(jpi,jprecj,2*num_fields),   &
         &      zt2ew(jpj,jpreci,2*num_fields), zt2we(jpj,jpreci,2*num_fields)  )
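      ! Buffer layout: slices 1..num_fields of the last dimension hold the halo
      ! data to be sent (one slice per field); slices num_fields+1..2*num_fields
      ! receive the corresponding data from the neighbour.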
      !
      IF( PRESENT( pval ) ) THEN   ;   zland = pval   ! set land value
      ELSE                         ;   zland = 0.e0   ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      !
      DO ii = 1, num_fields
         IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with existing values
            !
            ! WARNING pt2d is defined only between nld and nle
            DO jj = nlcj+1, jpj                 ! added line(s)   (inner only)
               pt2d_array(ii)%pt2d(nldi  :nlei  , jj) = pt2d_array(ii)%pt2d(nldi:nlei, nlej)
               pt2d_array(ii)%pt2d(1     :nldi-1, jj) = pt2d_array(ii)%pt2d(nldi     , nlej)
               pt2d_array(ii)%pt2d(nlei+1:nlci  , jj) = pt2d_array(ii)%pt2d(     nlei, nlej)
            END DO
            DO ji = nlci+1, jpi                 ! added column(s) (full)
               pt2d_array(ii)%pt2d(ji, nldj  :nlej  ) = pt2d_array(ii)%pt2d(nlei, nldj:nlej)
               pt2d_array(ii)%pt2d(ji, 1     :nldj-1) = pt2d_array(ii)%pt2d(nlei, nldj     )
               pt2d_array(ii)%pt2d(ji, nlej+1:jpj   ) = pt2d_array(ii)%pt2d(nlei,      nlej)
            END DO
            !
         ELSE                              ! standard close or cyclic treatment
            !
            !                                   ! East-West boundaries
            IF( nbondi == 2 .AND.   &           ! Cyclic east-west
               &    (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
               pt2d_array(ii)%pt2d(  1  , : ) = pt2d_array(ii)%pt2d( jpim1, : )   ! west
               pt2d_array(ii)%pt2d( jpi , : ) = pt2d_array(ii)%pt2d(   2  , : )   ! east
            ELSE                                ! closed
               IF( .NOT. type_array(ii) == 'F' )   pt2d_array(ii)%pt2d(            1 : jpreci,:) = zland   ! west except F-point
                                                   pt2d_array(ii)%pt2d(nlci-jpreci+1 : jpi   ,:) = zland   ! east
            ENDIF
            !                                   ! North-South boundaries (always closed)
            IF( .NOT. type_array(ii) == 'F' )   pt2d_array(ii)%pt2d(:,             1:jprecj ) = zland      ! south except F-point
                                                pt2d_array(ii)%pt2d(:, nlcj-jprecj+1:jpj    ) = zland      ! north
            !
         ENDIF
      END DO

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      DO ii = 1, num_fields
         SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
         CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
            iihom = nlci-nreci
            DO jl = 1, jpreci
               zt2ew( : , jl , ii ) = pt2d_array(ii)%pt2d( jpreci+jl , : )
               zt2we( : , jl , ii ) = pt2d_array(ii)%pt2d( iihom +jl , : )
            END DO
         END SELECT
      END DO
      !
      !                           ! Migrations
      imigr = jpreci * jpj
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea )
         CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend)   CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      DO ii = 1, num_fields
         SELECT CASE ( nbondi )
         CASE ( -1 )
            DO jl = 1, jpreci
               pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii)
            END DO
         CASE ( 0 )
            DO jl = 1, jpreci
               pt2d_array(ii)%pt2d( jl       , : ) = zt2we(:,jl,num_fields+ii)
               pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii)
            END DO
         CASE ( 1 )
            DO jl = 1, jpreci
               pt2d_array(ii)%pt2d( jl , : ) = zt2we(:,jl,num_fields+ii)
            END DO
         END SELECT
      END DO

      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      DO ii = 1, num_fields
         IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
            ijhom = nlcj-nrecj
            DO jl = 1, jprecj
               zt2sn(:,jl,ii) = pt2d_array(ii)%pt2d( : , ijhom +jl )
               zt2ns(:,jl,ii) = pt2d_array(ii)%pt2d( : , jprecj+jl )
            END DO
         ENDIF
      END DO
      !
      !                           ! Migrations
      imigr = jprecj * jpi
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 )
         CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono )
         CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend)   CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso )
         IF(l_isend)   CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      DO ii = 1, num_fields
         SELECT CASE ( nbondj )
         CASE ( -1 )
            DO jl = 1, jprecj
               pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii )
            END DO
         CASE ( 0 )
            DO jl = 1, jprecj
               pt2d_array(ii)%pt2d( : , jl       ) = zt2sn( : , jl , num_fields+ii )
               pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii )
            END DO
         CASE ( 1 )
            DO jl = 1, jprecj
               pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields+ii )
            END DO
         END SELECT
      END DO

      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )
            DO ii = 1, num_fields
               CALL lbc_nfd( pt2d_array(ii)%pt2d(:,:), type_array(ii), psgn_array(ii) )   ! only 1 northern proc, no mpp
            END DO
         CASE DEFAULT
            CALL mpp_lbc_north_2d_multiple( pt2d_array, type_array, psgn_array, num_fields )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )
      !
   END SUBROUTINE mpp_lnk_2d_multiple

   SUBROUTINE load_array( pt2d, cd_type, psgn, pt2d_array, type_array, psgn_array, num_fields )
      !!---------------------------------------------------------------------
      !! Append one 2D array, with its grid-point type and north-fold sign,
      !! to the field list passed to mpp_lnk_2d_multiple
      !!---------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), TARGET, INTENT(inout) ::   pt2d      ! 2D array on which the boundary condition is applied
      CHARACTER(len=1)                    , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
      REAL(wp)                            , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
      TYPE(arrayptr)  , DIMENSION(9) ::   pt2d_array
      CHARACTER(len=1), DIMENSION(9) ::   type_array   ! define the nature of ptab array grid-points
      REAL(wp)        , DIMENSION(9) ::   psgn_array   ! =-1 the sign change across the north fold boundary
      INTEGER                             , INTENT(inout) ::   num_fields
      !!---------------------------------------------------------------------
      num_fields = num_fields + 1
      pt2d_array(num_fields)%pt2d => pt2d
      type_array(num_fields)      =  cd_type
      psgn_array(num_fields)      =  psgn
   END SUBROUTINE load_array

   SUBROUTINE mpp_lnk_2d_9( pt2dA, cd_typeA, psgnA, pt2dB, cd_typeB, psgnB, pt2dC, cd_typeC, psgnC   &
      &                   , pt2dD, cd_typeD, psgnD, pt2dE, cd_typeE, psgnE, pt2dF, cd_typeF, psgnF   &
      &                   , pt2dG, cd_typeG, psgnG, pt2dH, cd_typeH, psgnH, pt2dI, cd_typeI, psgnI, cd_mpp, pval )
      !!---------------------------------------------------------------------
      ! 2D arrays on which the boundary condition is applied
      REAL(wp), DIMENSION(jpi,jpj), TARGET          , INTENT(inout) ::   pt2dA
      REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) ::   pt2dB, pt2dC, pt2dD, pt2dE
      REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) ::   pt2dF, pt2dG, pt2dH, pt2dI
      ! define the nature of ptab array grid-points
      CHARACTER(len=1)                              , INTENT(in   ) ::   cd_typeA
      CHARACTER(len=1)                    , OPTIONAL, INTENT(in   ) ::   cd_typeB, cd_typeC, cd_typeD, cd_typeE
      CHARACTER(len=1)                    , OPTIONAL, INTENT(in   ) ::   cd_typeF, cd_typeG, cd_typeH, cd_typeI
      ! =-1 the sign change across the north fold boundary
      REAL(wp)                                      , INTENT(in   ) ::   psgnA
      REAL(wp)                            , OPTIONAL, INTENT(in   ) ::   psgnB, psgnC, psgnD, psgnE
      REAL(wp)                            , OPTIONAL, INTENT(in   ) ::   psgnF, psgnG, psgnH, psgnI
      CHARACTER(len=3)                    , OPTIONAL, INTENT(in   ) ::   cd_mpp   ! fill the overlap area only
      REAL(wp)                            , OPTIONAL, INTENT(in   ) ::   pval     ! background value (used at closed boundaries)
      !!
      TYPE(arrayptr)  , DIMENSION(9) ::   pt2d_array
      CHARACTER(len=1), DIMENSION(9) ::   type_array   ! define the nature of ptab array grid-points
      !                                                ! = T , U , V , F , W and I points
      REAL(wp)        , DIMENSION(9) ::   psgn_array   ! =-1 the sign change across the north fold boundary
      INTEGER ::   num_fields
      !!---------------------------------------------------------------------
      num_fields = 0
      !! Load the first array
      CALL load_array( pt2dA, cd_typeA, psgnA, pt2d_array, type_array, psgn_array, num_fields )
      !! Check whether more arrays were provided
      IF( PRESENT(psgnB) )   CALL load_array( pt2dB, cd_typeB, psgnB, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnC) )   CALL load_array( pt2dC, cd_typeC, psgnC, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnD) )   CALL load_array( pt2dD, cd_typeD, psgnD, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnE) )   CALL load_array( pt2dE, cd_typeE, psgnE, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnF) )   CALL load_array( pt2dF, cd_typeF, psgnF, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnG) )   CALL load_array( pt2dG, cd_typeG, psgnG, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnH) )   CALL load_array( pt2dH, cd_typeH, psgnH, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnI) )   CALL load_array( pt2dI, cd_typeI, psgnI, pt2d_array, type_array, psgn_array, num_fields )
      CALL mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp, pval )
   END SUBROUTINE mpp_lnk_2d_9
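   !! An illustrative use of mpp_lnk_2d_9 (the field names are assumptions):
   !! the halos of two velocity components are exchanged in one set of
   !! messages instead of two,
   !!      CALL mpp_lnk_2d_9( zu2d, 'U', -1._wp, zv2d, 'V', -1._wp )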

   SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d  ***
      !!
      !! ** Purpose :   Message passing management for 2d array
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
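      !! An illustrative call (the field name is an assumption): refresh the
      !! halo of a 2D T-point field, keeping the sign at the north fold,
      !!      CALL mpp_lnk_2d( zssh, 'T', 1._wp )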
  778. REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied
  779. CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points
  780. ! ! = T , U , V , F , W and I points
  781. REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary
  782. ! ! = 1. , the sign is kept
  783. CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only
  784. REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries)
  785. !!
  786. INTEGER :: ji, jj, jl ! dummy loop indices
  787. INTEGER :: imigr, iihom, ijhom ! temporary integers
  788. INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend
  789. REAL(wp) :: zland
  790. INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend
  791. !
  792. REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north
  793. REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east
  794. !!----------------------------------------------------------------------
  795. ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), &
  796. & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) )
  797. !
  798. IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value
  799. ELSE ; zland = 0.e0 ! zero by default
  800. ENDIF
  801. ! 1. standard boundary treatment
  802. ! ------------------------------
  803. !
  804. IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values
  805. !
  806. ! WARNING pt2d is defined only between nld and nle
  807. DO jj = nlcj+1, jpj ! added line(s) (inner only)
  808. pt2d(nldi :nlei , jj ) = pt2d(nldi:nlei, nlej)
  809. pt2d(1 :nldi-1, jj ) = pt2d(nldi , nlej)
  810. pt2d(nlei+1:nlci , jj ) = pt2d( nlei, nlej)
  811. END DO
  812. DO ji = nlci+1, jpi ! added column(s) (full)
  813. pt2d(ji ,nldj :nlej ) = pt2d( nlei,nldj:nlej)
  814. pt2d(ji ,1 :nldj-1) = pt2d( nlei,nldj )
  815. pt2d(ji ,nlej+1:jpj ) = pt2d( nlei, nlej)
  816. END DO
  817. !
  818. ELSE ! standard close or cyclic treatment
  819. !
  820. ! ! East-West boundaries
  821. IF( nbondi == 2 .AND. & ! Cyclic east-west
  822. & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
  823. pt2d( 1 ,:) = pt2d(jpim1,:) ! west
  824. pt2d(jpi,:) = pt2d( 2 ,:) ! east
  825. ELSE ! closed
  826. IF( .NOT. cd_type == 'F' ) pt2d( 1 :jpreci,:) = zland ! south except F-point
  827. pt2d(nlci-jpreci+1:jpi ,:) = zland ! north
  828. ENDIF
  829. ! ! North-South boundaries (always closed)
  830. IF( .NOT. cd_type == 'F' ) pt2d(:, 1 :jprecj) = zland !south except F-point
  831. pt2d(:,nlcj-jprecj+1:jpj ) = zland ! north
  832. !
  833. ENDIF
  834. ! 2. East and west directions exchange
  835. ! ------------------------------------
  836. ! we play with the neigbours AND the row number because of the periodicity
  837. !
  838. SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions
  839. CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case)
  840. iihom = nlci-nreci
  841. DO jl = 1, jpreci
  842. zt2ew(:,jl,1) = pt2d(jpreci+jl,:)
  843. zt2we(:,jl,1) = pt2d(iihom +jl,:)
  844. END DO
  845. END SELECT
  846. !
  847. ! ! Migrations
  848. imigr = jpreci * jpj
  849. !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            pt2d(iihom+jl,:) = zt2ew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            pt2d(jl      ,:) = zt2we(:,jl,2)
            pt2d(iihom+jl,:) = zt2ew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            pt2d(jl      ,:) = zt2we(:,jl,2)
         END DO
      END SELECT
      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            zt2sn(:,jl,1) = pt2d(:,ijhom +jl)
            zt2ns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            pt2d(:,ijhom+jl) = zt2ns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            pt2d(:,jl      ) = zt2sn(:,jl,2)
            pt2d(:,ijhom+jl) = zt2ns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            pt2d(:,jl      ) = zt2sn(:,jl,2)
         END DO
      END SELECT
      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( pt2d, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( pt2d, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )
      !
   END SUBROUTINE mpp_lnk_2d
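
   !  A minimal usage sketch (the field name is illustrative only): apply
   !  the lateral boundary condition to a jpi x jpj T-point field with
   !     CALL mpp_lnk_2d( zssh, 'T', 1._wp )
   !  psgn = -1._wp would be used for vector components whose sign flips
   !  across the north-fold boundary.
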
   SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_3d_gather  ***
      !!
      !! ** Purpose :   Message passing management for two 3D arrays
      !!
      !! ** Method  :   Use mppsend and mpprecv functions for passing the fields
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !! ** Action  :   ptab1 and ptab2 with updated values at their periphery
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab1      ! first and second 3D array on which
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab2      ! the boundary condition is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type1   ! nature of ptab1 and ptab2 arrays
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type2   ! i.e. grid-points = T , U , V , F or W points
      REAL(wp)                        , INTENT(in   ) ::   psgn       ! =-1 the sign change across the north fold boundary
      !!                                                              ! =  1. , the sign is kept
      INTEGER  ::   jl   ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE ::   zt4ns, zt4sn   ! 2 x 3d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE ::   zt4ew, zt4we   ! 2 x 3d for east-west & west-east
      !!----------------------------------------------------------------------
      ALLOCATE( zt4ns(jpi,jprecj,jpk,2,2), zt4sn(jpi,jprecj,jpk,2,2) ,  &
         &      zt4ew(jpj,jpreci,jpk,2,2), zt4we(jpj,jpreci,jpk,2,2) )
      ! 1. standard boundary treatment
      ! ------------------------------
      !                                      ! East-West boundaries
      !                                           !* Cyclic east-west
      IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
         ptab1( 1 ,:,:) = ptab1(jpim1,:,:)
         ptab1(jpi,:,:) = ptab1(  2  ,:,:)
         ptab2( 1 ,:,:) = ptab2(jpim1,:,:)
         ptab2(jpi,:,:) = ptab2(  2  ,:,:)
      ELSE                                        !* closed
         IF( .NOT. cd_type1 == 'F' )   ptab1(     1       :jpreci,:,:) = 0.e0   ! west except at F-point
         IF( .NOT. cd_type2 == 'F' )   ptab2(     1       :jpreci,:,:) = 0.e0
                                       ptab1(nlci-jpreci+1:jpi   ,:,:) = 0.e0   ! east
                                       ptab2(nlci-jpreci+1:jpi   ,:,:) = 0.e0
      ENDIF
      !                                      ! North-South boundaries
      IF( .NOT. cd_type1 == 'F' )   ptab1(:,     1       :jprecj,:) = 0.e0   ! south except at F-point
      IF( .NOT. cd_type2 == 'F' )   ptab2(:,     1       :jprecj,:) = 0.e0
                                    ptab1(:,nlcj-jprecj+1:jpj   ,:) = 0.e0   ! north
                                    ptab2(:,nlcj-jprecj+1:jpj   ,:) = 0.e0
      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. closed case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            zt4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:)
            zt4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:)
            zt4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:)
            zt4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj * jpk * 2
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea )
         CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2)
            ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab1(jl      ,:,:) = zt4we(:,jl,:,1,2)
            ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2)
            ptab2(jl      ,:,:) = zt4we(:,jl,:,2,2)
            ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab1(jl      ,:,:) = zt4we(:,jl,:,1,2)
            ptab2(jl      ,:,:) = zt4we(:,jl,:,2,2)
         END DO
      END SELECT
      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj - nrecj
         DO jl = 1, jprecj
            zt4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:)
            zt4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:)
            zt4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:)
            zt4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi * jpk * 2
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono )
         CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2)
            ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab1(:,jl      ,:) = zt4sn(:,jl,:,1,2)
            ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2)
            ptab2(:,jl      ,:) = zt4sn(:,jl,:,2,2)
            ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab1(:,jl,:) = zt4sn(:,jl,:,1,2)
            ptab2(:,jl,:) = zt4sn(:,jl,:,2,2)
         END DO
      END SELECT
      ! 4. north fold treatment
      ! -----------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )
            CALL lbc_nfd      ( ptab1, cd_type1, psgn )   ! only for northern procs.
            CALL lbc_nfd      ( ptab2, cd_type2, psgn )
         CASE DEFAULT
            CALL mpp_lbc_north( ptab1, cd_type1, psgn )   ! for all northern procs.
            CALL mpp_lbc_north( ptab2, cd_type2, psgn )
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt4ns, zt4sn, zt4ew, zt4we )
      !
   END SUBROUTINE mpp_lnk_3d_gather
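
   !  Usage sketch (field names illustrative): exchanging two related 3D
   !  fields in one pass halves the number of messages compared with two
   !  separate lateral-boundary calls, e.g.
   !     CALL mpp_lnk_3d_gather( zu, 'U', zv, 'V', -1._wp )
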
   SUBROUTINE mpp_lnk_2d_e( pt2d, cd_type, psgn, jpri, jprj )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_e  ***
      !!
      !! ** Purpose :   Message passing management for 2d array (with extra halo)
      !!
      !! ** Method  :   Use mppsend and mpprecv functions for passing the fields
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    jpri   : size of the extra outer halo in the i-direction
      !!                    jprj   : size of the extra outer halo in the j-direction
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
      INTEGER                                             , INTENT(in   ) ::   jpri
      INTEGER                                             , INTENT(in   ) ::   jprj
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)                                    , INTENT(in   ) ::   cd_type   ! nature of ptab array grid-points
      !                                                                       ! = T , U , V , F , W and I points
      REAL(wp)                                            , INTENT(in   ) ::   psgn      ! =-1 the sign change across the
      !!                                                                      ! north boundary, =  1. otherwise
      INTEGER  ::   jl   ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ipreci, iprecj             ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dns
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dsn
      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dwe
      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dew
      !!----------------------------------------------------------------------
      ipreci = jpreci + jpri      ! take into account outer extra 2D overlap area
      iprecj = jprecj + jprj
      ! 1. standard boundary treatment
      ! ------------------------------
      ! Order matters here !
      !
      !                                      !* North-South boundaries (always closed)
      IF( .NOT. cd_type == 'F' )   pt2d(:,  1-jprj     :  jprecj ) = 0.e0    ! south except at F-point
                                   pt2d(:,nlcj-jprecj+1:jpj+jprj ) = 0.e0    ! north
      !                                      ! East-West boundaries
      !                                           !* Cyclic east-west
      IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
         pt2d(1-jpri:   1    ,:) = pt2d(jpim1-jpri:  jpim1 ,:)       ! west
         pt2d(   jpi :jpi+jpri,:) = pt2d(     2     :2+jpri,:)       ! east
         !
      ELSE                                        !* closed
         IF( .NOT. cd_type == 'F' )   pt2d(   1-jpri    :jpreci  ,:) = 0.e0    ! west except at F-point
                                      pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0    ! east
      ENDIF
      !
      ! north fold treatment
      ! -----------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd        ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
         CASE DEFAULT   ;   CALL mpp_lbc_north_e( pt2d                  , cd_type, psgn             )
         END SELECT
         !
      ENDIF
      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. closed case)
         iihom = nlci-nreci-jpri
         DO jl = 1, ipreci
            r2dew(:,jl,1) = pt2d(jpreci+jl,:)
            r2dwe(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = ipreci * ( jpj + 2*jprj )
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea )
         CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, ipreci
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, ipreci
            pt2d(jl-jpri ,:) = r2dwe(:,jl,2)
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, ipreci
            pt2d(jl-jpri,:) = r2dwe(:,jl,2)
         END DO
      END SELECT
      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj-jprj
         DO jl = 1, iprecj
            r2dsn(:,jl,1) = pt2d(:,ijhom +jl)
            r2dns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = iprecj * ( jpi + 2*jpri )
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono )
         CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, iprecj
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, iprecj
            pt2d(:,jl-jprj ) = r2dsn(:,jl,2)
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, iprecj
            pt2d(:,jl-jprj) = r2dsn(:,jl,2)
         END DO
      END SELECT
   END SUBROUTINE mpp_lnk_2d_e
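
   !  Usage sketch (illustrative; the work array must be declared with the
   !  matching extra halo, e.g.  REAL(wp) :: zwrk(1-2:jpi+2,1-2:jpj+2) ):
   !     CALL mpp_lnk_2d_e( zwrk, 'T', 1._wp, 2, 2 )
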
   SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsend  ***
      !!
      !! ** Purpose :   Send a message-passing array
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout) ::   pmess(*)   ! array of real
      INTEGER , INTENT(in   ) ::   kbytes     ! size of the array pmess
      INTEGER , INTENT(in   ) ::   kdest      ! rank of the destination process
      INTEGER , INTENT(in   ) ::   ktyp       ! tag of the message
      INTEGER , INTENT(in   ) ::   md_req     ! argument for isend
      !!
      INTEGER ::   iflag
      !!----------------------------------------------------------------------
      !
      SELECT CASE ( cn_mpi_send )
      CASE ( 'S' )                ! Standard mpi send (blocking)
         CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa        , iflag )
      CASE ( 'B' )                ! Buffer mpi send (blocking)
         CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa        , iflag )
      CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
         ! be careful, one more argument here : the MPI request identifier
         CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa, md_req, iflag )
      END SELECT
      !
   END SUBROUTINE mppsend

   SUBROUTINE mpprecv( ktyp, pmess, kbytes, ksource )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpprecv  ***
      !!
      !! ** Purpose :   Receive a message-passing array
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout) ::   pmess(*)   ! array of real
      INTEGER , INTENT(in   ) ::   kbytes     ! size of the array pmess
      INTEGER , INTENT(in   ) ::   ktyp       ! tag of the received message
      INTEGER, OPTIONAL, INTENT(in) ::   ksource   ! rank of the source process
      !!
      INTEGER ::   istatus(mpi_status_size)
      INTEGER ::   iflag
      INTEGER ::   use_source
      !!----------------------------------------------------------------------
      !
      ! If a specific process number has been passed to the receive call,
      ! use that one. Default is to use mpi_any_source
      use_source = mpi_any_source
      IF( PRESENT(ksource) )   use_source = ksource
      !
      CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_opa, istatus, iflag )
      !
   END SUBROUTINE mpprecv
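
   !  The two routines above are always used in matched pairs; a sketch of
   !  the pattern used throughout this module (names illustrative):
   !     CALL mppsend( itag, zbuf_out(1), icount, ineigh, ireq )   ! post the send
   !     CALL mpprecv( itag, zbuf_in(1) , icount, ineigh )         ! blocking receive
   !     IF( l_isend )   CALL mpi_wait( ireq, istat, ierr )        ! complete the isend when cn_mpi_send == 'I'
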
   SUBROUTINE mppgather( ptab, kp, pio )
      !!----------------------------------------------------------------------
      !!                   ***  routine mppgather  ***
      !!
      !! ** Purpose :   Transfer between a local subdomain array and a work
      !!      array which is distributed following the vertical level.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj)      , INTENT(in   ) ::   ptab   ! subdomain input array
      INTEGER                           , INTENT(in   ) ::   kp     ! rank of the gathering process
      REAL(wp), DIMENSION(jpi,jpj,jpnij), INTENT(  out) ::   pio    ! gathered output array
      !!
      INTEGER ::   itaille, ierror   ! temporary integer
      !!---------------------------------------------------------------------
      !
      itaille = jpi * jpj
      CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille     ,   &
         &             mpi_double_precision, kp , mpi_comm_opa, ierror )
      !
   END SUBROUTINE mppgather

   SUBROUTINE mppscatter( pio, kp, ptab )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppscatter  ***
      !!
      !! ** Purpose :   Transfer between a work array which is distributed
      !!      following the vertical level and the local subdomain array.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpnij) ::   pio    ! input array (significant on process kp only)
      INTEGER                            ::   kp     ! rank of the scattering process
      REAL(wp), DIMENSION(jpi,jpj)       ::   ptab   ! subdomain output array
      !!
      INTEGER ::   itaille, ierror   ! temporary integer
      !!---------------------------------------------------------------------
      !
      itaille = jpi * jpj
      !
      CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille     ,   &
         &              mpi_double_precision, kp , mpi_comm_opa, ierror )
      !
   END SUBROUTINE mppscatter
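
   !  Round-trip sketch (names illustrative): gather one horizontal slab
   !  from every process onto rank 0, then scatter the slabs back.
   !     CALL mppgather ( zloc, 0, zglo )   ! zglo(jpi,jpj,jpnij) valid on rank 0 only
   !     CALL mppscatter( zglo, 0, zloc )   ! each rank receives its own slab
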
   SUBROUTINE mppmax_a_int( ktab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmax_a_int  ***
      !!
      !! ** Purpose :   Find the maximum value in an integer array
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT(in   )                  ::   kdim   ! size of array
      INTEGER , INTENT(inout), DIMENSION(kdim) ::   ktab   ! input array
      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, localcomm   ! temporary integer
      INTEGER, DIMENSION(kdim) ::   iwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror )
      !
      ktab(:) = iwork(:)
      !
   END SUBROUTINE mppmax_a_int

   SUBROUTINE mppmax_int( ktab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmax_int  ***
      !!
      !! ** Purpose :   Find the maximum value of an integer scalar
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(inout)           ::   ktab   ! value to be reduced
      INTEGER, INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, iwork, localcomm   ! temporary integer
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror )
      !
      ktab = iwork
      !
   END SUBROUTINE mppmax_int

   SUBROUTINE mppmin_a_int( ktab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmin_a_int  ***
      !!
      !! ** Purpose :   Find the minimum value in an integer array
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT( in  )                  ::   kdim   ! size of array
      INTEGER , INTENT(inout), DIMENSION(kdim) ::   ktab   ! input array
      INTEGER , INTENT( in  ), OPTIONAL        ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, localcomm   ! temporary integer
      INTEGER, DIMENSION(kdim) ::   iwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror )
      !
      ktab(:) = iwork(:)
      !
   END SUBROUTINE mppmin_a_int

   SUBROUTINE mppmin_int( ktab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmin_int  ***
      !!
      !! ** Purpose :   Find the minimum value of an integer scalar
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(inout)           ::   ktab   ! value to be reduced
      INTEGER, INTENT( in ), OPTIONAL  ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, iwork, localcomm
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror )
      !
      ktab = iwork
      !
   END SUBROUTINE mppmin_int

   SUBROUTINE mppsum_a_int( ktab, kdim )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_a_int  ***
      !!
      !! ** Purpose :   Global integer sum, 1D array case
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in   )                   ::   kdim   ! size of array
      INTEGER, INTENT(inout), DIMENSION (kdim) ::   ktab   ! input array
      !!
      INTEGER ::   ierror
      INTEGER, DIMENSION (kdim) ::   iwork
      !!----------------------------------------------------------------------
      !
      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror )
      !
      ktab(:) = iwork(:)
      !
   END SUBROUTINE mppsum_a_int

   SUBROUTINE mppsum_int( ktab )
      !!----------------------------------------------------------------------
      !!                 ***  routine mppsum_int  ***
      !!
      !! ** Purpose :   Global integer sum
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(inout) ::   ktab
      !!
      INTEGER ::   ierror, iwork
      !!----------------------------------------------------------------------
      !
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror )
      !
      ktab = iwork
      !
   END SUBROUTINE mppsum_int

   SUBROUTINE mppmax_a_real( ptab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                 ***  routine mppmax_a_real  ***
      !!
      !! ** Purpose :   Maximum of a REAL array, element by element
      !!
      !!----------------------------------------------------------------------
      INTEGER , INTENT(in   )                  ::   kdim
      REAL(wp), INTENT(inout), DIMENSION(kdim) ::   ptab
      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, localcomm
      REAL(wp), DIMENSION(kdim) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror )
      ptab(:) = zwork(:)
      !
   END SUBROUTINE mppmax_a_real

   SUBROUTINE mppmax_real( ptab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmax_real  ***
      !!
      !! ** Purpose :   Maximum of a REAL scalar
      !!
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(inout)           ::   ptab   ! value to be reduced
      INTEGER , INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER  ::   ierror, localcomm
      REAL(wp) ::   zwork
      !!----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror )
      ptab = zwork
      !
   END SUBROUTINE mppmax_real

   SUBROUTINE mppmax_real_multiple( ptab, NUM , kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmax_real_multiple  ***
      !!
      !! ** Purpose :   Maximum of a 1D REAL array, element by element
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(:), INTENT(inout)           ::   ptab   ! values to be reduced
      INTEGER               , INTENT(in   )           ::   NUM    ! number of elements
      INTEGER               , INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER  ::   ierror, localcomm
      REAL(wp), POINTER, DIMENSION(:) ::   zwork
      !!----------------------------------------------------------------------
      !
      CALL wrk_alloc(NUM , zwork)
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, NUM, mpi_double_precision, mpi_max, localcomm, ierror )
      ptab(1:NUM) = zwork(1:NUM)
      CALL wrk_dealloc(NUM , zwork)
      !
   END SUBROUTINE mppmax_real_multiple

   SUBROUTINE mppmin_a_real( ptab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                 ***  routine mppmin_a_real  ***
      !!
      !! ** Purpose :   Minimum of REAL, array case
      !!
      !!-----------------------------------------------------------------------
      INTEGER , INTENT(in   )                  ::   kdim
      REAL(wp), INTENT(inout), DIMENSION(kdim) ::   ptab
      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom   ! optional communicator
      !!
      INTEGER ::   ierror, localcomm
      REAL(wp), DIMENSION(kdim) ::   zwork
      !!-----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror )
      ptab(:) = zwork(:)
      !
   END SUBROUTINE mppmin_a_real

   SUBROUTINE mppmin_real( ptab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppmin_real  ***
      !!
      !! ** Purpose :   minimum of REAL, scalar case
      !!
      !!-----------------------------------------------------------------------
      REAL(wp), INTENT(inout)           ::   ptab   ! value to be reduced
      INTEGER , INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER  ::   ierror
      REAL(wp) ::   zwork
      INTEGER  ::   localcomm
      !!-----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror )
      ptab = zwork
      !
   END SUBROUTINE mppmin_real

   SUBROUTINE mppsum_a_real( ptab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_a_real  ***
      !!
      !! ** Purpose :   global sum, REAL ARRAY argument case
      !!
      !!-----------------------------------------------------------------------
      INTEGER , INTENT( in )                     ::   kdim   ! size of ptab
      REAL(wp), DIMENSION(kdim), INTENT( inout ) ::   ptab   ! input array
      INTEGER , INTENT( in ), OPTIONAL           ::   kcom   ! optional communicator
      !!
      INTEGER                   ::   ierror      ! temporary integer
      INTEGER                   ::   localcomm
      REAL(wp), DIMENSION(kdim) ::   zwork       ! temporary workspace
      !!-----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror )
      ptab(:) = zwork(:)
      !
   END SUBROUTINE mppsum_a_real

   SUBROUTINE mppsum_real( ptab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_real  ***
      !!
      !! ** Purpose :   global sum, SCALAR argument case
      !!
      !!-----------------------------------------------------------------------
      REAL(wp), INTENT(inout)           ::   ptab   ! input scalar
      INTEGER , INTENT(in   ), OPTIONAL ::   kcom   ! optional communicator
      !!
      INTEGER  ::   ierror, localcomm
      REAL(wp) ::   zwork
      !!-----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror )
      ptab = zwork
      !
   END SUBROUTINE mppsum_real
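
   !  Typical use of the reduction wrappers above (the field and mask names
   !  are illustrative): compute a local partial result, then reduce it.
   !     zsum = SUM( zfield(:,:) * zmask(:,:) )   ! local contribution
   !     CALL mppsum_real( zsum )                 ! overwritten with the global sum
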
   SUBROUTINE mppsum_realdd( ytab, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_realdd  ***
      !!
      !! ** Purpose :   global sum in Massively Parallel Processing
      !!                SCALAR argument case for double-double precision
      !!
      !!-----------------------------------------------------------------------
      COMPLEX(wp), INTENT(inout)          ::   ytab   ! input scalar
      INTEGER    , INTENT( in ), OPTIONAL ::   kcom   ! optional communicator
      !! * Local variables   (MPI version)
      INTEGER     ::   ierror
      INTEGER     ::   localcomm
      COMPLEX(wp) ::   zwork
      !!-----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      ! reduce local sums into global sum
      CALL MPI_ALLREDUCE( ytab, zwork, 1, MPI_DOUBLE_COMPLEX,   &
         &                MPI_SUMDD, localcomm, ierror )
      ytab = zwork
      !
   END SUBROUTINE mppsum_realdd
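
   !  Note on the double-double sums: each COMPLEX(wp) value carries the
   !  running sum in its real part and the accumulated round-off error in
   !  its imaginary part. MPI_SUMDD is a user-defined MPI reduction operator
   !  (created with MPI_OP_CREATE during the MPI set-up of this module) that
   !  combines the pairs with compensated, Knuth-style addition, making the
   !  global sum far less sensitive to the reduction order.
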
   SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsum_a_realdd  ***
      !!
      !! ** Purpose :   global sum in Massively Parallel Processing
      !!                COMPLEX ARRAY case for double-double precision
      !!
      !!-----------------------------------------------------------------------
      INTEGER                     , INTENT( in )           ::   kdim   ! size of ytab
      COMPLEX(wp), DIMENSION(kdim), INTENT( inout )        ::   ytab   ! input array
      INTEGER                     , INTENT( in ), OPTIONAL ::   kcom   ! optional communicator
      !! * Local variables   (MPI version)
      INTEGER                      ::   ierror      ! temporary integer
      INTEGER                      ::   localcomm
      COMPLEX(wp), DIMENSION(kdim) ::   zwork       ! temporary workspace
      !!-----------------------------------------------------------------------
      !
      localcomm = mpi_comm_opa
      IF( PRESENT(kcom) )   localcomm = kcom
      !
      CALL MPI_ALLREDUCE( ytab, zwork, kdim, MPI_DOUBLE_COMPLEX,   &
         &                MPI_SUMDD, localcomm, ierror )
      ytab(:) = zwork(:)
      !
   END SUBROUTINE mppsum_a_realdd

   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_minloc2d  ***
      !!
      !! ** Purpose :   Compute the global minimum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MINLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   ptab    ! Local 2D array
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   pmask   ! Local mask
      REAL(wp)                     , INTENT(  out) ::   pmin    ! Global minimum of ptab
      INTEGER                      , INTENT(  out) ::   ki, kj  ! index of minimum in global frame
      !!
      INTEGER , DIMENSION(2)   ::   ilocs
      INTEGER                  ::   ierror
      REAL(wp)                 ::   zmin   ! local minimum
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      !!-----------------------------------------------------------------------
      !
      zmin  = MINVAL( ptab(:,:) , mask= pmask == 1.e0 )
      ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      !
      zain(1,:) = zmin
      zain(2,:) = ki + 10000.*kj
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_OPA, ierror )
      !
      pmin = zaout(1,1)
      kj   = INT( zaout(2,1) / 10000. )
      ki   = INT( zaout(2,1) - 10000.*kj )
      !
   END SUBROUTINE mpp_minloc2d
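
   !  How the position travels with the value: MPI_MINLOC (and MPI_MAXLOC
   !  below) compares the first entry of each (value, index) pair, so the
   !  global (ki,kj) location is packed into a single real as ki + 10000*kj
   !  and unpacked after the reduction; the 3D variants add 100000000*kk for
   !  the vertical level. This encoding implicitly assumes a global i-extent
   !  of fewer than 10000 points.
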
   SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_minloc3d  ***
      !!
      !! ** Purpose :   Compute the global minimum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MINLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   ptab         ! Local 3D array
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   pmask        ! Local mask
      REAL(wp)                         , INTENT(  out) ::   pmin         ! Global minimum of ptab
      INTEGER                          , INTENT(  out) ::   ki, kj, kk   ! index of minimum in global frame
      !!
      INTEGER                  ::   ierror
      REAL(wp)                 ::   zmin   ! local minimum
      INTEGER , DIMENSION(3)   ::   ilocs
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      !!-----------------------------------------------------------------------
      !
      zmin  = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 )
      ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      kk = ilocs(3)
      !
      zain(1,:) = zmin
      zain(2,:) = ki + 10000.*kj + 100000000.*kk
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_OPA, ierror )
      !
      pmin = zaout(1,1)
      kk   = INT( zaout(2,1) / 100000000. )
      kj   = INT( zaout(2,1) - kk * 100000000. ) / 10000
      ki   = INT( zaout(2,1) - kk * 100000000. - kj * 10000. )
      !
   END SUBROUTINE mpp_minloc3d

   SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_maxloc2d  ***
      !!
      !! ** Purpose :   Compute the global maximum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MAXLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   ptab    ! Local 2D array
      REAL(wp), DIMENSION (jpi,jpj), INTENT(in   ) ::   pmask   ! Local mask
      REAL(wp)                     , INTENT(  out) ::   pmax    ! Global maximum of ptab
      INTEGER                      , INTENT(  out) ::   ki, kj  ! index of maximum in global frame
      !!
      INTEGER                  ::   ierror
      INTEGER , DIMENSION(2)   ::   ilocs
      REAL(wp)                 ::   zmax   ! local maximum
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      !!-----------------------------------------------------------------------
      !
      zmax  = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 )
      ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      !
      zain(1,:) = zmax
      zain(2,:) = ki + 10000. * kj
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_OPA, ierror )
      !
      pmax = zaout(1,1)
      kj   = INT( zaout(2,1) / 10000. )
      ki   = INT( zaout(2,1) - 10000.* kj )
      !
   END SUBROUTINE mpp_maxloc2d

   SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
      !!------------------------------------------------------------------------
      !!                  ***  routine mpp_maxloc3d  ***
      !!
      !! ** Purpose :   Compute the global maximum of an array ptab
      !!              and also give its global position
      !!
      !! ** Method  :   Use MPI_ALLREDUCE with MPI_MAXLOC
      !!
      !!--------------------------------------------------------------------------
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   ptab         ! Local 3D array
      REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in   ) ::   pmask        ! Local mask
      REAL(wp)                         , INTENT(  out) ::   pmax         ! Global maximum of ptab
      INTEGER                          , INTENT(  out) ::   ki, kj, kk   ! index of maximum in global frame
      !!
      REAL(wp)                 ::   zmax   ! local maximum
      REAL(wp), DIMENSION(2,1) ::   zain, zaout
      INTEGER , DIMENSION(3)   ::   ilocs
      INTEGER                  ::   ierror
      !!-----------------------------------------------------------------------
      !
      zmax  = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 )
      ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 )
      !
      ki = ilocs(1) + nimpp - 1
      kj = ilocs(2) + njmpp - 1
      kk = ilocs(3)
      !
      zain(1,:) = zmax
      zain(2,:) = ki + 10000.*kj + 100000000.*kk
      !
      CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_OPA, ierror )
      !
      pmax = zaout(1,1)
      kk   = INT( zaout(2,1) / 100000000. )
      kj   = INT( zaout(2,1) - kk * 100000000. ) / 10000
      ki   = INT( zaout(2,1) - kk * 100000000. - kj * 10000. )
      !
   END SUBROUTINE mpp_maxloc3d

   SUBROUTINE mppsync()
      !!----------------------------------------------------------------------
      !!                  ***  routine mppsync  ***
      !!
      !! ** Purpose :   Massively parallel processors, synchronisation (MPI barrier)
      !!
      !!-----------------------------------------------------------------------
      INTEGER ::   ierror
      !!-----------------------------------------------------------------------
      !
      CALL mpi_barrier( mpi_comm_opa, ierror )
      !
   END SUBROUTINE mppsync

   SUBROUTINE mppstop
      !!----------------------------------------------------------------------
      !!                  ***  routine mppstop  ***
      !!
      !! ** Purpose :   Stop massively parallel processors method
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   info
      !!----------------------------------------------------------------------
      !
      CALL mppsync
      CALL mpi_finalize( info )
      !
   END SUBROUTINE mppstop

   SUBROUTINE mpp_comm_free( kcom )
      !!----------------------------------------------------------------------
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in) ::   kcom
      !!
      INTEGER ::   ierr
      !!----------------------------------------------------------------------
      !
      CALL MPI_COMM_FREE(kcom, ierr)
      !
   END SUBROUTINE mpp_comm_free

   SUBROUTINE mpp_ini_ice( pindic, kumout )
      !!----------------------------------------------------------------------
      !!               ***  routine mpp_ini_ice  ***
      !!
      !! ** Purpose :   Initialize special communicator for ice areas
      !!      condition together with global variables needed in the ddmpp folding
      !!
      !! ** Method  : - Look for ice processors in ice routines
      !!              - Put their number in nrank_ice
      !!              - Create groups for the world processors and the ice processors
      !!              - Create a communicator for ice processors
      !!
      !! ** output
      !!      njmppmax = njmpp for northern procs
      !!      ndim_rank_ice = number of processors with ice
      !!      nrank_ice (ndim_rank_ice) = ice processors
      !!      ngrp_iworld = group ID for the world processors
      !!      ngrp_ice = group ID for the ice processors
      !!      ncomm_ice = communicator for the ice procs.
      !!      n_ice_root = number (in the world) of proc 0 in the ice comm.
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in) ::   pindic
      INTEGER, INTENT(in) ::   kumout   ! ocean.output logical unit
      !!
      INTEGER ::   jjproc
      INTEGER ::   ii, ierr
      INTEGER, ALLOCATABLE, DIMENSION(:) ::   kice
      INTEGER, ALLOCATABLE, DIMENSION(:) ::   zwork
      !!----------------------------------------------------------------------
      !
      ! Since this is just an init routine and these arrays are of length jpnij
      ! then don't use wrk_nemo module - just allocate and deallocate.
      ALLOCATE( kice(jpnij), zwork(jpnij), STAT=ierr )
      IF( ierr /= 0 ) THEN
         WRITE(kumout, cform_err)
         WRITE(kumout,*) 'mpp_ini_ice : failed to allocate 2, 1D arrays (jpnij in length)'
         CALL mppstop
      ENDIF
      !
      ! Look for how many procs with sea-ice
      kice = 0
      DO jjproc = 1, jpnij
         IF( jjproc == narea .AND. pindic .GT. 0 )   kice(jjproc) = 1
      END DO
      !
      zwork = 0
      CALL MPI_ALLREDUCE( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr )
      ndim_rank_ice = SUM( zwork )
      !
      ! Allocate the right size to nrank_ice
      IF( ALLOCATED( nrank_ice ) )   DEALLOCATE( nrank_ice )
      ALLOCATE( nrank_ice(ndim_rank_ice) )
      !
      ii = 0
      nrank_ice = 0
      DO jjproc = 1, jpnij
         IF( zwork(jjproc) == 1 ) THEN
            ii = ii + 1
            nrank_ice(ii) = jjproc - 1
         ENDIF
      END DO
      !
      ! Create the world group
      CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_iworld, ierr )
      !
      ! Create the ice group from the world group
      CALL MPI_GROUP_INCL( ngrp_iworld, ndim_rank_ice, nrank_ice, ngrp_ice, ierr )
      !
      ! Create the ice communicator, i.e. the pool of procs with sea-ice
      CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr )
      !
      ! Find proc number in the world of proc 0 in the ice communicator
      ! The following line seems to be useless, we just comment & keep it as reminder
      ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_iworld,n_ice_root,ierr)
      !
      CALL MPI_GROUP_FREE(ngrp_ice, ierr)
      CALL MPI_GROUP_FREE(ngrp_iworld, ierr)
      DEALLOCATE(kice, zwork)
      !
   END SUBROUTINE mpp_ini_ice
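
   !  Note: the DEALLOCATE/ALLOCATE pattern on nrank_ice above allows this
   !  routine to be called repeatedly as the set of ice-covered processes
   !  evolves; pindic > 0 flags the calling process as currently holding
   !  ice points, so ncomm_ice reflects the current ice distribution.
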
   SUBROUTINE mpp_ini_znl( kumout )
      !!----------------------------------------------------------------------
      !!               ***  routine mpp_ini_znl  ***
      !!
      !! ** Purpose :   Initialize special communicator for computing zonal sum
      !!
      !! ** Method  : - Look for processors in the same row
      !!              - Put their number in nrank_znl
      !!              - Create group for the znl processors
      !!              - Create a communicator for znl processors
      !!              - Determine if processor should write znl files
      !!
      !! ** output
      !!      ndim_rank_znl = number of processors on the same row
      !!      ngrp_znl = group ID for the znl processors
      !!      ncomm_znl = communicator for the znl procs.
      !!      l_znl_root = true on the first proc (i=1) of the row
      !!
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in) ::   kumout   ! ocean.output logical units
      !
      INTEGER ::   jproc      ! dummy loop integer
      INTEGER ::   ierr, ii   ! local integer
      INTEGER, ALLOCATABLE, DIMENSION(:) ::   kwork
      !!----------------------------------------------------------------------
      !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world     : ', ngrp_world
      !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world
      !-$$     WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_opa   : ', mpi_comm_opa
      !
      ALLOCATE( kwork(jpnij), STAT=ierr )
      IF( ierr /= 0 ) THEN
         WRITE(kumout, cform_err)
         WRITE(kumout,*) 'mpp_ini_znl : failed to allocate 1D array of length jpnij'
         CALL mppstop
      ENDIF
      !
      IF( jpnj == 1 ) THEN
         ngrp_znl  = ngrp_world
         ncomm_znl = mpi_comm_opa
      ELSE
         !
         CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_opa, ierr )
         !-$$        WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork
         !-$$        CALL flush(numout)
         !
         ! Count number of processors on the same row
         ndim_rank_znl = 0
         DO jproc=1,jpnij
            IF ( kwork(jproc) == njmpp ) THEN
               ndim_rank_znl = ndim_rank_znl + 1
            ENDIF
         END DO
         !-$$        WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ndim_rank_znl : ', ndim_rank_znl
         !-$$        CALL flush(numout)
         ! Allocate the right size to nrank_znl
         IF (ALLOCATED (nrank_znl)) DEALLOCATE(nrank_znl)
         ALLOCATE(nrank_znl(ndim_rank_znl))
         ii = 0
         nrank_znl (:) = 0
         DO jproc=1,jpnij
            IF ( kwork(jproc) == njmpp) THEN
               ii = ii + 1
               nrank_znl(ii) = jproc -1
            ENDIF
         END DO
         !-$$        WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - nrank_znl : ', nrank_znl
         !-$$        CALL flush(numout)
         ! Create the opa group
         CALL MPI_COMM_GROUP(mpi_comm_opa,ngrp_opa,ierr)
         !-$$        WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa
         !-$$        CALL flush(numout)
         ! Create the znl group from the opa group
         CALL MPI_GROUP_INCL  ( ngrp_opa, ndim_rank_znl, nrank_znl, ngrp_znl, ierr )
         !-$$        WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_znl ', ngrp_znl
         !-$$        CALL flush(numout)
         ! Create the znl communicator from the opa communicator, i.e. the pool of procs in the same row
         CALL MPI_COMM_CREATE ( mpi_comm_opa, ngrp_znl, ncomm_znl, ierr )
         !-$$        WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl
         !-$$        CALL flush(numout)
         !
      END IF
      !
      ! Determine if the processor is the first (starting from i=1) on the row
      IF ( jpni == 1 ) THEN
         l_znl_root = .TRUE.
      ELSE
         l_znl_root = .FALSE.
         kwork (1) = nimpp
         CALL mpp_min ( kwork(1), kcom = ncomm_znl)
         IF ( nimpp == kwork(1)) l_znl_root = .TRUE.
      END IF
      !
      DEALLOCATE(kwork)
      !
   END SUBROUTINE mpp_ini_znl

   SUBROUTINE mpp_ini_north
      !!----------------------------------------------------------------------
      !!               ***  routine mpp_ini_north  ***
      !!
      !! ** Purpose :   Initialize special communicator for north folding
      !!      condition together with global variables needed in the mpp folding
      !!
      !! ** Method  : - Look for northern processors
      !!              - Put their number in nrank_north
      !!              - Create groups for the world processors and the north processors
      !!              - Create a communicator for northern processors
      !!
      !! ** output
      !!      njmppmax = njmpp for northern procs
      !!      ndim_rank_north = number of processors in the northern line
      !!      nrank_north (ndim_rank_north) = number of the northern procs.
      !!      ngrp_world = group ID for the world processors
      !!      ngrp_north = group ID for the northern processors
      !!      ncomm_north = communicator for the northern procs.
      !!      north_root = number (in the world) of proc 0 in the northern comm.
      !!
      !!----------------------------------------------------------------------
      INTEGER ::   ierr
      INTEGER ::   jjproc
      INTEGER ::   ii, ji
      !!----------------------------------------------------------------------
      !
      njmppmax = MAXVAL( njmppt )
      !
      ! Look for how many procs on the northern boundary
      ndim_rank_north = 0
      DO jjproc = 1, jpnij
         IF( njmppt(jjproc) == njmppmax )   ndim_rank_north = ndim_rank_north + 1
      END DO
      !
      ! Allocate the right size to nrank_north
      IF (ALLOCATED (nrank_north)) DEALLOCATE(nrank_north)
      ALLOCATE( nrank_north(ndim_rank_north) )
      !
      ! Fill the nrank_north array with proc. number of northern procs.
      ! Note : ranks start at 0 in MPI
      ii = 0
      DO ji = 1, jpnij
         IF ( njmppt(ji) == njmppmax ) THEN
            ii=ii+1
            nrank_north(ii)=ji-1
         END IF
      END DO
      !
      ! create the world group
      CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_world, ierr )
      !
      ! Create the North group from the world group
      CALL MPI_GROUP_INCL( ngrp_world, ndim_rank_north, nrank_north, ngrp_north, ierr )
      !
      ! Create the North communicator, i.e. the pool of procs in the north group
      CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_north, ncomm_north, ierr )
      !
   END SUBROUTINE mpp_ini_north
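
   !  ncomm_north built here is the communicator used by the mpp_lbc_north_*
   !  routines below for the MPI_ALLGATHER of the four northernmost rows;
   !  only the processes on the top row of the domain decomposition take part.
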
   SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn )
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_3d  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4 northern lines of the global domain on 1 processor
      !!              and apply lbc north-fold on this sub array. Then we
      !!              scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   pt3d      ! 3D array on which the b.c. is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type   ! nature of pt3d grid-points
      !                                                              ! = T, U, V, F or W gridpoints
      REAL(wp)                        , INTENT(in   ) ::   psgn      ! = -1. the sign change across the north fold
      !                                                              ! =  1. the sign is kept
      INTEGER ::   ji, jj, jr, jk
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ijpjm1, ij, iproc
      INTEGER, DIMENSION (jpmaxngh)       ::   ml_req_nf   ! for mpi_isend when avoiding mpi_allgather
      INTEGER                             ::   ml_err      ! for mpi_isend when avoiding mpi_allgather
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat     ! for mpi_isend when avoiding mpi_allgather
      !                                                    ! Workspace for message transfers avoiding mpi_allgather
      REAL(wp), DIMENSION(:,:,:)  , ALLOCATABLE ::   ztab
      REAL(wp), DIMENSION(:,:,:)  , ALLOCATABLE ::   znorthloc, zfoldwk
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   znorthgloio
      REAL(wp), DIMENSION(:,:,:)  , ALLOCATABLE ::   ztabl, ztabr
      INTEGER ::   istatus(MPI_STATUS_SIZE)
      INTEGER ::   iflag
      !!----------------------------------------------------------------------
      !
      ALLOCATE( ztab(jpiglo,4,jpk), znorthloc(jpi,4,jpk), zfoldwk(jpi,4,jpk), znorthgloio(jpi,4,jpk,jpni) )
      ALLOCATE( ztabl(jpi,4,jpk), ztabr(jpi*jpmaxngh, 4, jpk) )
      ijpj   = 4
      ijpjm1 = 3
      !
      znorthloc(:,:,:) = 0._wp
      DO jk = 1, jpk
         DO jj = nlcj - ijpj + 1, nlcj            ! put in znorthloc the last 4 jlines of pt3d
            ij = jj - nlcj + ijpj
            znorthloc(:,ij,jk) = pt3d(:,jj,jk)
         END DO
      END DO
      !
      !                                           ! Build in procs of ncomm_north the znorthgloio
      itaille = jpi * jpk * ijpj
      IF ( l_north_nogather ) THEN
         !
         ztabr(:,:,:) = 0._wp
         ztabl(:,:,:) = 0._wp
         DO jk = 1, jpk
            DO jj = nlcj-ijpj+1, nlcj             ! First put local values into the global array
               ij = jj - nlcj + ijpj
               DO ji = nfsloop, nfeloop
                  ztabl(ji,ij,jk) = pt3d(ji,jj,jk)
               END DO
            END DO
         END DO
         DO jr = 1, nsndto
            IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN
               CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) )
            ENDIF
         END DO
         DO jr = 1, nsndto
            iproc = nfipproc(isendto(jr),jpnj)
            IF( iproc /= -1 ) THEN
               ilei = nleit (iproc+1)
               ildi = nldit (iproc+1)
               iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj)
            ENDIF
            IF( iproc /= narea-1 .AND. iproc /= -1 ) THEN
               CALL mpprecv( 5, zfoldwk, itaille, iproc )
               DO jk = 1, jpk
                  DO jj = 1, ijpj
                     DO ji = ildi, ilei
                        ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk)
                     END DO
                  END DO
               END DO
            ELSE IF( iproc == narea-1 ) THEN
               DO jk = 1, jpk
                  DO jj = 1, ijpj
                     DO ji = ildi, ilei
                        ztabr(iilb+ji,jj,jk) = pt3d(ji,nlcj-ijpj+jj,jk)
                     END DO
                  END DO
               END DO
            ENDIF
         END DO
         IF( l_isend ) THEN
            DO jr = 1, nsndto
               IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN
                  CALL mpi_wait( ml_req_nf(jr), ml_stat, ml_err )
               ENDIF
            END DO
         ENDIF
         CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn )   ! North fold boundary condition
         DO jk = 1, jpk
            DO jj = nlcj-ijpj+1, nlcj             ! Scatter back to pt3d
               ij = jj - nlcj + ijpj
               DO ji = 1, nlci
                  pt3d(ji,jj,jk) = ztabl(ji,ij,jk)
               END DO
            END DO
         END DO
         !
      ELSE
         CALL MPI_ALLGATHER( znorthloc  , itaille, MPI_DOUBLE_PRECISION,   &
            &                znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
         !
         ztab(:,:,:) = 0._wp
         DO jr = 1, ndim_rank_north               ! recover the global north array
            iproc = nrank_north(jr) + 1
            ildi  = nldit (iproc)
            ilei  = nleit (iproc)
            iilb  = nimppt(iproc)
            DO jk = 1, jpk
               DO jj = 1, ijpj
                  DO ji = ildi, ilei
                     ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr)
                  END DO
               END DO
            END DO
         END DO
         CALL lbc_nfd( ztab, cd_type, psgn )      ! North fold boundary condition
         !
         DO jk = 1, jpk
            DO jj = nlcj-ijpj+1, nlcj             ! Scatter back to pt3d
               ij = jj - nlcj + ijpj
               DO ji = 1, nlci
                  pt3d(ji,jj,jk) = ztab(ji+nimpp-1,ij,jk)
               END DO
            END DO
         END DO
         !
      ENDIF
      !
      ! The work array has been either:
      !  a. fully populated by the mpi_allgather operation (ztab), or
      !  b. had the active points for this domain and its northern neighbours
      !     populated by peer-to-peer exchanges (ztabl/ztabr).
      ! Either way the array may be folded by lbc_nfd / mpp_lbc_nfd and the
      ! result over the span of this domain will be identical.
      !
      DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio )
      DEALLOCATE( ztabl, ztabr )
      !
   END SUBROUTINE mpp_lbc_north_3d
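
   !! Usage sketch (commented out; zw3d and zv3d are hypothetical caller
   !! fields of shape (jpi,jpj,jpk)): apply the north-fold condition to a
   !! T-point scalar with no sign change, or to a V-point vector component
   !! whose sign reverses across the fold:
   !!
   !!    CALL mpp_lbc_north_3d( zw3d, 'T',  1._wp )
   !!    CALL mpp_lbc_north_3d( zv3d, 'V', -1._wp )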
   SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn )
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_2d  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1 (for a 2D array)
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4 northern lines of the global domain on 1 processor
      !!              and apply lbc north-fold on this sub array. Then we
      !!              scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) ::   pt2d      ! 2D array on which the b.c. is applied
      CHARACTER(len=1)            , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points
      !                                                          ! = T, U, V, F or W gridpoints
      REAL(wp)                    , INTENT(in   ) ::   psgn      ! = -1. the sign change across the north fold
      !                                                          ! =  1. the sign is kept
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ijpjm1, ij, iproc
      INTEGER, DIMENSION (jpmaxngh)       ::   ml_req_nf   ! for mpi_isend when avoiding mpi_allgather
      INTEGER                             ::   ml_err      ! for mpi_isend when avoiding mpi_allgather
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat     ! for mpi_isend when avoiding mpi_allgather
      !                                                    ! Workspace for message transfers avoiding mpi_allgather
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   ztab
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   znorthloc, zfoldwk
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   znorthgloio
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   ztabl, ztabr
      INTEGER ::   istatus(MPI_STATUS_SIZE)
      INTEGER ::   iflag
      !!----------------------------------------------------------------------
      !
      ALLOCATE( ztab(jpiglo,4), znorthloc(jpi,4), zfoldwk(jpi,4), znorthgloio(jpi,4,jpni) )
      ALLOCATE( ztabl(jpi,4), ztabr(jpi*jpmaxngh, 4) )
      !
      ijpj   = 4
      ijpjm1 = 3
      !
      DO jj = nlcj-ijpj+1, nlcj                   ! put in znorthloc the last 4 jlines of pt2d
         ij = jj - nlcj + ijpj
         znorthloc(:,ij) = pt2d(:,jj)
      END DO
      !                                           ! Build in procs of ncomm_north the znorthgloio
      itaille = jpi * ijpj
      IF ( l_north_nogather ) THEN
         !
         ! Avoid the use of mpi_allgather by exchanging only with the processes already identified
         ! (in nemo_northcomms) as being involved in this process' northern boundary exchange
         !
         ztabr(:,:) = 0._wp
         ztabl(:,:) = 0._wp
         DO jj = nlcj-ijpj+1, nlcj                ! First put local values into the global array
            ij = jj - nlcj + ijpj
            DO ji = nfsloop, nfeloop
               ztabl(ji,ij) = pt2d(ji,jj)
            END DO
         END DO
         DO jr = 1, nsndto
            IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN
               CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) )
            ENDIF
         END DO
         DO jr = 1, nsndto
            iproc = nfipproc(isendto(jr),jpnj)
            IF( iproc /= -1 ) THEN
               ilei = nleit (iproc+1)
               ildi = nldit (iproc+1)
               iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj)
            ENDIF
            IF( iproc /= narea-1 .AND. iproc /= -1 ) THEN
               CALL mpprecv( 5, zfoldwk, itaille, iproc )
               DO jj = 1, ijpj
                  DO ji = ildi, ilei
                     ztabr(iilb+ji,jj) = zfoldwk(ji,jj)
                  END DO
               END DO
            ELSE IF( iproc == narea-1 ) THEN
               DO jj = 1, ijpj
                  DO ji = ildi, ilei
                     ztabr(iilb+ji,jj) = pt2d(ji,nlcj-ijpj+jj)
                  END DO
               END DO
            ENDIF
         END DO
         IF( l_isend ) THEN
            DO jr = 1, nsndto
               IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN
                  CALL mpi_wait( ml_req_nf(jr), ml_stat, ml_err )
               ENDIF
            END DO
         ENDIF
         CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn )   ! North fold boundary condition
         !
         DO jj = nlcj-ijpj+1, nlcj                ! Scatter back to pt2d
            ij = jj - nlcj + ijpj
            DO ji = 1, nlci
               pt2d(ji,jj) = ztabl(ji,ij)
            END DO
         END DO
         !
      ELSE
         CALL MPI_ALLGATHER( znorthloc  , itaille, MPI_DOUBLE_PRECISION,   &
            &                znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
         !
         ztab(:,:) = 0._wp
         DO jr = 1, ndim_rank_north               ! recover the global north array
            iproc = nrank_north(jr) + 1
            ildi  = nldit (iproc)
            ilei  = nleit (iproc)
            iilb  = nimppt(iproc)
            DO jj = 1, ijpj
               DO ji = ildi, ilei
                  ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr)
               END DO
            END DO
         END DO
         CALL lbc_nfd( ztab, cd_type, psgn )      ! North fold boundary condition
         !
         DO jj = nlcj-ijpj+1, nlcj                ! Scatter back to pt2d
            ij = jj - nlcj + ijpj
            DO ji = 1, nlci
               pt2d(ji,jj) = ztab(ji+nimpp-1,ij)
            END DO
         END DO
         !
      ENDIF
      DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio )
      DEALLOCATE( ztabl, ztabr )
      !
   END SUBROUTINE mpp_lbc_north_2d
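
   !! Usage sketch (commented out; zssh is a hypothetical (jpi,jpj) field):
   !! identical calling convention to the 3D version, e.g. for a T-point
   !! surface field with no sign change across the fold:
   !!
   !!    CALL mpp_lbc_north_2d( zssh, 'T', 1._wp )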
   SUBROUTINE mpp_lbc_north_2d_multiple( pt2d_array, cd_type, psgn, num_fields )
      !!---------------------------------------------------------------------
      !!              ***  routine mpp_lbc_north_2d_multiple  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1
      !!              (for multiple 2D arrays)
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4 northern lines of the global domain on 1 processor
      !!              and apply lbc north-fold on this sub array. Then we
      !!              scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      INTEGER                       , INTENT(in   ) ::   num_fields   ! number of variables contained in pt2d_array
      TYPE( arrayptr ), DIMENSION(:)                ::   pt2d_array
      CHARACTER(len=1), DIMENSION(:), INTENT(in   ) ::   cd_type      ! nature of pt2d grid-points
      !                                                               ! = T, U, V, F or W gridpoints
      REAL(wp)        , DIMENSION(:), INTENT(in   ) ::   psgn         ! = -1. the sign change across the north fold
      !                                                               ! =  1. the sign is kept
      INTEGER ::   ji, jj, jr, jk
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ijpjm1, ij, iproc
      INTEGER, DIMENSION (jpmaxngh)       ::   ml_req_nf   ! for mpi_isend when avoiding mpi_allgather
      INTEGER                             ::   ml_err      ! for mpi_isend when avoiding mpi_allgather
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat     ! for mpi_isend when avoiding mpi_allgather
      !                                                    ! Workspace for message transfers avoiding mpi_allgather
      REAL(wp), DIMENSION(:,:,:)  , ALLOCATABLE ::   ztab
      REAL(wp), DIMENSION(:,:,:)  , ALLOCATABLE ::   znorthloc, zfoldwk
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   znorthgloio
      REAL(wp), DIMENSION(:,:,:)  , ALLOCATABLE ::   ztabl, ztabr
      INTEGER ::   istatus(MPI_STATUS_SIZE)
      INTEGER ::   iflag
      !!----------------------------------------------------------------------
      !
      !                                        ! workspace expanded to a third (field) dimension
      ALLOCATE( ztab(jpiglo,4,num_fields), znorthloc(jpi,4,num_fields),   &
         &      zfoldwk(jpi,4,num_fields), znorthgloio(jpi,4,num_fields,jpni) )
      ALLOCATE( ztabl(jpi,4,num_fields), ztabr(jpi*jpmaxngh, 4, num_fields) )
      !
      ijpj   = 4
      ijpjm1 = 3
      !
      DO jk = 1, num_fields
         DO jj = nlcj-ijpj+1, nlcj             ! put in znorthloc the last 4 jlines of pt2d (for every variable)
            ij = jj - nlcj + ijpj
            znorthloc(:,ij,jk) = pt2d_array(jk)%pt2d(:,jj)
         END DO
      END DO
      !                                        ! Build in procs of ncomm_north the znorthgloio
      itaille = jpi * ijpj
      IF ( l_north_nogather ) THEN
         !
         ! Avoid the use of mpi_allgather by exchanging only with the processes already identified
         ! (in nemo_northcomms) as being involved in this process' northern boundary exchange
         !
         ztabr(:,:,:) = 0._wp
         ztabl(:,:,:) = 0._wp
         DO jk = 1, num_fields
            DO jj = nlcj-ijpj+1, nlcj          ! First put local values into the global array
               ij = jj - nlcj + ijpj
               DO ji = nfsloop, nfeloop
                  ztabl(ji,ij,jk) = pt2d_array(jk)%pt2d(ji,jj)
               END DO
            END DO
         END DO
         DO jr = 1, nsndto
            IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN
               !                               ! buffer expanded "num_fields" times
               CALL mppsend( 5, znorthloc, itaille*num_fields, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) )
            ENDIF
         END DO
         DO jr = 1, nsndto
            iproc = nfipproc(isendto(jr),jpnj)
            IF( iproc /= -1 ) THEN
               ilei = nleit (iproc+1)
               ildi = nldit (iproc+1)
               iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj)
            ENDIF
            IF( iproc /= narea-1 .AND. iproc /= -1 ) THEN
               CALL mpprecv( 5, zfoldwk, itaille*num_fields, iproc )   ! buffer expanded "num_fields" times
               DO jk = 1, num_fields
                  DO jj = 1, ijpj
                     DO ji = ildi, ilei
                        ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk)
                     END DO
                  END DO
               END DO
            ELSE IF( iproc == narea-1 ) THEN
               DO jk = 1, num_fields
                  DO jj = 1, ijpj
                     DO ji = ildi, ilei
                        ztabr(iilb+ji,jj,jk) = pt2d_array(jk)%pt2d(ji,nlcj-ijpj+jj)
                     END DO
                  END DO
               END DO
            ENDIF
         END DO
         IF( l_isend ) THEN
            DO jr = 1, nsndto
               IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN
                  CALL mpi_wait( ml_req_nf(jr), ml_stat, ml_err )
               ENDIF
            END DO
         ENDIF
         !
         DO ji = 1, num_fields                 ! loop over the 2D fields
            CALL mpp_lbc_nfd( ztabl(:,:,ji), ztabr(:,:,ji), cd_type(ji), psgn(ji) )   ! North fold boundary condition
         END DO
         !
         DO jk = 1, num_fields
            DO jj = nlcj-ijpj+1, nlcj          ! Scatter back to pt2d
               ij = jj - nlcj + ijpj
               DO ji = 1, nlci
                  pt2d_array(jk)%pt2d(ji,jj) = ztabl(ji,ij,jk)
               END DO
            END DO
         END DO
         !
      ELSE
         !
         CALL MPI_ALLGATHER( znorthloc  , itaille*num_fields, MPI_DOUBLE_PRECISION,   &
            &                znorthgloio, itaille*num_fields, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
         !
         ztab(:,:,:) = 0._wp
         DO jk = 1, num_fields
            DO jr = 1, ndim_rank_north         ! recover the global north array
               iproc = nrank_north(jr) + 1
               ildi  = nldit (iproc)
               ilei  = nleit (iproc)
               iilb  = nimppt(iproc)
               DO jj = 1, ijpj
                  DO ji = ildi, ilei
                     ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr)
                  END DO
               END DO
            END DO
         END DO
         DO ji = 1, num_fields
            CALL lbc_nfd( ztab(:,:,ji), cd_type(ji), psgn(ji) )   ! North fold boundary condition
         END DO
         !
         DO jk = 1, num_fields
            DO jj = nlcj-ijpj+1, nlcj          ! Scatter back to pt2d
               ij = jj - nlcj + ijpj
               DO ji = 1, nlci
                  pt2d_array(jk)%pt2d(ji,jj) = ztab(ji+nimpp-1,ij,jk)
               END DO
            END DO
         END DO
         !
      ENDIF
      DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio )
      DEALLOCATE( ztabl, ztabr )
      !
   END SUBROUTINE mpp_lbc_north_2d_multiple
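
   !! Usage sketch (commented out; zu2d/zv2d are hypothetical caller fields
   !! that the caller has made valid pointer targets): folding two fields in
   !! one call amortises the message-passing cost over num_fields:
   !!
   !!    TYPE(arrayptr)   ::   zptrs (2)
   !!    CHARACTER(len=1) ::   ctypes(2) = (/ 'U', 'V' /)
   !!    REAL(wp)         ::   zsgns (2) = (/ -1._wp, -1._wp /)
   !!    zptrs(1)%pt2d => zu2d   ;   zptrs(2)%pt2d => zv2d
   !!    CALL mpp_lbc_north_2d_multiple( zptrs, ctypes, zsgns, 2 )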
   SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn )
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_e  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1 and for 2d
      !!              array with outer extra halo
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4+2*jpr2dj northern lines of the global domain on 1
      !!              processor and apply lbc north-fold on this sub array.
      !!              Then we scatter the north fold array back to the processors.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)                                            , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points
      !                                                                                          ! = T, U, V, F or W -points
      REAL(wp)                                                    , INTENT(in   ) ::   psgn      ! = -1. the sign change across the
      !                                                                                          ! north fold, = 1. otherwise
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ij, iproc
      !
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   ztab_e, znorthloc_e
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   znorthgloio_e
      !!----------------------------------------------------------------------
      !
      ALLOCATE( ztab_e(jpiglo,4+2*jpr2dj), znorthloc_e(jpi,4+2*jpr2dj), znorthgloio_e(jpi,4+2*jpr2dj,jpni) )
      !
      ijpj = 4
      ztab_e(:,:) = 0._wp
      ij = 0
      !                                  ! put in znorthloc_e the last ijpj+2*jpr2dj jlines of pt2d
      DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj + jpr2dj
         ij = ij + 1
         DO ji = 1, jpi
            znorthloc_e(ji,ij) = pt2d(ji,jj)
         END DO
      END DO
      !
      itaille = jpi * ( ijpj + 2 * jpr2dj )
      CALL MPI_ALLGATHER( znorthloc_e(1,1)    , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
      !
      DO jr = 1, ndim_rank_north        ! recover the global north array
         iproc = nrank_north(jr) + 1
         ildi  = nldit (iproc)
         ilei  = nleit (iproc)
         iilb  = nimppt(iproc)
         DO jj = 1, ijpj+2*jpr2dj
            DO ji = ildi, ilei
               ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr)
            END DO
         END DO
      END DO
      !
      ! 2. North-Fold boundary conditions
      ! ----------------------------------
      CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj )
      !
      ij = jpr2dj
      !                                 ! Scatter back to pt2d
      DO jj = nlcj - ijpj + 1, nlcj + jpr2dj
         ij = ij + 1
         DO ji = 1, nlci
            pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij)
         END DO
      END DO
      !
      DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e )
      !
   END SUBROUTINE mpp_lbc_north_e
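
   !! Usage sketch (commented out; zicb is a hypothetical field declared by
   !! the caller with the extra halo, i.e. with bounds
   !! (1-jpr2di:jpi+jpr2di, 1-jpr2dj:jpj+jpr2dj)):
   !!
   !!    CALL mpp_lbc_north_e( zicb, 'T', 1._wp )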
   SUBROUTINE mpp_lnk_bdy_3d( ptab, cd_type, psgn, ib_bdy )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_bdy_3d  ***
      !!
      !! ** Purpose :   Message passing management
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing BDY boundaries
      !!              between processors following neighboring subdomains.
      !!              domain parameters
      !!                 nlci       : first dimension of the local subdomain
      !!                 nlcj       : second dimension of the local subdomain
      !!                 nbondi_bdy : mark for "east-west local boundary"
      !!                 nbondj_bdy : mark for "north-south local boundary"
      !!                 noea       : number for local neighboring processors
      !!                 nowe       : number for local neighboring processors
      !!                 noso       : number for local neighboring processors
      !!                 nono       : number for local neighboring processors
      !!
      !! ** Action  :   ptab with updated values at its periphery
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab      ! 3D array on which the boundary condition is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
      !                                                              ! = T, U, V, F, W points
      REAL(wp)                        , INTENT(in   ) ::   psgn      ! = -1. the sign change across the north fold boundary
      !                                                              ! =  1. the sign is kept
      INTEGER                         , INTENT(in   ) ::   ib_bdy    ! BDY boundary set
      !
      INTEGER  ::   ji, jj, jk, jl             ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! local integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland                      ! local scalar
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat    ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   zt3ns, zt3sn   ! 3d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   zt3ew, zt3we   ! 3d for east-west & west-east
      !!----------------------------------------------------------------------
      ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2),   &
         &      zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) )
      zland = 0._wp
      !
      ! 1. standard boundary treatment
      ! ------------------------------
      !                                   ! East-West boundaries
      !                                        !* Cyclic east-west
      IF( nbondi == 2 ) THEN
         IF( nperio == 1 .OR. nperio == 4 .OR. nperio == 6 ) THEN
            ptab( 1 ,:,:) = ptab(jpim1,:,:)
            ptab(jpi,:,:) = ptab(  2  ,:,:)
         ELSE
            IF( cd_type /= 'F' )   ptab(1:jpreci,:,:) = zland    ! west except F-point
            ptab(nlci-jpreci+1:jpi,:,:) = zland                  ! east
         ENDIF
      ELSEIF( nbondi == -1 ) THEN
         IF( cd_type /= 'F' )   ptab(1:jpreci,:,:) = zland       ! west except F-point
      ELSEIF( nbondi == 1 ) THEN
         ptab(nlci-jpreci+1:jpi,:,:) = zland                     ! east
      ENDIF                                    !* closed
      !                                   ! North-South boundaries (always closed)
      IF( nbondj == 2 .OR. nbondj == -1 ) THEN
         IF( cd_type /= 'F' )   ptab(:,1:jprecj,:) = zland       ! south except F-point
      ENDIF
      IF( nbondj == 2 .OR. nbondj == 1 ) THEN
         ptab(:,nlcj-jprecj+1:jpj,:) = zland                     ! north
      ENDIF
      !
      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi_bdy(ib_bdy) )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                       ! all except 2 (i.e. close case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:)
            zt3we(:,jl,:,1) = ptab(iihom +jl,:,:)
         END DO
      END SELECT
      !
      !                                       ! Migrations
      imigr = jpreci * jpj * jpk
      !
      SELECT CASE ( nbondi_bdy(ib_bdy) )
      CASE ( -1 )
         CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 )
      CASE ( 0 )
         CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 )
      CASE ( 1 )
         CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )
      END SELECT
      !
      SELECT CASE ( nbondi_bdy_b(ib_bdy) )
      CASE ( -1 )
         CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )
      CASE ( 0 )
         CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )
         CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )
      CASE ( 1 )
         CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )
      END SELECT
      !
      SELECT CASE ( nbondi_bdy(ib_bdy) )
      CASE ( -1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                                       ! Write Dirichlet lateral conditions
      iihom = nlci-jpreci
      !
      SELECT CASE ( nbondi_bdy_b(ib_bdy) )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = zt3we(:,jl,:,2)
            ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = zt3we(:,jl,:,2)
         END DO
      END SELECT
      !
      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj_bdy(ib_bdy) /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:)
            zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:)
         END DO
      ENDIF
      !
      !                                       ! Migrations
      imigr = jprecj * jpi * jpk
      !
      SELECT CASE ( nbondj_bdy(ib_bdy) )
      CASE ( -1 )
         CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 )
      CASE ( 0 )
         CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 )
      CASE ( 1 )
         CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )
      END SELECT
      !
      SELECT CASE ( nbondj_bdy_b(ib_bdy) )
      CASE ( -1 )
         CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )
      CASE ( 0 )
         CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )
         CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )
      CASE ( 1 )
         CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )
      END SELECT
      !
      SELECT CASE ( nbondj_bdy(ib_bdy) )
      CASE ( -1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                                       ! Write Dirichlet lateral conditions
      ijhom = nlcj-jprecj
      !
      SELECT CASE ( nbondj_bdy_b(ib_bdy) )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab(:,jl      ,:) = zt3sn(:,jl,:,2)
            ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab(:,jl,:) = zt3sn(:,jl,:,2)
         END DO
      END SELECT
      !
      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we )
      !
   END SUBROUTINE mpp_lnk_bdy_3d
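
   !! Usage sketch (commented out): exchange a 3D field on one BDY set; the
   !! loop over boundary sets is the caller's responsibility (nb_bdy is
   !! assumed to come from the BDY module):
   !!
   !!    DO ib = 1, nb_bdy
   !!       CALL mpp_lnk_bdy_3d( zu3d, 'U', -1._wp, ib )
   !!    END DO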
   SUBROUTINE mpp_lnk_bdy_2d( ptab, cd_type, psgn, ib_bdy )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_bdy_2d  ***
      !!
      !! ** Purpose :   Message passing management
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing BDY boundaries
      !!              between processors following neighboring subdomains.
      !!              domain parameters
      !!                 nlci       : first dimension of the local subdomain
      !!                 nlcj       : second dimension of the local subdomain
      !!                 nbondi_bdy : mark for "east-west local boundary"
      !!                 nbondj_bdy : mark for "north-south local boundary"
      !!                 noea       : number for local neighboring processors
      !!                 nowe       : number for local neighboring processors
      !!                 noso       : number for local neighboring processors
      !!                 nono       : number for local neighboring processors
      !!
      !! ** Action  :   ptab with updated values at its periphery
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) ::   ptab      ! 2D array on which the boundary condition is applied
      CHARACTER(len=1)            , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
      !                                                          ! = T, U, V, F, W points
      REAL(wp)                    , INTENT(in   ) ::   psgn      ! = -1. the sign change across the north fold boundary
      !                                                          ! =  1. the sign is kept
      INTEGER                     , INTENT(in   ) ::   ib_bdy    ! BDY boundary set
      !
      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! local integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat    ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ns, zt2sn   ! 2d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ew, zt2we   ! 2d for east-west & west-east
      !!----------------------------------------------------------------------
      ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2),   &
         &      zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) )
      zland = 0._wp
      !
      ! 1. standard boundary treatment
      ! ------------------------------
      !                                   ! East-West boundaries
      !                                        !* Cyclic east-west
      IF( nbondi == 2 ) THEN
         IF( nperio == 1 .OR. nperio == 4 .OR. nperio == 6 ) THEN
            ptab( 1 ,:) = ptab(jpim1,:)
            ptab(jpi,:) = ptab(  2  ,:)
         ELSE
            IF( cd_type /= 'F' )   ptab(1:jpreci,:) = zland    ! west except F-point
            ptab(nlci-jpreci+1:jpi,:) = zland                  ! east
         ENDIF
      ELSEIF( nbondi == -1 ) THEN
         IF( cd_type /= 'F' )   ptab(1:jpreci,:) = zland       ! west except F-point
      ELSEIF( nbondi == 1 ) THEN
         ptab(nlci-jpreci+1:jpi,:) = zland                     ! east
      ENDIF                                    !* closed
      !                                   ! North-South boundaries (always closed)
      IF( nbondj == 2 .OR. nbondj == -1 ) THEN
         IF( cd_type /= 'F' )   ptab(:,1:jprecj) = zland       ! south except F-point
      ENDIF
      IF( nbondj == 2 .OR. nbondj == 1 ) THEN
         ptab(:,nlcj-jprecj+1:jpj) = zland                     ! north
      ENDIF
      !
      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi_bdy(ib_bdy) )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                       ! all except 2 (i.e. close case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            zt2ew(:,jl,1) = ptab(jpreci+jl,:)
            zt2we(:,jl,1) = ptab(iihom +jl,:)
         END DO
      END SELECT
      !
      !                                       ! Migrations
      imigr = jpreci * jpj
      !
      SELECT CASE ( nbondi_bdy(ib_bdy) )
      CASE ( -1 )
         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 )
      CASE ( 0 )
         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 )
      CASE ( 1 )
         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
      END SELECT
      !
      SELECT CASE ( nbondi_bdy_b(ib_bdy) )
      CASE ( -1 )
         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
      CASE ( 0 )
         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
      CASE ( 1 )
         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
      END SELECT
      !
      SELECT CASE ( nbondi_bdy(ib_bdy) )
      CASE ( -1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                                       ! Write Dirichlet lateral conditions
      iihom = nlci-jpreci
      !
      SELECT CASE ( nbondi_bdy_b(ib_bdy) )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab(iihom+jl,:) = zt2ew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab(jl      ,:) = zt2we(:,jl,2)
            ptab(iihom+jl,:) = zt2ew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab(jl      ,:) = zt2we(:,jl,2)
         END DO
      END SELECT
      !
      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj_bdy(ib_bdy) /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            zt2sn(:,jl,1) = ptab(:,ijhom +jl)
            zt2ns(:,jl,1) = ptab(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                                       ! Migrations
      imigr = jprecj * jpi
      !
      SELECT CASE ( nbondj_bdy(ib_bdy) )
      CASE ( -1 )
         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 )
      CASE ( 0 )
         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 )
      CASE ( 1 )
         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
      END SELECT
      !
      SELECT CASE ( nbondj_bdy_b(ib_bdy) )
      CASE ( -1 )
         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
      CASE ( 0 )
         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
      CASE ( 1 )
         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
      END SELECT
      !
      SELECT CASE ( nbondj_bdy(ib_bdy) )
      CASE ( -1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                                       ! Write Dirichlet lateral conditions
      ijhom = nlcj-jprecj
      !
      SELECT CASE ( nbondj_bdy_b(ib_bdy) )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab(:,ijhom+jl) = zt2ns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab(:,jl      ) = zt2sn(:,jl,2)
            ptab(:,ijhom+jl) = zt2ns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab(:,jl) = zt2sn(:,jl,2)
         END DO
      END SELECT
      !
      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )
      !
   END SUBROUTINE mpp_lnk_bdy_2d
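
   !! Usage sketch (commented out): same convention as the 3D version, here
   !! for a hypothetical 2D T-point field zssh on boundary set ib:
   !!
   !!    CALL mpp_lnk_bdy_2d( zssh, 'T', 1._wp, ib )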
   SUBROUTINE mpi_init_opa( ldtxt, ksft, code )
      !!---------------------------------------------------------------------
      !!                  ***  routine mpi_init_opa  ***
      !!
      !! ** Purpose ::   export and attach an MPI buffer for bsend
      !!
      !! ** Method  ::   define buffer size in namelist, if 0 no buffer attachment
      !!               but classical mpi_init
      !!
      !! History :: 01/11 :: IDRIS initial version for IBM only
      !!            08/04 :: R. Benshila, generalisation
      !!---------------------------------------------------------------------
      CHARACTER(len=*), DIMENSION(:), INTENT(inout) ::   ldtxt   ! read back for control prints below
      INTEGER                       , INTENT(inout) ::   ksft
      INTEGER                       , INTENT(  out) ::   code
      INTEGER ::   ierr, ji
      LOGICAL ::   mpi_was_called
      !!---------------------------------------------------------------------
      !
      CALL mpi_initialized( mpi_was_called, code )      ! MPI initialization
      IF( code /= MPI_SUCCESS ) THEN
         DO ji = 1, SIZE(ldtxt)
            IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)   ! control print of mynode
         END DO
         WRITE(*, cform_err)
         WRITE(*, *) ' lib_mpp: Error in routine mpi_initialized'
         CALL mpi_abort( mpi_comm_world, code, ierr )
      ENDIF
      !
      IF( .NOT. mpi_was_called ) THEN
         CALL mpi_init( code )
         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code )
         IF( code /= MPI_SUCCESS ) THEN
            DO ji = 1, SIZE(ldtxt)
               IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)   ! control print of mynode
            END DO
            WRITE(*, cform_err)
            WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
            CALL mpi_abort( mpi_comm_world, code, ierr )
         ENDIF
      ENDIF
      !
      IF( nn_buffer > 0 ) THEN
         WRITE(ldtxt(ksft),*) 'mpi_bsend, buffer allocation of : ', nn_buffer   ;   ksft = ksft + 1
         !                                ! Buffer allocation and attachment
         ALLOCATE( tampon(nn_buffer), STAT = ierr )
         IF( ierr /= 0 ) THEN
            DO ji = 1, SIZE(ldtxt)
               IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)   ! control print of mynode
            END DO
            WRITE(*, cform_err)
            WRITE(*, *) ' lib_mpp: Error in ALLOCATE', ierr
            CALL mpi_abort( mpi_comm_world, code, ierr )
         END IF
         CALL mpi_buffer_attach( tampon, nn_buffer, code )
      ENDIF
      !
   END SUBROUTINE mpi_init_opa
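
   !! Usage note (sketch): with nn_buffer > 0 in the namelist, sends issued
   !! through mpi_bsend draw on the attached buffer "tampon". If the buffer
   !! ever needed resizing at run time, the standard MPI detach/re-attach
   !! pattern would read (commented out; isize is a hypothetical local,
   !! sizes in bytes):
   !!
   !!    CALL mpi_buffer_detach( tampon, isize, code )
   !!    DEALLOCATE( tampon )   ;   ALLOCATE( tampon(nn_buffer), STAT=ierr )
   !!    CALL mpi_buffer_attach( tampon, nn_buffer, code )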
   SUBROUTINE DDPDD_MPI( ydda, yddb, ilen, itype )
      !!---------------------------------------------------------------------
      !!   ***  routine DDPDD_MPI : used by reduction operator MPI_SUMDD  ***
      !!
      !!   Modification of original codes written by David H. Bailey
      !!   This subroutine computes yddb(i) = ydda(i) + yddb(i)
      !!---------------------------------------------------------------------
      INTEGER                     , INTENT(in   ) ::   ilen, itype
      COMPLEX(wp), DIMENSION(ilen), INTENT(in   ) ::   ydda
      COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::   yddb
      !
      REAL(wp) ::   zerr, zt1, zt2   ! local work variables
      INTEGER  ::   ji, ztmp         ! local scalars
      !
      ztmp = itype   ! avoid compilation warning
      !
      DO ji = 1, ilen
         ! Compute ydda + yddb using Knuth's trick.
         zt1  = REAL(ydda(ji)) + REAL(yddb(ji))
         zerr = zt1 - REAL(ydda(ji))
         zt2  = ( (REAL(yddb(ji)) - zerr) + (REAL(ydda(ji)) - (zt1 - zerr)) )   &
            &   + AIMAG(ydda(ji)) + AIMAG(yddb(ji))
         ! The result is zt1 + zt2, after normalization.
         yddb(ji) = CMPLX( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1), wp )
      END DO
      !
   END SUBROUTINE DDPDD_MPI
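
   !! Sketch (commented out): DDPDD_MPI is the user function behind the
   !! MPI_SUMDD reduction operator; the standard registration and a global
   !! double-double sum would read (ylocd/yglod are hypothetical complex
   !! scalars holding the running sum and its truncation error as the real
   !! and imaginary parts):
   !!
   !!    CALL MPI_OP_CREATE( DDPDD_MPI, .TRUE., MPI_SUMDD, ierr )
   !!    CALL MPI_ALLREDUCE( ylocd, yglod, 1, MPI_DOUBLE_COMPLEX,   &
   !!       &                MPI_SUMDD, mpi_comm_opa, ierr )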
   SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, pr2dj )
      !!---------------------------------------------------------------------
      !!                  ***  routine mpp_lbc_north_icb  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1 and for 2d
      !!              array with outer extra halo
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4+2*jpr2dj northern lines of the global domain on 1
      !!              processor and apply lbc north-fold on this sub array.
      !!              Then we scatter the north fold array back to the processors.
      !!              This version accounts for an extra halo with icebergs.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(:,:), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)        , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points
      !                                                      ! = T, U, V, F or W -points
      REAL(wp)                , INTENT(in   ) ::   psgn      ! = -1. the sign change across the
      !                                                      ! north fold, = 1. otherwise
      INTEGER, OPTIONAL       , INTENT(in   ) ::   pr2dj
      !
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ij, iproc, ipr2dj
      !
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   ztab_e, znorthloc_e
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   znorthgloio_e
      !!----------------------------------------------------------------------
      !
      ijpj = 4
      IF( PRESENT(pr2dj) ) THEN         ! use of additional halos
         ipr2dj = pr2dj
      ELSE
         ipr2dj = 0
      ENDIF
      ALLOCATE( ztab_e(jpiglo,4+2*ipr2dj), znorthloc_e(jpi,4+2*ipr2dj), znorthgloio_e(jpi,4+2*ipr2dj,jpni) )
      !
      ztab_e(:,:) = 0._wp
      ij = 0
      !                                 ! put in znorthloc_e the last ijpj+2*ipr2dj jlines of pt2d
      DO jj = nlcj - ijpj + 1 - ipr2dj, nlcj + ipr2dj
         ij = ij + 1
         DO ji = 1, jpi
            znorthloc_e(ji,ij) = pt2d(ji,jj)
         END DO
      END DO
      !
      itaille = jpi * ( ijpj + 2 * ipr2dj )
      CALL MPI_ALLGATHER( znorthloc_e(1,1)    , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
      !
      DO jr = 1, ndim_rank_north        ! recover the global north array
         iproc = nrank_north(jr) + 1
         ildi  = nldit (iproc)
         ilei  = nleit (iproc)
         iilb  = nimppt(iproc)
         DO jj = 1, ijpj+2*ipr2dj
            DO ji = ildi, ilei
               ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr)
            END DO
         END DO
      END DO
      !
      ! 2. North-Fold boundary conditions
      ! ----------------------------------
      CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = ipr2dj )
      !
      ij = ipr2dj
      !                                 ! Scatter back to pt2d
      DO jj = nlcj - ijpj + 1, nlcj + ipr2dj
         ij = ij + 1
         DO ji = 1, nlci
            pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij)
         END DO
      END DO
      !
      DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e )
      !
   END SUBROUTINE mpp_lbc_north_icb
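
   !! Usage sketch (commented out): same as mpp_lbc_north_e but with the halo
   !! width passed explicitly, e.g. for a hypothetical iceberg mask zmsk with
   !! a 2-point extra halo:
   !!
   !!    CALL mpp_lbc_north_icb( zmsk, 'T', 1._wp, pr2dj=2 )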
   SUBROUTINE mpp_lnk_2d_icb( pt2d, cd_type, psgn, jpri, jprj )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_icb  ***
      !!
      !! ** Purpose :   Message passing management for 2d array (with extra halo and icebergs)
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!              between processors following neighboring subdomains.
      !!              domain parameters
      !!                 nlci   : first dimension of the local subdomain
      !!                 nlcj   : second dimension of the local subdomain
      !!                 jpri   : width of extra outer halo in the i-direction
      !!                 jprj   : width of extra outer halo in the j-direction
      !!                 nbondi : mark for "east-west local boundary"
      !!                 nbondj : mark for "north-south local boundary"
      !!                 noea   : number for local neighboring processors
      !!                 nowe   : number for local neighboring processors
      !!                 noso   : number for local neighboring processors
      !!                 nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
      INTEGER                                             , INTENT(in   ) ::   jpri
      INTEGER                                             , INTENT(in   ) ::   jprj
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)                                    , INTENT(in   ) ::   cd_type   ! nature of ptab array grid-points
      !                                                                                  ! = T, U, V, F, W and I points
      REAL(wp)                                            , INTENT(in   ) ::   psgn      ! = -1. the sign change across the
      !                                                                                  ! north boundary, = 1. otherwise
      INTEGER  ::   jl                         ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ipreci, iprecj             ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat    ! for key_mpi_isend
      !!
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dns
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dsn
      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dwe
      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dew
      !!----------------------------------------------------------------------
      ipreci = jpreci + jpri      ! take into account outer extra 2D overlap area
      iprecj = jprecj + jprj
      !
      ! 1. standard boundary treatment
      ! ------------------------------
      ! Order matters here!
      !
      !                                   ! East-West boundaries
      !                                        !* Cyclic east-west
      IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
         pt2d(1-jpri:   1    ,:) = pt2d(jpim1-jpri:jpim1 ,:)          ! west halo from east interior
         pt2d(  jpi  :jpi+jpri,:) = pt2d(    2     :2+jpri,:)         ! east halo from west interior
         !
      ELSE                                     !* closed
         IF( cd_type /= 'F' )   pt2d(1-jpri:jpreci,:) = 0._wp         ! west except at F-point
         pt2d(nlci-jpreci+1:jpi+jpri,:) = 0._wp                       ! east
      ENDIF
      !
      ! north fold treatment
      ! --------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd          ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
         CASE DEFAULT   ;   CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
         END SELECT
         !
      ENDIF
      !
      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )           ! all except 2 (i.e. close case)
         iihom = nlci-nreci-jpri
         DO jl = 1, ipreci
            r2dew(:,jl,1) = pt2d(jpreci+jl,:)
            r2dwe(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = ipreci * ( jpj + 2*jprj )
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea )
         CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, ipreci
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, ipreci
            pt2d(jl-jpri ,:) = r2dwe(:,jl,2)
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, ipreci
            pt2d(jl-jpri,:) = r2dwe(:,jl,2)
         END DO
      END SELECT
      !
      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj-jprj
         DO jl = 1, iprecj
            r2dsn(:,jl,1) = pt2d(:,ijhom +jl)
            r2dns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = iprecj * ( jpi + 2*jpri )
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      CASE ( 0 )
         CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono )
         CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
         IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err )
      CASE ( 1 )
         CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso )
         IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err )
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, iprecj
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, iprecj
            pt2d(:,jl-jprj ) = r2dsn(:,jl,2)
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, iprecj
            pt2d(:,jl-jprj) = r2dsn(:,jl,2)
         END DO
      END SELECT
      !
   END SUBROUTINE mpp_lnk_2d_icb
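
   !! Usage sketch (commented out; zmsk is a hypothetical mask declared by
   !! the caller with the matching extra halo, zmsk(1-2:jpi+2,1-2:jpj+2)):
   !!
   !!    CALL mpp_lnk_2d_icb( zmsk, 'T', 1._wp, 2, 2 )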
#else
   !!----------------------------------------------------------------------
   !!   Default case :          Dummy module,        shared memory computing
   !!----------------------------------------------------------------------
   USE in_out_manager

   INTERFACE mpp_sum
      MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s,   &
         &             mpp_sum_i, mppsum_realdd, mppsum_a_realdd
   END INTERFACE
   INTERFACE mpp_max
      MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real
   END INTERFACE
   INTERFACE mpp_min
      MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real
   END INTERFACE
   INTERFACE mpp_minloc
      MODULE PROCEDURE mpp_minloc2d, mpp_minloc3d
   END INTERFACE
   INTERFACE mpp_maxloc
      MODULE PROCEDURE mpp_maxloc2d, mpp_maxloc3d
   END INTERFACE

   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .FALSE.   !: mpp flag
   LOGICAL, PUBLIC            ::   ln_nnogather       !: namelist control of northfold comms (needed here in case "key_mpp_mpi" is not used)
   INTEGER                    ::   ncomm_ice
   INTEGER, PUBLIC            ::   mpi_comm_opa       ! opa local communicator
   !!----------------------------------------------------------------------
CONTAINS

   INTEGER FUNCTION lib_mpp_alloc( kumout )   ! Dummy function
      INTEGER, INTENT(in) ::   kumout
      lib_mpp_alloc = 0
   END FUNCTION lib_mpp_alloc

   FUNCTION mynode( ldtxt, ldname, kumnam_ref, knumnam_cfg, kumond, kstop, localComm )  RESULT (function_value)
      INTEGER, OPTIONAL             , INTENT(in) ::   localComm
      CHARACTER(len=*), DIMENSION(:)             ::   ldtxt
      CHARACTER(len=*)                           ::   ldname
      INTEGER ::   kumnam_ref, knumnam_cfg, kumond, kstop
      INTEGER ::   function_value          ! return value (always 0 in the non-MPP case)
      IF( PRESENT( localComm ) )   mpi_comm_opa = localComm
      function_value = 0
      IF( .FALSE. )   ldtxt(:) = 'never done'
      CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., 1 )
   END FUNCTION mynode

   SUBROUTINE mppsync                         ! Dummy routine
   END SUBROUTINE mppsync

   SUBROUTINE mpp_sum_as( parr, kdim, kcom )  ! Dummy routine
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mpp_sum_as: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mpp_sum_as

   SUBROUTINE mpp_sum_a2s( parr, kdim, kcom ) ! Dummy routine
      REAL   , DIMENSION(:,:) ::   parr
      INTEGER                 ::   kdim
      INTEGER, OPTIONAL       ::   kcom
      WRITE(*,*) 'mpp_sum_a2s: You should not have seen this print! error?', kdim, parr(1,1), kcom
   END SUBROUTINE mpp_sum_a2s

   SUBROUTINE mpp_sum_ai( karr, kdim, kcom )  ! Dummy routine
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mpp_sum_ai: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mpp_sum_ai

   SUBROUTINE mpp_sum_s( psca, kcom )         ! Dummy routine
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mpp_sum_s

   SUBROUTINE mpp_sum_i( kint, kcom )         ! Dummy routine
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mpp_sum_i

   SUBROUTINE mppsum_realdd( ytab, kcom )
      COMPLEX(wp), INTENT(inout)         ::   ytab   ! input scalar
      INTEGER    , INTENT(in ), OPTIONAL ::   kcom
      WRITE(*,*) 'mppsum_realdd: You should not have seen this print! error?', ytab
   END SUBROUTINE mppsum_realdd

   SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
      INTEGER                      , INTENT(in   )           ::   kdim   ! size of ytab
      COMPLEX(wp), DIMENSION(kdim) , INTENT(inout)           ::   ytab   ! input array
      INTEGER                      , INTENT(in   ), OPTIONAL ::   kcom
      WRITE(*,*) 'mppsum_a_realdd: You should not have seen this print! error?', kdim, ytab(1), kcom
   END SUBROUTINE mppsum_a_realdd

   SUBROUTINE mppmax_a_real( parr, kdim, kcom )
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmax_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mppmax_a_real

   SUBROUTINE mppmax_real( psca, kcom )
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmax_real: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mppmax_real

   SUBROUTINE mppmin_a_real( parr, kdim, kcom )
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmin_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mppmin_a_real

   SUBROUTINE mppmin_real( psca, kcom )
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmin_real: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mppmin_real

   SUBROUTINE mppmax_a_int( karr, kdim, kcom )
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmax_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mppmax_a_int

   SUBROUTINE mppmax_int( kint, kcom )
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmax_int: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mppmax_int

   SUBROUTINE mppmin_a_int( karr, kdim, kcom )
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmin_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mppmin_a_int

   SUBROUTINE mppmin_int( kint, kcom )
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmin_int: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mppmin_int

   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj )
      REAL                    ::   pmin
      REAL , DIMENSION(:,:)   ::   ptab, pmask
      INTEGER                 ::   ki, kj
      WRITE(*,*) 'mpp_minloc2d: You should not have seen this print! error?', pmin, ki, kj, ptab(1,1), pmask(1,1)
   END SUBROUTINE mpp_minloc2d

   SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk )
      REAL                    ::   pmin
      REAL , DIMENSION(:,:,:) ::   ptab, pmask
      INTEGER                 ::   ki, kj, kk
      WRITE(*,*) 'mpp_minloc3d: You should not have seen this print! error?', pmin, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
   END SUBROUTINE mpp_minloc3d

   SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
      REAL                    ::   pmax
      REAL , DIMENSION(:,:)   ::   ptab, pmask
      INTEGER                 ::   ki, kj
      WRITE(*,*) 'mpp_maxloc2d: You should not have seen this print! error?', pmax, ki, kj, ptab(1,1), pmask(1,1)
   END SUBROUTINE mpp_maxloc2d

   SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
      REAL                    ::   pmax
      REAL , DIMENSION(:,:,:) ::   ptab, pmask
      INTEGER                 ::   ki, kj, kk
      WRITE(*,*) 'mpp_maxloc3d: You should not have seen this print! error?', pmax, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
   END SUBROUTINE mpp_maxloc3d

   SUBROUTINE mppstop
      STOP      ! non MPP case, just stop the run
   END SUBROUTINE mppstop

   SUBROUTINE mpp_ini_ice( kcom, knum )
      INTEGER ::   kcom, knum
      WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom, knum
   END SUBROUTINE mpp_ini_ice

   SUBROUTINE mpp_ini_znl( knum )
      INTEGER ::   knum
      WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?', knum
   END SUBROUTINE mpp_ini_znl

   SUBROUTINE mpp_comm_free( kcom )
      INTEGER ::   kcom
      WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom
   END SUBROUTINE mpp_comm_free
#endif
   !!----------------------------------------------------------------------
   !!   All cases:   ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam routines
   !!----------------------------------------------------------------------
   SUBROUTINE ctl_stop( cd1, cd2, cd3, cd4, cd5 ,   &
      &                 cd6, cd7, cd8, cd9, cd10 )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_stop  ***
      !!
      !! ** Purpose :   print an error message in the ocean.output file and
      !!              increment the error number (nstop) by one.
      !!----------------------------------------------------------------------
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd1, cd2, cd3, cd4, cd5
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd6, cd7, cd8, cd9, cd10
      !!----------------------------------------------------------------------
      !
      nstop = nstop + 1
      IF(lwp) THEN
         WRITE(numout,cform_err)
         IF( PRESENT(cd1 ) )   WRITE(numout,*) cd1
         IF( PRESENT(cd2 ) )   WRITE(numout,*) cd2
         IF( PRESENT(cd3 ) )   WRITE(numout,*) cd3
         IF( PRESENT(cd4 ) )   WRITE(numout,*) cd4
         IF( PRESENT(cd5 ) )   WRITE(numout,*) cd5
         IF( PRESENT(cd6 ) )   WRITE(numout,*) cd6
         IF( PRESENT(cd7 ) )   WRITE(numout,*) cd7
         IF( PRESENT(cd8 ) )   WRITE(numout,*) cd8
         IF( PRESENT(cd9 ) )   WRITE(numout,*) cd9
         IF( PRESENT(cd10) )   WRITE(numout,*) cd10
      ENDIF
      CALL FLUSH(numout)
      IF( numstp     /= -1 )   CALL FLUSH(numstp    )
      IF( numsol     /= -1 )   CALL FLUSH(numsol    )
      IF( numevo_ice /= -1 )   CALL FLUSH(numevo_ice)
      !
      IF( PRESENT(cd1) ) THEN            ! cd1 is OPTIONAL: test PRESENT before comparing
         IF( cd1 == 'STOP' ) THEN
            IF(lwp) WRITE(numout,*) 'huge E-R-R-O-R : immediate stop'
            CALL mppstop()
         ENDIF
      ENDIF
      !
   END SUBROUTINE ctl_stop

   SUBROUTINE ctl_warn( cd1, cd2, cd3, cd4, cd5,   &
      &                 cd6, cd7, cd8, cd9, cd10 )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_warn  ***
      !!
      !! ** Purpose :   print a warning message in the ocean.output file and
      !!                increment the warning counter (nwarn) by one.
      !!----------------------------------------------------------------------
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd1, cd2, cd3, cd4, cd5
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd6, cd7, cd8, cd9, cd10
      !!----------------------------------------------------------------------
      !
      nwarn = nwarn + 1
      IF(lwp) THEN
         WRITE(numout,cform_war)
         IF( PRESENT(cd1 ) )   WRITE(numout,*) cd1
         IF( PRESENT(cd2 ) )   WRITE(numout,*) cd2
         IF( PRESENT(cd3 ) )   WRITE(numout,*) cd3
         IF( PRESENT(cd4 ) )   WRITE(numout,*) cd4
         IF( PRESENT(cd5 ) )   WRITE(numout,*) cd5
         IF( PRESENT(cd6 ) )   WRITE(numout,*) cd6
         IF( PRESENT(cd7 ) )   WRITE(numout,*) cd7
         IF( PRESENT(cd8 ) )   WRITE(numout,*) cd8
         IF( PRESENT(cd9 ) )   WRITE(numout,*) cd9
         IF( PRESENT(cd10) )   WRITE(numout,*) cd10
      ENDIF
      CALL FLUSH(numout)
      !
   END SUBROUTINE ctl_warn

   SUBROUTINE ctl_opn( knum, cdfile, cdstat, cdform, cdacce, klengh, kout, ldwp, karea )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_opn  ***
      !!
      !! ** Purpose :   Open a file, checking that the required file is available.
      !!
      !! ** Method  :   Fortran OPEN statement
      !!----------------------------------------------------------------------
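      ! Illustrative call (the argument values are placeholders):
      !    CALL ctl_opn( knum, 'ocean.output', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, 6, .TRUE. )
      ! klengh (here -1) is only meaningful for direct-access files (see RECL below)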
      INTEGER          , INTENT(  out) ::   knum     ! logical unit to open
      CHARACTER(len=*) , INTENT(in   ) ::   cdfile   ! file name to open
      CHARACTER(len=*) , INTENT(in   ) ::   cdstat   ! disposition specifier
      CHARACTER(len=*) , INTENT(in   ) ::   cdform   ! formatting specifier
      CHARACTER(len=*) , INTENT(in   ) ::   cdacce   ! access specifier
      INTEGER          , INTENT(in   ) ::   klengh   ! record length (direct access only)
      INTEGER          , INTENT(in   ) ::   kout     ! logical unit for the prints below
      LOGICAL          , INTENT(in   ) ::   ldwp     ! switch controlling the prints
      INTEGER, OPTIONAL, INTENT(in   ) ::   karea    ! proc number
      !!
      CHARACTER(len=80) ::   clfile
      INTEGER           ::   iost
      !!----------------------------------------------------------------------
      ! adapt filename
      ! --------------
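      ! in a multi-processor run each process (karea > 1) writes to its own file,
      ! suffixed with the 4-digit processor number karea-1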
      clfile = TRIM(cdfile)
      IF( PRESENT( karea ) ) THEN
         IF( karea > 1 )   WRITE(clfile, "(a,'_',i4.4)") TRIM(clfile), karea-1
      ENDIF
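      ! find a free logical unit; when AGRIF grid nesting is active the unit is
      ! presumably managed by AGRIF itself (Agrif_Get_Unit)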
#if defined key_agrif
      IF( .NOT. Agrif_Root() )   clfile = TRIM(Agrif_CFixed())//'_'//TRIM(clfile)
      knum = Agrif_Get_Unit()
#else
      knum = get_unit()
#endif
      iost = 0
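      ! a direct-access OPEN requires an explicit record length (RECL),
      ! hence the two branches below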
      IF( cdacce(1:6) == 'DIRECT' ) THEN
         OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh, ERR=100, IOSTAT=iost )
      ELSE
         OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat             , ERR=100, IOSTAT=iost )
      ENDIF
      IF( iost == 0 ) THEN
         IF(ldwp) THEN
            WRITE(kout,*) '     file   : ', clfile,' open ok'
            WRITE(kout,*) '     unit   = ', knum
            WRITE(kout,*) '     status = ', cdstat
            WRITE(kout,*) '     form   = ', cdform
            WRITE(kout,*) '     access = ', cdacce
            WRITE(kout,*)
         ENDIF
      ENDIF
100   CONTINUE
      IF( iost /= 0 ) THEN
         IF(ldwp) THEN
            WRITE(kout,*)
            WRITE(kout,*) ' ===>>>> : bad opening file: ', clfile
            WRITE(kout,*) ' =======   ===  '
            WRITE(kout,*) '           unit   = ', knum
            WRITE(kout,*) '           status = ', cdstat
            WRITE(kout,*) '           form   = ', cdform
            WRITE(kout,*) '           access = ', cdacce
            WRITE(kout,*) '           iostat = ', iost
            WRITE(kout,*) '           we stop. verify the file '
            WRITE(kout,*)
         ENDIF
         STOP 'ctl_opn bad opening'
      ENDIF
   END SUBROUTINE ctl_opn

   SUBROUTINE ctl_nam ( kios, cdnam, ldwp )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_nam  ***
      !!
      !! ** Purpose :   print information when an error occurs while reading
      !!                a namelist
      !!
      !! ** Method  :   examine the IOSTAT value returned by the namelist READ
      !!----------------------------------------------------------------------
      INTEGER          , INTENT(inout) ::   kios    ! IO status after reading the namelist
      CHARACTER(len=*) , INTENT(in   ) ::   cdnam   ! group name of the namelist for which the error occurs
      LOGICAL          , INTENT(in   ) ::   ldwp    ! boolean term for print
      !!
      CHARACTER(len=5) ::   clios   ! string holding iostat converted to characters for print
      !!----------------------------------------------------------------------
      !
      WRITE (clios, '(I5.0)') kios
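      ! Fortran convention: a negative IOSTAT flags an end of file or record,
      ! a positive one flags a read error (e.g. a misspelled namelist variable)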
      IF( kios < 0 ) THEN
         CALL ctl_warn( 'W A R N I N G: end of record or file while reading namelist '   &
            &           // TRIM(cdnam) // ' iostat = ' // TRIM(clios) )
      ENDIF
      IF( kios > 0 ) THEN
         CALL ctl_stop( 'E R R O R : misspelled variable in namelist '   &
            &           // TRIM(cdnam) // ' iostat = ' // TRIM(clios) )
      ENDIF
      kios = 0
      RETURN
   END SUBROUTINE ctl_nam

   INTEGER FUNCTION get_unit()
      !!----------------------------------------------------------------------
      !!                  ***  FUNCTION  get_unit  ***
      !!
      !! ** Purpose :   return the index of an unused logical unit
      !!----------------------------------------------------------------------
      LOGICAL ::   llopn
      !!----------------------------------------------------------------------
      !
      get_unit = 15   ! start the search above the low unit numbers already used in NEMO
      llopn = .TRUE.
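      ! scan units 16 to 999 until INQUIRE reports one that is not connected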
      DO WHILE( (get_unit < 999) .AND. llopn )
         get_unit = get_unit + 1
         INQUIRE( unit = get_unit, opened = llopn )
      END DO
      IF( (get_unit == 999) .AND. llopn ) THEN
         CALL ctl_stop( 'get_unit: All logical units until 999 are used...' )
         get_unit = -1
      ENDIF
      !
   END FUNCTION get_unit

   !!----------------------------------------------------------------------
END MODULE lib_mpp