- !
- #define TRACEBACK write (gol,'("in ",a," (",a,", line",i5,")")') rname, __FILE__, __LINE__; call goErr
- #define IF_NOTOK_RETURN(action) if (status/=0) then; TRACEBACK; action; return; end if
- #define IF_ERROR_RETURN(action) if (status> 0) then; TRACEBACK; action; return; end if
- !
- #define IF_NOTOK_MPI(action) if (ierr/=MPI_SUCCESS) then; TRACEBACK; action; return; end if
- !
- #include "tm5.inc"
- !
- !----------------------------------------------------------------------------
- ! TM5 !
- !----------------------------------------------------------------------------
- !BOP
- !
- ! !MODULE: DOMAIN_DECOMP
- !
- ! !DESCRIPTION: Define a distributed grid object and its methods.
- ! Horizontal grid is decomposed across longitudes and latitudes.
- !\\
- !\\
- ! !INTERFACE:
- !
- MODULE DOMAIN_DECOMP
- !
- ! !USES:
- !
- use grid, only : TllGridInfo ! Type with Lon/Lat Grid Info
- use Go, only : goErr, goPr, gol ! go = general objects
- use dims, only : okdebug
- use partools ! to include mpif.h, ierr, localComm,...
-
- IMPLICIT NONE
- PRIVATE
- !
- ! !PUBLIC MEMBER FUNCTIONS:
- !
- public :: Init_DistGrid, Done_DistGrid ! life cycle routines
- public :: Get_DistGrid ! get bounds & grids
- public :: Print_DistGrid ! print utility (debug)
-
- public :: GATHER, SCATTER, UPDATE_HALO ! communication routines
- public :: GATHER_I_BAND, GATHER_J_BAND ! communication routines
- public :: SCATTER_I_BAND, SCATTER_J_BAND! for distributed slices
- public :: UPDATE_HALO_JBAND !
- public :: UPDATE_HALO_IBAND !
-
- public :: REDUCE ! mimic MPI_REDUCE / MPI_ALLREDUCE
- public :: DIST_ARR_STAT ! basic statistics of a distributed array
- public :: testcomm ! test communication (debug)
- !
- ! !PUBLIC TYPES:
- !
- TYPE, PUBLIC :: DIST_GRID
- PRIVATE
- ! parameters for global domain
- integer :: im_region ! number of longitudes
- integer :: jm_region ! number of latitudes
- ! parameters for local domain
- integer :: i_strt ! begin local domain longitude index
- integer :: i_stop ! end local domain longitude index
- integer :: i_strt_halo ! begin halo longitude index
- integer :: i_stop_halo ! end halo longitude index
- integer :: j_strt ! begin local domain latitude index
- integer :: j_stop ! end local domain latitude index
- integer :: j_strt_halo ! begin halo latitude index
- integer :: j_stop_halo ! end halo latitude index
- type(TllGridInfo) :: lli ! local Lat-Lon grid info
- type(TllGridInfo) :: glbl_lli ! global Lat-Lon grid info
- type(TllGridInfo) :: lli_z ! global meridional grid info
- integer :: neighbors(4) ! rank of (west, north, east, south) processes
- logical :: has_south_pole ! south pole is in local domain
- logical :: has_north_pole ! north pole is in local domain
- logical :: has_south_boundary ! south boundary is in local domain
- logical :: has_north_boundary ! north boundary is in local domain
- logical :: has_west_boundary ! west boundary is in local domain
- logical :: has_east_boundary ! east boundary is in local domain
- logical :: is_periodic ! covers [-180, 180]
- ! i_start, i_stop, j_start, j_stop of all PEs: (4,npes)
- integer, pointer :: bounds(:,:)
-
- END TYPE DIST_GRID
- !
- ! !PRIVATE DATA MEMBERS:
- !
- character(len=*), parameter :: mname='Domain_Decomp_MOD_'
- !
- ! !INTERFACE:
- !
- INTERFACE Init_DistGrid
- MODULE PROCEDURE INIT_DISTGRID_FROM_REGION
- MODULE PROCEDURE INIT_DISTGRID_FROM_LLI
- END INTERFACE
-
- INTERFACE Update_halo ! for arrays distributed across both I and J (1st and 2nd dim)
- MODULE PROCEDURE Update_halo_2d_r
- MODULE PROCEDURE Update_halo_3d_r
- MODULE PROCEDURE Update_halo_4d_r
- MODULE PROCEDURE Update_halo_2d_i
- END INTERFACE
- INTERFACE Update_halo_jband ! for arrays distributed across I (1st dim)
- MODULE PROCEDURE Update_halo_jband_1d_r
- MODULE PROCEDURE Update_halo_jband_2d_r
- MODULE PROCEDURE Update_halo_jband_3d_r
- MODULE PROCEDURE Update_halo_jband_4d_r
- END INTERFACE
-
- INTERFACE Update_halo_iband ! for arrays distributed across J (1st dim)
- MODULE PROCEDURE Update_halo_iband_1d_r
- END INTERFACE
-
- INTERFACE Gather
- MODULE PROCEDURE gather_2d_i
- MODULE PROCEDURE gather_2d_r
- MODULE PROCEDURE gather_3d_r
- MODULE PROCEDURE gather_4d_r
- MODULE PROCEDURE gather_5d_r
- END INTERFACE
-
- INTERFACE Scatter
- MODULE PROCEDURE scatter_2d_r
- MODULE PROCEDURE scatter_3d_r
- MODULE PROCEDURE scatter_4d_r
- END INTERFACE
- INTERFACE Gather_I_Band
- MODULE PROCEDURE gather_iband_1d_r
- ! MODULE PROCEDURE gather_iband_2d_r
- MODULE PROCEDURE gather_iband_3d_r
- END INTERFACE
- INTERFACE Gather_J_Band
- ! MODULE PROCEDURE gather_jband_1d_r
- MODULE PROCEDURE gather_jband_2d_r
- MODULE PROCEDURE gather_jband_3d_r
- MODULE PROCEDURE gather_jband_4d_r
- END INTERFACE
- INTERFACE Scatter_I_Band
- MODULE PROCEDURE scatter_iband_1d_r
- MODULE PROCEDURE scatter_iband_2d_r
- END INTERFACE
- INTERFACE Scatter_J_Band
- MODULE PROCEDURE scatter_jband_1d_r
- MODULE PROCEDURE scatter_jband_2d_r
- MODULE PROCEDURE scatter_jband_3d_r
- MODULE PROCEDURE scatter_jband_4d_r
- END INTERFACE
- INTERFACE Reduce
- MODULE PROCEDURE reduce_2d_r
- MODULE PROCEDURE reduce_3d_r
- MODULE PROCEDURE reduce_4d_r
- END INTERFACE
- INTERFACE Dist_Arr_Stat
- MODULE PROCEDURE dist_arr_stat_2d_r
- END INTERFACE
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 13 Feb 2013 - P. Le Sager - Remove deprecated MPI_GET_EXTENT and
- ! MPI_TYPE_HVECTOR.
- ! - Extra garbage clean.
- ! 21 Jun 2013 - P. Le Sager - Replace all MPI_SEND with MPI_SSEND in all
- ! scatter routines to avoid buffering.
- !
- ! !REMARKS:
- ! (1) There are two categories of subroutines here: one to define the
- ! distributed grid objects, the other for communication between processes
- ! (update_halo for one-to-one communication, and scatter/gather/reduce
- ! for collective communication).
- ! Communication routines are for data DECOMPOSED on the domain. For
- ! more general communication, see the partools module.
- !
- ! (2) GATHER & SCATTER :
- ! - global arrays only have to be truly global on root, and can be
- ! (1,1,..) on the other processes (see the sketch below).
- ! - global arrays are without halo.
- ! - if not using MPI, gather and scatter just copy arrays, so in that
- ! case it may be better not to call them, to save memory.
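- !
- ! Sketch (illustration only; 'dgrid', 'local_arr' and 'gbl_arr' are
- ! made-up names): on non-root processes a 1x1 dummy is enough for
- ! the global array:
- !
- ! if (isRoot) then ; allocate( gbl_arr(imr,jmr) )
- ! else ; allocate( gbl_arr(1,1) ) ; end if
- ! call gather( dgrid, local_arr, gbl_arr, halo, status )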
- !
- ! (3) Be careful when passing a slice (or a pointer to a slice) as argument:
- !
- ! Passing a subarray will *NOT* work most of the time, unless it is a
- ! slice on the last dimension(s). This will give erroneous results:
- !
- ! allocate( gbl_arr(-3:imr, 1:jmr ))
- ! call gather( dgrid, local_arr, gbl_arr(1:imr,:), halo, status)
- !
- ! but passing full size array will work:
- !
- ! allocate( gbl_arr(-3:imr, 1:jmr ))
- ! allocate( temp(1:imr,1:jmr) )
- ! call gather( dgrid, local_arr, temp, halo, status)
- ! gbl_arr(1:imr,:) = temp
- !
- ! (4) Scatter[Gather]_I[J]_band are communications between a row or column
- ! of processors and the root processor (optionally the row_root in a
- ! few cases).
- !EOP
- !------------------------------------------------------------------------
- CONTAINS
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: DISTGRID_RANGE
- !
- ! !DESCRIPTION: Give the range of indices covered by a rank when using
- ! nprocs processes. This is used for one dimension, and
- ! thus called twice. Distribution is as even as possible:
- ! e.g. 11 boxes are distributed across 3 processes as 4, 4
- ! and 3 (see the worked example after the routine).
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE DISTGRID_RANGE(ij, rank, nprocs, istart, iend)
- !
- ! !INPUT PARAMETERS:
- !
- integer, intent(in) :: ij ! defines range (1,..,ij) to be distributed
- integer, intent(in) :: rank ! current process, in (0,.., nprocs-1)
- integer, intent(in) :: nprocs ! total # of processes
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out):: istart, iend ! index range covered by rank
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- integer :: iwork1, iwork2
- iwork1 = ij/nprocs
- iwork2 = mod(ij,nprocs)
- istart = rank * iwork1 + 1 + min(rank, iwork2)
- iend = istart + iwork1 - 1
- if (iwork2 > rank) iend = iend + 1
- END SUBROUTINE DISTGRID_RANGE
- !EOC
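-
- ! Worked example (illustration only): for ij=11 and nprocs=3, the formulas
- ! above give iwork1=3 and iwork2=2, hence:
- !
- ! rank 0 : istart= 1, iend= 4 (4 boxes)
- ! rank 1 : istart= 5, iend= 8 (4 boxes)
- ! rank 2 : istart= 9, iend=11 (3 boxes)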
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: INIT_DISTGRID_FROM_REGION
- !
- ! !DESCRIPTION: initialize a distributed grid object for a TM5 region
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE INIT_DISTGRID_FROM_REGION( DistGrid, region, rank, nplon, nplat, halo, status)
- !
- ! !USES:
- !
- use grid, only : init
- use dims, only : xbeg, xend, dx, im, xcyc
- use dims, only : ybeg, yend, dy, jm, touch_np, touch_sp
- !
- ! !RETURN VALUE:
- !
- type(dist_grid), intent(inout) :: DistGrid
- !
- ! !INPUT PARAMETERS:
- !
- integer, intent(in) :: region ! TM5 region number
- integer, intent(in) :: rank ! current process in (0,.., nprocs-1)
- integer, intent(in) :: nplon ! number of processes along longitude
- integer, intent(in) :: nplat ! number of processes along latitude
- integer, intent(in), optional :: halo ! halo size (default=0)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- !
- ! (1) Ideally this would be called with HALO=<the halo used in the code>...
- ! but the same halo is not used for all data sets. Hence, in many
- ! subroutines, the halo has to be independent of the halo carried by the
- ! distributed grid. We could simplify things by using a halo of 2 for all
- ! distributed data, or by disregarding the halo in the dist_grid, but we
- ! just keep it general (as is).
- !
- ! (2) npe_lat/lon are also available through partools, but may not have
- ! been initialized yet, so we keep the nplon/nplat inputs here. Could do
- ! without them though...
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'init_distgrid_from_region'
- integer :: start, end, iwidth, i, iml, jml, iwork(2), lshape(4)
- real :: yb, xb, dlon, dlat
- character(len=39) :: name
- ! for pretty print
- integer, parameter :: maxrow=5
- integer, parameter :: maxcol=5
- integer :: i1, j1, j2, k, work
- character(len=3) :: str1
- ! for virtual topology
- integer, allocatable :: dist_map(:,:) ! 2D process topology (np_lon, np_lat)
- integer, allocatable :: dist_mapw(:,:) ! 2D process topology with neighbors (np_lon+1, np_lat+1)
- integer :: rank_lon ! index of current process in azimuthal set (1,..,np_lon)
- integer :: rank_lat ! index of current process in meridional set (1,..,np_lat)
- !---------------------------------------------
- ! 2D process topology, and mapping 1D <-> 2D
- !---------------------------------------------
- ! besides the periodicity, topology is independent of the region
- allocate(dist_map(nplon, nplat))
- npes = nplat*nplon
- dist_map = reshape( (/ (i,i=0,npes-1) /), shape=(/ nplon, nplat /))
-
- ! store
- iwork = maxloc(dist_map, mask=dist_map.eq.rank)
- rank_lon = iwork(1) ! in [1..nplon]
- rank_lat = iwork(2) ! in [1..nplat]
- ! Neighbors = 2d process map with extra neighbors
- allocate(dist_mapw(0:nplon+1, 0:nplat+1))
- dist_mapw = MPI_PROC_NULL
- dist_mapw(1:nplon,1:nplat) = dist_map
- ! East-West periodicity
- DistGrid%is_periodic =.false.
- if (xcyc(region)==1) then
- dist_mapw( 0,1:nplat) = dist_map(nplon,:)
- dist_mapw(nplon+1,1:nplat) = dist_map(1,:)
- DistGrid%is_periodic =.true.
- end if
- DistGrid%neighbors = (/ dist_mapw(rank_lon-1, rank_lat ), & ! west
- dist_mapw(rank_lon, rank_lat+1), & ! north
- dist_mapw(rank_lon+1, rank_lat ), & ! east
- dist_mapw(rank_lon, rank_lat-1) /) ! south
- !---------------------------------------------
- ! fill in distributed grid info
- !---------------------------------------------
- ! region boundaries
- DistGrid%has_south_boundary = (rank_lat == 1 )
- DistGrid%has_north_boundary = (rank_lat == nplat)
- DistGrid%has_west_boundary = (rank_lon == 1 )
- DistGrid%has_east_boundary = (rank_lon == nplon)
- ! poles
- DistGrid%has_south_pole = DistGrid%has_south_boundary .and. (touch_sp(region) == 1)
- DistGrid%has_north_pole = DistGrid%has_north_boundary .and. (touch_np(region) == 1)
-
- ! max halo that will be used in the code
- iwidth=0
- if (present(halo)) iwidth=halo
- ! index ranges covered by current process
- call DistGrid_range(im(region), rank_lon-1, nplon, start, end)
- ! check we are within the limit, i.e. we must have #boxes >= halo and at least 1.
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in X (longitude) direction! ", nplon, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
-
- DistGrid%im_region = im(region)
- DistGrid%i_strt = start
- DistGrid%i_stop = end
- DistGrid%i_strt_halo = start-iwidth
- DistGrid%i_stop_halo = end+iwidth
- ! To think about it when/if needed:
- ! if (DistGrid%has_north_pole) DistGrid%i_stop_halo = end
- ! if (DistGrid%has_south_pole) DistGrid%i_strt_halo = start
- call DistGrid_range(jm(region), rank_lat-1, nplat, start, end)
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in Y (latitude) direction! ", nplat, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
- DistGrid%jm_region = jm(region)
- DistGrid%j_strt = start
- DistGrid%j_stop = end
- DistGrid%j_strt_halo = start-iwidth
- DistGrid%j_stop_halo = end+iwidth
-
- #ifdef parallel_cplng
- if ( modulo(im(region),nplon) /= 0) then
- write(gol,'("number of X processors ",i3," does not divide evenly the number of grid boxes",i3)') &
- nplon, im(region) ; call goErr
- status=1; TRACEBACK; return
- endif
- if ( modulo(jm(region),nplat) /= 0) then
- write(gol,'("number of Y processors ",i3," does not divide evenly the number of grid boxes ",i3)') &
- nplat, jm(region) ; call goErr
- status=1; TRACEBACK; return
- endif
- #endif
-
- !---------------------------------------------
- ! geophysical grids
- !---------------------------------------------
- ! grid spacing:
- dlon = real(xend(region)-xbeg(region))/im(region)
- dlat = real(yend(region)-ybeg(region))/jm(region)
- !------ local
- iml = DistGrid%i_stop - DistGrid%i_strt + 1
- jml = DistGrid%j_stop - DistGrid%j_strt + 1
- xb = xbeg(region) + ( DistGrid%i_strt - 0.5 ) * dlon
- yb = ybeg(region) + ( DistGrid%j_strt - 0.5 ) * dlat
- write(name, '("distributed grid on process ", i5.5)') rank
-
- call INIT( DistGrid%lli, xb, dlon, iml, yb, dlat, jml, status, name=name )
- IF_NOTOK_RETURN(status=1)
- !------ global
- xb = xbeg(region) + 0.5 * dlon
- yb = ybeg(region) + 0.5 * dlat
- write(name, '("global grid on process ", i5.5)') rank
- call INIT( DistGrid%glbl_lli, xb, dlon, im(region), yb, dlat, jm(region), status, name=name )
- IF_NOTOK_RETURN(status=1)
-
- !------ global meridional grid
- name = "merid " // trim(name)
- call INIT( DistGrid%lli_z, 0.0, 360., 1, yb, dlat, jm(region), status, name=name )
- IF_NOTOK_RETURN(status=1)
- !---------------------------------------------
- ! store the shape info of all PEs on all PEs (used only on root so far, but
- ! may become useful if we gather/scatter to/from non-root in the future)
- !---------------------------------------------
- #ifdef MPI
- allocate(DistGrid%bounds(4,0:npes-1))
- lshape = (/ DistGrid%i_strt, DistGrid%i_stop, DistGrid%j_strt, DistGrid%j_stop /)
- call MPI_ALLGATHER(lshape, 4, MPI_INTEGER, DistGrid%bounds, 4, MPI_INTEGER, localComm, ierr)
- #endif
- !---------------------------------------------
- ! PRINT (Debug) 2D process topology
- !---------------------------------------------
- if(okdebug)then
- write(gol,*) '------------- world view ----------------------' ; call goPr
- write(gol, '("process #", i5, " out of ", i5)') myid, npes ; call goPr
- write(gol, '("2D rank = [",i4,", ",i4,"]")') rank_lon, rank_lat ; call goPr
- i1=min(maxcol,nplon)
- str1='...'
- if (i1==nplon) str1=''
- work=nplat/2
- j1=min(work, maxrow)
- j2=max(nplat-maxrow+1, work+1)
- do k=nplat,j2,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- if ((j2-1) /= j1) then; write(gol,*)" ..." ; call goPr; end if
- do k=j1,1,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- write(gol,*) ''
- write(gol,*) '-----------------------------------------------' ; call goPr
- end if
-
- ! Done
- deallocate(dist_map)
- deallocate(dist_mapw)
- status = 0
- END SUBROUTINE INIT_DISTGRID_FROM_REGION
- !EOC
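-
- ! Usage sketch (illustration only; the 2x3 process layout and 'dgrid' are
- ! made up, 'myid' comes from partools):
- !
- ! type(dist_grid) :: dgrid
- ! call Init_DistGrid( dgrid, region=1, rank=myid, nplon=2, nplat=3, &
- ! halo=2, status=status )
- ! IF_NOTOK_RETURN(status=1)
- ! ! ... work with dgrid ..., then:
- ! call Done_DistGrid( dgrid, status )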
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: INIT_DISTGRID_FROM_LLI
- !
- ! !DESCRIPTION: initialize a distributed grid object from a lli object
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE INIT_DISTGRID_FROM_LLI( DistGrid, lli, rank, halo, status)
- !
- ! !USES:
- !
- use grid, only : init, assignment(=)
- !
- ! !INPUT/OUTPUT PARAMETERS:
- !
- type(dist_grid), intent(inout) :: DistGrid
- !
- ! !INPUT PARAMETERS:
- !
- type(TllGridInfo), intent(in) :: lli
- integer, intent(in) :: rank ! current process in (0,.., nprocs-1)
- integer, optional, intent(in) :: halo ! halo size (default=0)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 18 Nov 2013 - Ph. Le Sager - v0, adapted from init_distgrid_from_region
- !
- ! !REMARKS:
- !
- ! (1) See INIT_DISTGRID_FROM_REGION for comment about halo input.
- ! (2) Uses npe_lat/lon from the partools module : TM5_MPI_Init2 must have
- ! been called beforehand.
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'init_distgrid_from_lli'
- integer :: start, end, iwidth, i, iml, jml, iwork(2), lshape(4)
- real :: yb, xb, dlon, dlat
- character(len=39) :: name
- integer :: nplon ! number of processes along longitude
- integer :: nplat ! number of processes along latitude
- ! for pretty print
- integer, parameter :: maxrow=5
- integer, parameter :: maxcol=5
- integer :: i1, j1, j2, k, work
- character(len=3) :: str1
- ! for virtual topology
- integer, allocatable :: dist_map(:,:) ! 2D process topology (np_lon, np_lat)
- integer, allocatable :: dist_mapw(:,:) ! 2D process topology with neighbors (np_lon+1, np_lat+1)
- integer :: rank_lon ! index of current process in azimuthal set (1,..,np_lon)
- integer :: rank_lat ! index of current process in meridional set (1,..,np_lat)
- !---------------------------------------------
- ! test for valid input
- !---------------------------------------------
- if (.not.(associated( lli%lon_deg ))) then
- write(gol,*)" The LLI object has not been initialized. " ; call goErr
- write(gol,*)" A distributed grid for MPI cannot be build." ; call goErr
- status=1; TRACEBACK; return
- end if
- nplat = npe_lat
- nplon = npe_lon
-
- !---------------------------------------------
- ! 2D process topology, and mapping 1D <-> 2D
- !---------------------------------------------
- ! besides the periodicity, topology is independent of the region
- allocate(dist_map(nplon, nplat))
- npes = nplat*nplon
- dist_map = reshape( (/ (i,i=0,npes-1) /), shape=(/ nplon, nplat /))
- ! store
- iwork = maxloc(dist_map, mask=dist_map.eq.rank)
- rank_lon = iwork(1) ! in [1..nplon]
- rank_lat = iwork(2) ! in [1..nplat]
- ! Neighbors = 2d process map with extra neighbors
- allocate(dist_mapw(0:nplon+1, 0:nplat+1))
- dist_mapw = MPI_PROC_NULL
- dist_mapw(1:nplon,1:nplat) = dist_map
- ! East-West periodicity
- DistGrid%is_periodic =.false.
- if ( lli%dlon_deg*lli%im == 360. ) then
- dist_mapw( 0,1:nplat) = dist_map(nplon,:)
- dist_mapw(nplon+1,1:nplat) = dist_map(1,:)
- DistGrid%is_periodic =.true.
- end if
- DistGrid%neighbors = (/ dist_mapw(rank_lon-1, rank_lat ), & ! west
- dist_mapw(rank_lon, rank_lat+1), & ! north
- dist_mapw(rank_lon+1, rank_lat ), & ! east
- dist_mapw(rank_lon, rank_lat-1) /) ! south
- !---------------------------------------------
- ! fill in distributed grid info
- !---------------------------------------------
- ! region boundaries
- DistGrid%has_south_boundary = (rank_lat == 1 )
- DistGrid%has_north_boundary = (rank_lat == nplat)
- DistGrid%has_west_boundary = (rank_lon == 1 )
- DistGrid%has_east_boundary = (rank_lon == nplon)
- ! poles
- DistGrid%has_south_pole = DistGrid%has_south_boundary .and. (lli%blat_deg(0) == -90.0)
- DistGrid%has_north_pole = DistGrid%has_north_boundary .and. (lli%blat_deg(lli%jm) == 90.0)
- ! max halo that will be used in the code
- iwidth=0
- if (present(halo)) iwidth=halo
- ! index ranges covered by current process
- call DistGrid_range(lli%im, rank_lon-1, nplon, start, end)
- ! check we are within the limit, i.e. we must have #boxes >= halo and at least 1.
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in X (longitude) direction:", nplon, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
- DistGrid%im_region = lli%im
- DistGrid%i_strt = start
- DistGrid%i_stop = end
- DistGrid%i_strt_halo = start-iwidth
- DistGrid%i_stop_halo = end+iwidth
- ! To think about it when/if needed:
- ! if (DistGrid%has_north_pole) DistGrid%i_stop_halo = end
- ! if (DistGrid%has_south_pole) DistGrid%i_strt_halo = start
- call DistGrid_range(lli%jm, rank_lat-1, nplat, start, end)
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in Y (latitude) direction:", nplat, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
- DistGrid%jm_region = lli%jm
- DistGrid%j_strt = start
- DistGrid%j_stop = end
- DistGrid%j_strt_halo = start-iwidth
- DistGrid%j_stop_halo = end+iwidth
- !---------------------------------------------
- ! geophysical grids
- !---------------------------------------------
- !------ tile
- iml = DistGrid%i_stop - DistGrid%i_strt + 1
- jml = DistGrid%j_stop - DistGrid%j_strt + 1
- xb = lli%lon_deg(1) + ( DistGrid%i_strt - 1 ) * lli%dlon_deg
- yb = lli%lat_deg(1) + ( DistGrid%j_strt - 1 ) * lli%dlat_deg
- write(name, '("distributed grid on process ", i5.5)') rank
- call INIT( DistGrid%lli, xb, lli%dlon_deg, iml, yb, lli%dlat_deg, jml, status, name=name )
- IF_NOTOK_RETURN(status=1)
- !------ world
- DistGrid%glbl_lli = lli
-
- !------ world meridional grid
- name = "merid "
- call INIT( DistGrid%lli_z, 0.0, 360., 1, yb, lli%dlat_deg, lli%jm, status, name=name )
- IF_NOTOK_RETURN(status=1)
- !---------------------------------------------
- ! store the shape info of all PEs on all PEs (used only on root so far, but
- ! may become useful if we gather/scatter to/from non-root in the future)
- !---------------------------------------------
- #ifdef MPI
- allocate(DistGrid%bounds(4,0:npes-1))
- lshape = (/ DistGrid%i_strt, DistGrid%i_stop, DistGrid%j_strt, DistGrid%j_stop /)
- call MPI_ALLGATHER(lshape, 4, MPI_INTEGER, DistGrid%bounds, 4, MPI_INTEGER, localComm, ierr)
- #endif
- !---------------------------------------------
- ! PRINT (Debug) 2D process topology
- !---------------------------------------------
- if(okdebug)then
- write(gol,*) '------------- world view ----------------------' ; call goPr
- write(gol, '("process #", i5, " out of ", i5)') myid, npes ; call goPr
- write(gol, '("2D rank = [",i4,", ",i4,"]")') rank_lon, rank_lat ; call goPr
- i1=min(maxcol,nplon)
- str1='...'
- if (i1==nplon) str1=''
- work=nplat/2
- j1=min(work, maxrow)
- j2=max(nplat-maxrow+1, work+1)
- do k=nplat,j2,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- if ((j2-1) /= j1) then; write(gol,*)" ..." ; call goPr; end if
- do k=j1,1,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- write(gol,*) ''
- write(gol,*) '-----------------------------------------------' ; call goPr
- end if
- ! Done
- deallocate(dist_map)
- deallocate(dist_mapw)
- status = 0
- END SUBROUTINE INIT_DISTGRID_FROM_LLI
- !EOC
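-
- ! Usage sketch (illustration only; 'my_lli' stands for an already
- ! initialized TllGridInfo object):
- !
- ! call Init_DistGrid( dgrid, my_lli, myid, halo=2, status=status )
- ! IF_NOTOK_RETURN(status=1)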
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: DONE_DISTGRID
- !
- ! !DESCRIPTION: deallocate distributed grid object elements
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE DONE_DISTGRID( DistGrid, STATUS )
- !
- ! !USES:
- !
- use grid, only : done
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(inout) :: DistGrid
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'Done_Distgrid'
- call done(DistGrid%lli, status)
- IF_NOTOK_RETURN(status=1)
- call done(DistGrid%lli_z, status)
- IF_NOTOK_RETURN(status=1)
- call done(DistGrid%glbl_lli, status)
- IF_NOTOK_RETURN(status=1)
-
- if (associated(DistGrid%bounds)) deallocate(DistGrid%bounds)
-
- status=0
- END SUBROUTINE DONE_DISTGRID
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GET_DISTGRID
- !
- ! !DESCRIPTION: provide quick access to object elements (bounds and grids),
- ! while keeping them private.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GET_DISTGRID(DistGrid, &
- i_strt, i_stop, i_strt_halo, i_stop_halo, &
- j_strt, j_stop, j_strt_halo, j_stop_halo, &
- i_wrld, j_wrld, &
- hasSouthPole, hasNorthPole, &
- hasSouthBorder, hasNorthBorder, &
- hasEastBorder, hasWestBorder, &
- lli, lli_z, global_lli )
- !
- ! !USES:
- !
- use grid, only : assignment(=) ! to copy lli
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, optional :: i_strt, i_stop, i_strt_halo, i_stop_halo
- integer, optional :: j_strt, j_stop, j_strt_halo, j_stop_halo
- integer, optional :: i_wrld, j_wrld
- logical, optional :: hasSouthPole, hasNorthPole
- logical, optional :: hasSouthBorder, hasNorthBorder
- logical, optional :: hasWestBorder, hasEastBorder
- type(TllGridInfo), optional :: lli ! local Lat-Lon grid info
- type(TllGridInfo), optional :: global_lli ! global Lat-Lon grid info
- type(TllGridInfo), optional :: lli_z ! global zonal grid info
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- if (present(i_strt)) i_strt = DistGrid%i_strt
- if (present(i_stop)) i_stop = DistGrid%i_stop
- if (present(i_strt_halo)) i_strt_halo = DistGrid%i_strt_halo
- if (present(i_stop_halo)) i_stop_halo = DistGrid%i_stop_halo
- if (present(j_strt)) j_strt = DistGrid%j_strt
- if (present(j_stop)) j_stop = DistGrid%j_stop
- if (present(j_strt_halo)) j_strt_halo = DistGrid%j_strt_halo
- if (present(j_stop_halo)) j_stop_halo = DistGrid%j_stop_halo
- if (present(i_wrld)) i_wrld = DistGrid%im_region
- if (present(j_wrld)) j_wrld = DistGrid%jm_region
- if (present(hasSouthPole)) hasSouthPole = DistGrid%has_south_pole
- if (present(hasNorthPole)) hasNorthPole = DistGrid%has_north_pole
-
- if (present(hasSouthBorder)) hasSouthBorder = DistGrid%has_south_boundary
- if (present(hasNorthBorder)) hasNorthBorder = DistGrid%has_north_boundary
- if (present(hasEastBorder )) hasEastBorder = DistGrid%has_east_boundary
- if (present(hasWestBorder )) hasWestBorder = DistGrid%has_west_boundary
- if (present(lli)) lli = DistGrid%lli
- if (present(global_lli)) global_lli = DistGrid%glbl_lli
- if (present(lli_z)) lli_z = DistGrid%lli_z
-
- END SUBROUTINE GET_DISTGRID
- !EOC
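-
- ! Usage sketch (illustration only; 'field' is a made-up local array): the
- ! typical pattern is to fetch the local bounds once, then loop over the
- ! owned domain:
- !
- ! integer :: i, i1, i2, j, j1, j2
- ! call Get_DistGrid( dgrid, i_strt=i1, i_stop=i2, j_strt=j1, j_stop=j2 )
- ! do j = j1, j2
- ! do i = i1, i2
- ! field(i,j) = ...
- ! end do
- ! end do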
-
- #ifdef MPI /* MPI TYPES */
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GET_HALO_TYPE
- !
- ! !DESCRIPTION: Returns derived MPI types needed for halo communications,
- ! and the start indices for the send & receive communications.
- ! Four of each are returned, one for each side of the domain,
- ! in the following order: West, North, East, South.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GET_HALO_TYPE( DistGrid, arr_shape, halo, datatype, srtype, ijsr, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: arr_shape(:) ! shape of local array
- integer, intent(in) :: halo ! halo size
- integer, intent(in) :: datatype ! basic MPI datatype
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: srtype(4) ! derived MPI datatypes for send/recv
- integer, intent(out) :: ijsr(4,4) ! start indices for send/recv
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) Not tested in all imaginable cases, but should work with any size
- ! GE 2, and any of the basic numerical MPI_xxx datatypes
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'get_halo_type'
- integer :: nsslice, weslice, nstype, wetype ! MPI derived datatypes
- integer :: n, m, hstride ! sizes to build datatypes
- integer :: ndims, sz, i0, j0, i1, j1
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, vstride
- sz = size(arr_shape)
- ! collapse third and above dimensions
- ndims = 1
- if (sz > 2) ndims = product(arr_shape(3:))
- ! strides
- CALL MPI_TYPE_GET_EXTENT( datatype, lb, sizeoftype, ierr)
- IF_NOTOK_MPI(status=1)
- hstride = arr_shape(1)
- vstride = arr_shape(1)*arr_shape(2)*sizeoftype
-
- ! size of data slice to carry
- n = DistGrid%I_STOP - DistGrid%I_STRT + 1
- m = DistGrid%J_STOP - DistGrid%J_STRT + 1
- ! Type for North and South halo regions
- ! --------------------------------------
- ! halo lines of length N, separated by hstride
- call MPI_TYPE_VECTOR (halo, n, hstride, datatype, nsslice, ierr)
- IF_NOTOK_MPI(status=1)
- ! stack 3rd (and above) dimension if any
- if (ndims == 1) then
- nstype = nsslice
- else
- ! note : also works with ndims=1
- call MPI_TYPE_CREATE_HVECTOR (ndims, 1, vstride, nsslice, nstype, ierr)
- IF_NOTOK_MPI(status=1)
-
- call MPI_TYPE_FREE(nsslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
- call MPI_TYPE_COMMIT (nstype, ierr)
- IF_NOTOK_MPI(status=1)
- ! Type for West and East halo regions
- ! ------------------------------------
- ! M lines of length 'halo', separated by hstride
- call MPI_TYPE_VECTOR (m, halo, hstride, datatype, weslice, ierr)
- IF_NOTOK_MPI(status=1)
- ! stack 3rd (and above) dimension
- if (ndims == 1) then
- wetype = weslice
- else
- ! note : also works with ndims=1
- call MPI_TYPE_CREATE_HVECTOR (ndims, 1, vstride, weslice, wetype, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(weslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
-
- call MPI_TYPE_COMMIT (wetype, ierr)
- IF_NOTOK_MPI(status=1)
- ! Buffers anchors
- ! ----------------
- ! Assuming that neighbors are stored in (West, North, East, South) order
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- srtype = (/ wetype, nstype, wetype, nstype /)
- ijsr(:,1) = (/ i0, i0, i1+1-halo, i0 /) ! i-start location of buffer to send
- ijsr(:,2) = (/ j0, j1-halo+1, j0, j0 /) ! j-start location of buffer to send
- ijsr(:,3) = (/ i0-halo, i0, i1+1, i0 /) ! i-start location of buffer to receive
- ijsr(:,4) = (/ j0, j1+1, j0, j0-halo /) ! j-start location of buffer to receive
- status=0
-
- END SUBROUTINE GET_HALO_TYPE
- !EOC
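-
- ! Sketch of intended use (assumptions: 'arr' is a local 3D array with halo,
- ! 'stat' an MPI status array; the Update_Halo routines below are the real
- ! users of these types):
- !
- ! call Get_Halo_Type( DistGrid, shape(arr), halo, MPI_DOUBLE_PRECISION, srtype, ijsr, status )
- ! do k = 1, 4 ! West, North, East, South
- ! call MPI_SENDRECV( arr(ijsr(k,1),ijsr(k,2),1), 1, srtype(k), DistGrid%neighbors(k), 1, &
- ! arr(ijsr(k,3),ijsr(k,4),1), 1, srtype(k), DistGrid%neighbors(k), 1, &
- ! localComm, stat, ierr )
- ! end do
- !
- ! Missing neighbors are MPI_PROC_NULL, so those transfers are no-ops.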
-
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GET_INTERIOR_TYPE
- !
- ! !DESCRIPTION: Returns derived MPI types that describe the interior domains
- ! needed for collective communications.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GET_INTERIOR_TYPE( DistGrid, shp, datatype, linterior, ginterior, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: shp(:) ! shape of local array
- integer, intent(in) :: datatype ! basic MPI datatype
- !
- ! !OUTPUT PARAMETERS:
- !
- ! derived MPI datatypes describing distributed interior domains:
- integer, intent(out) :: ginterior(npes-1) ! within global array (used by root, as many as NPES-1)
- integer, intent(out) :: linterior ! within local array (used by non-root)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) inputs must be checked beforehand by calling CHECK_DIST_ARR
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'get_interior_type'
- integer :: gslice, lslice ! intermediate datatypes
- integer :: n, m ! sizes to build datatypes
- integer :: hlstride, hgstride ! strides to build datatypes
- integer :: stack, sz, klm
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, vlstride, vgstride
- ! init : number of dimensions, default value
- sz = size(shp)
- ginterior = MPI_DATATYPE_NULL
- linterior = MPI_DATATYPE_NULL
- ! collapse third and above dimensions
- stack = 1
- if (sz > 2) stack = product(shp(3:))
-
- ! size of data slice to carry
- n = DistGrid%I_STOP - DistGrid%I_STRT + 1
- m = DistGrid%J_STOP - DistGrid%J_STRT + 1
- CALL MPI_TYPE_GET_EXTENT( datatype, lb, sizeoftype, ierr)
- IF_NOTOK_MPI(status=1)
-
- ! horizontal global stride (in data)
- hgstride = DistGrid%im_region
- ! vertical global stride (in byte)
- vgstride = DistGrid%im_region * DistGrid%jm_region * sizeoftype
- ! local strides (may be different from n and n*m because of the halo)
- hlstride = shp(1) ! in data
- vlstride = shp(1)*shp(2)*sizeoftype ! in byte
- if ( isRoot ) then
- do klm=1,npes-1
- ! horizontal chunk is M lines of length N, separated by a global
- ! stride
- n = DistGrid%bounds(2,klm) - DistGrid%bounds(1,klm) + 1
- m = DistGrid%bounds(4,klm) - DistGrid%bounds(3,klm) + 1
- call MPI_TYPE_VECTOR (m, n, hgstride, datatype, gslice, ierr)
- IF_NOTOK_MPI(status=1)
-
- ! stack 3rd and above dimensions
- if (stack == 1) then
- ginterior(klm) = gslice
- else
- ! note : also works with stack=1
- call MPI_TYPE_CREATE_HVECTOR(stack, 1, vgstride, gslice, ginterior(klm), ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(gslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
-
- call MPI_TYPE_COMMIT (ginterior(klm), ierr)
- IF_NOTOK_MPI(status=1)
-
- end do
- else
-
- ! local interior is basically M lines of length N, separated by a local
- ! stride
- call MPI_TYPE_VECTOR (m, n, hlstride, datatype, lslice, ierr)
- IF_NOTOK_MPI(status=1)
-
- ! stack 3rd and above dimensions
- if (stack == 1) then
- linterior = lslice
- else
- ! note : also works with stack=1
- call MPI_TYPE_CREATE_HVECTOR (stack, 1, vlstride, lslice, linterior, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(lslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
-
- call MPI_TYPE_COMMIT (linterior, ierr)
- IF_NOTOK_MPI(status=1)
-
- end if
- status=0
-
- END SUBROUTINE GET_INTERIOR_TYPE
- !EOC
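-
- ! Sketch of intended use in a gather (assumptions: 'local'/'global' are the
- ! distributed and global arrays, 'root' the root rank, and i0/j0/i1/j1/klm
- ! local helpers; the actual gather_* routines implement this pattern):
- !
- ! call Get_Interior_Type( DistGrid, shape(local), MPI_DOUBLE_PRECISION, linterior, ginterior, status )
- ! if ( isRoot ) then ! root copies its own interior, then receives the others
- ! do klm = 1, npes-1
- ! i0 = DistGrid%bounds(1,klm) ; j0 = DistGrid%bounds(3,klm)
- ! call MPI_RECV( global(i0,j0,1), 1, ginterior(klm), klm, 1, localComm, stat, ierr )
- ! end do
- ! call Free_Derived_Type( ginterior )
- ! else
- ! call MPI_SSEND( local(i1,j1,1), 1, linterior, root, 1, localComm, ierr )
- ! call MPI_TYPE_FREE( linterior, ierr )
- ! end if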
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: FREE_DERIVED_TYPE
- !
- ! !DESCRIPTION: free all MPI derived datatypes in a vector
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE FREE_DERIVED_TYPE( datatype )
- !
- ! !INPUT/OUTPUT PARAMETERS:
- !
- integer, intent(inout) :: datatype(:) ! set of derived MPI datatypes
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- integer :: i, j
- integer :: res(size(datatype)) ! hold unique elements
- integer :: k ! number of unique elements
-
- ! Get list of unique handle(s)
- ! ----------------------------
- k = 1
- res(1) = 1
-
- outer: do i=2,size(datatype)
- ! look for a match
- do j=1,k
- if (datatype(res(j)) == datatype(i)) cycle outer ! match
- end do
-
- ! no match : add it to the list
- k = k + 1
- res(k) = i
-
- end do outer
- ! Free handle(s)
- ! ---------------------------
- do i=1,k
- if (datatype(res(i)) /= MPI_DATATYPE_NULL) &
- call MPI_TYPE_FREE(datatype(res(i)), ierr)
- end do
- END SUBROUTINE FREE_DERIVED_TYPE
- !EOC
- #endif /* MPI TYPES */
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: CHECK_DIST_ARR
- !
- ! !DESCRIPTION: Check that the shape of a local array corresponds to an
- ! array distributed on the current process. This check is done on the
- ! first 2 dimensions only, along which the domain
- ! decomposition is done.
- !
- ! Optionally: check shape of a global array. If arrays are 3D
- ! or more, the 3rd and above dimensions of local and global
- ! arrays are also compared. (This becomes "2D or more" for I-
- ! and J-Slices.)
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE CHECK_DIST_ARR( DistGrid, shp, halo, status, glbl_shp, jband, iband, has_global )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: shp(:) ! shape of local array
- integer, intent(in) :: halo ! halo size
- !
- integer, intent(in), optional :: glbl_shp(:) ! shape of global array
- logical, intent(in), optional :: jband, iband ! is it a zonal or meridional slice?
- logical, intent(in), optional :: has_global ! current proc hold global array (default is root)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS: i-band refers to a unique i value, i.e. a meridional dataset.
- ! j-band refers to a unique j value, i.e. a zonal dataset.
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'check_dist_arr '
- integer :: n, m, sz
- integer, allocatable :: glbsz(:)
- logical :: x_jband, x_iband, hold_global
- status = 0
- ! check inputs
- x_jband=.false.
- x_iband=.false.
- if(present(iband))x_iband=iband
- if(present(jband))x_jband=jband
- if(x_iband.and.x_jband)then
- write (gol,*) "CHECK_DIST_ARR: cannot have **both** I- and J-Slices" ; call goErr
- TRACEBACK; status=1; return
- end if
- ! by default global array is expected on root
- hold_global = isRoot
- if (present(has_global)) hold_global=has_global
-
- ! required size w/o halo
- n = DistGrid%I_STOP - DistGrid%I_STRT + 1
- m = DistGrid%J_STOP - DistGrid%J_STRT + 1
- ! check shape of array
- if (x_iband) then
- if (shp(1) /= m+2*halo) then
- write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr
- write (gol,'(" local array : ", i4)') shp(1) ; call goErr
- write (gol,'(" should be : ", i4)') m+2*halo ; call goErr
- write (gol,'(" with J-stop : ", i4)') DistGrid%J_STOP ; call goErr
- write (gol,'(" J-start: ", i4)') DistGrid%J_STRT ; call goErr
- TRACEBACK; status=1; return
- end if
- else if (x_jband) then
- if (shp(1) /= n+2*halo) then
- write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr
- write (gol,'(" local array : ", i4)') shp(1) ; call goErr
- write (gol,'(" should be : ", i4)') n+2*halo ; call goErr
- write (gol,'(" with I-stop : ", i4)') DistGrid%I_STOP ; call goErr
- write (gol,'(" I-start: ", i4)') DistGrid%I_STRT ; call goErr
- TRACEBACK; status=1; return
- end if
- else
- if ((shp(1) /= n+2*halo).or.(shp(2) /= m+2*halo)) then
- write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr
- write (gol,'(" local array : ",2i4)') shp(1:2) ; call goErr
- write (gol,'(" should be : ",2i4)') n+2*halo, m+2*halo ; call goErr
- write (gol,'(" w/ I/J-stop : ", i4)') DistGrid%I_STOP, DistGrid%J_STOP ; call goErr
- write (gol,'(" I/J-start: ", i4)') DistGrid%I_STRT, DistGrid%J_STRT ; call goErr
- TRACEBACK; status=1; return
- end if
- end if
- ! check shape of global array on root
- if (present(glbl_shp) .and. hold_global) then
- sz = size(shp)
-
- if (sz /= size(glbl_shp)) then
- write (gol,'("CHECK_DIST_ARR : global and local arrays have different rank:")') ; call goErr
- write (gol,'(" local rank : ", i4)') sz ; call goErr
- write (gol,'(" global rank : ", i4)') size(glbl_shp) ; call goErr
- TRACEBACK; status=1; return
- end if
- allocate(glbsz(sz))
- glbsz = shp
- if (x_iband) then
- glbsz(1) = DistGrid%jm_region
- else if (x_jband) then
- glbsz(1) = DistGrid%im_region
- else
- glbsz(1:2) = (/ DistGrid%im_region, DistGrid%jm_region /)
- end if
-
- if ( any (glbl_shp /= glbsz) ) then
- write (gol,'("CHECK_DIST_ARR : global array has wrong shape:")') ; call goErr
- write (gol,*) " shape(global) : ", glbl_shp ; call goErr
- write (gol,*) " im [and/or] jm_region/-extra dims- : ", glbsz ; call goErr
- TRACEBACK; status=1; return
- end if
- deallocate(glbsz)
- end if
- END SUBROUTINE CHECK_DIST_ARR
- !EOC
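-
- ! Usage sketch (added illustration): validating a local 3D field with a
- ! 2-cell halo against its global counterpart before a gather. The names
- ! lfield and gfield are placeholders:
- !
- ! call CHECK_DIST_ARR( DistGrid, shape(lfield), 2, status, shape(gfield) )
- ! IF_NOTOK_RETURN(status=1)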
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_IBAND_1D_R
- !
- ! !DESCRIPTION: update halo cells of a vector distributed along latitudes
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_IBAND_1D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 7 Jan 2016 - Ph. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_iband_1d_r'
- integer :: j0, j1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4)
- integer :: k, sz(1), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! degenerate case
- if (npe_lat==1) return
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, iband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ j1-halo+1, j0 /) ! j-start location of north and south buffers to send
- ijsr(:,2) = (/ j1+1, j0-halo /) ! j-start location of north and south buffers to receive
- ! Communicate (only south and north)
- ! ----------------
- tag_snd = (/2,4/)
- tag_rcv = (/4,2/)
- nghbr = (/2,4/)
- neigh : do k=1,2
- call MPI_ISEND( dist_array( ijsr(k,1)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,2)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_IBAND_1D_R
- !EOC
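-
- ! Usage sketch (added illustration): refreshing a 1-cell halo on a
- ! meridional vector; merid is a placeholder dimensioned with its halo:
- !
- ! real :: merid( DistGrid%j_strt-1 : DistGrid%j_stop+1 )
- ! call UPDATE_HALO_IBAND_1D_R( DistGrid, merid, 1, status )
- ! IF_NOTOK_RETURN(status=1)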
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_1D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal vector
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_1D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_1d_r'
- integer :: i0, i1
-
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4)
- integer :: k, sz(1), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
-
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1) = dist_array(i1-halo+1:i1)
- dist_array(i1+1:i1+halo) = dist_array(i0:i0+halo-1)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
-
- ! Communicate
- ! ---------------- ! only east and west
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
-
- neigh : do k=1,2
- call MPI_ISEND( dist_array( ijsr(k,1)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,2)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1) = dist_array(i1-halo+1:i1)
- dist_array(i1+1:i1+halo) = dist_array(i0:i0+halo-1)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_1D_R
- !EOC
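-
- ! Usage sketch (added illustration): same pattern for a zonal vector. Note
- ! that when DistGrid%is_periodic, the east/west halo wraps around, with or
- ! without MPI. zonal is a placeholder:
- !
- ! real :: zonal( DistGrid%i_strt-1 : DistGrid%i_stop+1 )
- ! call UPDATE_HALO_JBAND_1D_R( DistGrid, zonal, 1, status )
- ! IF_NOTOK_RETURN(status=1)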
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_2D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal 2d slice
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_2D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_2d_r'
- integer :: i0, i1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype
- integer :: k, sz(2), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1,:) = dist_array(i1-halo+1:i1,:)
- dist_array(i1+1:i1+halo,:) = dist_array(i0:i0+halo-1,:)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
- ! pack data
- !----------
- call MPI_TYPE_VECTOR (sz(2), halo, sz(1), my_real, wetype, ierr)
- call MPI_TYPE_COMMIT (wetype, ierr)
-
- ! Communicate
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
- neigh : do k=1,2 ! only east and west
- call MPI_ISEND( dist_array(ijsr(k,1),1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array(ijsr(k,2),1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(wetype, ierr)
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1,:) = dist_array(i1-halo+1:i1,:)
- dist_array(i1+1:i1+halo,:) = dist_array(i0:i0+halo-1,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_2D_R
- !EOC
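-
- ! Added note on the packing above: wetype selects a halo-wide strip along I
- ! in each of the sz(2) columns, so each edge is sent in place, without an
- ! intermediate buffer. A conceptually equivalent, but copying, formulation
- ! of the west-edge send would be (west and tag being placeholders):
- !
- ! call MPI_SEND( dist_array(i0:i0+halo-1,:), halo*sz(2), my_real, &
- ! west, tag, localComm, ierr )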
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_3D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal 3d slice
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_3D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_3d_r'
- integer :: i0, i1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype
- integer :: k, sz(3), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1,:,:) = dist_array(i1-halo+1:i1,:,:)
- dist_array(i1+1:i1+halo,:,:) = dist_array(i0:i0+halo-1,:,:)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
- ! pack data
- !----------
- call MPI_TYPE_VECTOR (sz(2)*sz(3), halo, sz(1), my_real, wetype, ierr)
- call MPI_TYPE_COMMIT (wetype, ierr)
- ! Communicate
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
- neigh : do k=1,2 ! only east and west
- call MPI_ISEND( dist_array(ijsr(k,1),1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array(ijsr(k,2),1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(wetype, ierr)
- #else
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1,:,:) = dist_array(i1-halo+1:i1,:,:)
- dist_array(i1+1:i1+halo,:,:) = dist_array(i0:i0+halo-1,:,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_3D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_4D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal 4d slice
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_4D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_4d_r'
- integer :: i0, i1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype
- integer :: k, sz(4), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1,:,:,:) = dist_array(i1-halo+1:i1,:,:,:)
- dist_array(i1+1:i1+halo,:,:,:) = dist_array(i0:i0+halo-1,:,:,:)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
- ! pack data
- !----------
- call MPI_TYPE_VECTOR (sz(2)*sz(3)*sz(4), halo, sz(1), my_real, wetype, ierr)
- call MPI_TYPE_COMMIT (wetype, ierr)
- ! Communicate
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
- neigh : do k=1,2 ! only east and west
- call MPI_ISEND( dist_array(ijsr(k,1),1,1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array(ijsr(k,2),1,1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(wetype, ierr)
- #else
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1,:,:,:) = dist_array(i1-halo+1:i1,:,:,:)
- dist_array(i1+1:i1+halo,:,:,:) = dist_array(i0:i0+halo-1,:,:,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_4D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_2D_R
- !
- ! !DESCRIPTION: update halo cells of a distributed 2D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_2D_R( DistGrid, dist_array, halo, status, i_only, j_only )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- logical, optional, intent(in) :: i_only ! update East & West halo only
- logical, optional, intent(in) :: j_only ! update North & South halo only
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_2d_r'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8)
- integer :: k, sz(2), tag_snd(4), tag_rcv(4)
- integer :: srtype(4), ijsr(4,4)
-
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
-
- ! get types and I/J start
- CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
-
- neigh : do k=1,4
-
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), &
- tag_rcv(k), localComm, req(k+4), ierr)
- end do neigh
- call MPI_WAITALL(8, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call FREE_DERIVED_TYPE( srtype )
- #else
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
-
- dist_array(i1-halo:i1-1,j1:j2) = dist_array(i2-halo+1:i2,j1:j2)
- dist_array(i2+1:i2+halo,j1:j2) = dist_array(i1:i1+halo-1,j1:j2)
- end if
- #endif
- status = 0
-
- END SUBROUTINE UPDATE_HALO_2D_R
- !EOC
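-
- ! Usage sketch (added illustration): a typical 2D halo refresh with a
- ! 1-cell halo; psurf is a placeholder field allocated with its halo:
- !
- ! real, allocatable :: psurf(:,:)
- ! allocate( psurf( DistGrid%i_strt-1:DistGrid%i_stop+1, &
- ! DistGrid%j_strt-1:DistGrid%j_stop+1 ) )
- ! call UPDATE_HALO_2D_R( DistGrid, psurf, 1, status )
- ! IF_NOTOK_RETURN(status=1)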
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_2D_I
- !
- ! !DESCRIPTION: update halo cells of a distributed 2D integer array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_2D_I( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- integer, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) not tested yet, but the version for 'real' has been...
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_2D_i'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(2)
- integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4)
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
- ! get types and I/J start
- CALL GET_HALO_TYPE( distGrid, sz, halo, MPI_INTEGER, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
- neigh : do k=1,4
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), &
- tag_rcv(k), localComm, req(k+4), ierr)
- end do neigh
- call MPI_WAITALL(8, req, stat, ierr)
- call FREE_DERIVED_TYPE( srtype )
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
- dist_array(i1-halo:i1-1,j1:j2) = dist_array(i2-halo+1:i2,j1:j2)
- dist_array(i2+1:i2+halo,j1:j2) = dist_array(i1:i1+halo-1,j1:j2)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_2D_I
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_3D_R
- !
- ! !DESCRIPTION: update halo cells of a distributed 3D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_3D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_3d_r'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(3)
- integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4)
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
- ! get types and I/J start
- CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
- neigh : do k=1,4
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2), 1), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, &
- req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4), 1), 1, srtype(k), DistGrid%neighbors(k), tag_rcv(k), localComm, &
- req(k+4), ierr)
- end do neigh
-
- call MPI_WAITALL(8, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call FREE_DERIVED_TYPE( srtype )
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
- dist_array(i1-halo:i1-1,j1:j2,:) = dist_array(i2-halo+1:i2,j1:j2,:)
- dist_array(i2+1:i2+halo,j1:j2,:) = dist_array(i1:i1+halo-1,j1:j2,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_3D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_4D_R
- !
- ! !DESCRIPTION: update halo cells of a distributed 4D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_4D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_4D_r'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(4)
- integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4)
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
- ! get types and I/J start
- CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
- neigh : do k=1,4
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2), 1, 1), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), &
- localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4), 1, 1), 1, srtype(k), DistGrid%neighbors(k), tag_rcv(k), &
- localComm, req(k+4), ierr)
- end do neigh
- call MPI_WAITALL(8, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
-
- call FREE_DERIVED_TYPE( srtype )
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
- dist_array(i1-halo:i1-1,j1:j2,:,:) = dist_array(i2-halo+1:i2,j1:j2,:,:)
- dist_array(i2+1:i2+halo,j1:j2,:,:) = dist_array(i1:i1+halo-1,j1:j2,:,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_4D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_2D_R
- !
- ! !DESCRIPTION: gather local 2D arrays into a global 2D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_2D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) I have not used mpi_gatherv because of the varying *receiving* size
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_2d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(2)
- status=0
-
- ! check input, get derived types
- !-------------------------------
- sz = shape(dist_array)
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
-
- if ( isRoot ) then
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1) = dist_array(i0:i1,j0:j1)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_SEND( dist_array(i0,j0), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
-
- END SUBROUTINE GATHER_2D_R
- !EOC
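-
- ! Usage sketch (added illustration): gathering a distributed 2D field on
- ! root for output. The global array needs its full (im_region,jm_region)
- ! size on root only (see the remark in GATHER_4D_R), so it can in
- ! principle be a dummy 1x1 array elsewhere. lfield and gfield are
- ! placeholders:
- !
- ! real, allocatable :: gfield(:,:)
- ! if (isRoot) then
- ! allocate( gfield(DistGrid%im_region, DistGrid%jm_region) )
- ! else
- ! allocate( gfield(1,1) )
- ! end if
- ! call GATHER_2D_R( DistGrid, lfield, gfield, 1, status )
- ! IF_NOTOK_RETURN(status=1)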
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_2D_I
- !
- ! !DESCRIPTION: gather local 2D arrays into a global 2D array (integer version)
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_2D_I( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- integer, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: glbl_array(:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_2d_i'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(2)
- status=0
- ! check input, get derived types
- !-------------------------------
- sz = shape(dist_array)
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, MPI_INTEGER, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1) = dist_array(i0:i1,j0:j1)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_SEND( dist_array(i0,j0), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_2D_I
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_3D_R
- !
- ! !DESCRIPTION: gather local 3D arrays into a global 3D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_3D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_3d_r'
-
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, : )
- status = 0
- #else
-
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(3)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1,:) = dist_array(i0:i1,j0:j1,:)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j,1), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_SEND( dist_array(i0,j0,1), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_3D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_4D_R
- !
- ! !DESCRIPTION: gather local 4D arrays into a global 4D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_4D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) the global array needs its full, global size on root only
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_4d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :, :)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(4)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then ! RECV
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1,:,:) = dist_array(i0:i1,j0:j1,:,:)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j,1,1), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else !SEND
- call MPI_SEND( dist_array(i0,j0,1,1), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_5D_R
- !
- ! !DESCRIPTION: gather local 5D arrays into a global 5D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_5D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) the global array needs its full, global size on root only
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_5d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(5)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then ! RECV
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1,:,:,:) = dist_array(i0:i1,j0:j1,:,:,:)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j,1,1,1), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else !SEND
- call MPI_SEND( dist_array(i0,j0,1,1,1), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_5D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_2D_R
- !
- ! !DESCRIPTION: scatter a 2D global real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_2D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:)
- integer, intent(in) :: halo
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: exactly the same as GATHER_2D_R, but inverting send/recv
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_2d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop ) = glbl_array
- status = 0
- #else
-
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(2)
- status=0
-
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- SCATTER -------------
- if ( isRoot ) then ! send
- ! copy local chunk directly
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- dist_array(i0:i1,j0:j1) = glbl_array(i0:i1,j0:j1)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_SSEND( glbl_array(i,j), 1, ginterior(klm), klm, klm, localComm, ierr)
- IF_NOTOK_MPI(status=1)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_COMM_RANK(localComm, klm, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_RECV( dist_array(i0,j0), 1, linterior, root, klm, localComm, stat, ierr)
- IF_NOTOK_MPI(status=1)
-
- call MPI_TYPE_FREE(linterior, ierr)
- IF_NOTOK_MPI(status=1)
- end if
- #endif
- END SUBROUTINE SCATTER_2D_R
- !EOC
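-
- ! Usage sketch (added illustration): the mirror operation of GATHER_2D_R,
- ! distributing a field read on root. read_field, lfield and gfield are
- ! placeholders:
- !
- ! if (isRoot) call read_field( gfield ) ! fill global array on root
- ! call SCATTER_2D_R( DistGrid, lfield, gfield, 1, status )
- ! IF_NOTOK_RETURN(status=1)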
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_3D_R
- !
- ! !DESCRIPTION: scatter 3D global real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_3D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: glbl_array(:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: the global array needs its full, global size on root only
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_3d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(3)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- SCATTER -------------
- if ( isRoot ) then ! send
- ! copy local chunk directly
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- dist_array(i0:i1,j0:j1,:) = glbl_array(i0:i1,j0:j1,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_SSEND( glbl_array(i,j,1), 1, ginterior(klm), klm, 1, localComm, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_RECV( dist_array(i0,j0,1), 1, linterior, root, 1, localComm, stat, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_3D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_4D_R
- !
- ! !DESCRIPTION: scatter 4D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_4D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: glbl_array(:,:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_4d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop,:,:) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(4)
- status=0
-
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- SCATTER -------------
- if ( isRoot ) then ! send
- ! copy local chunk directly
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
-
- dist_array(i0:i1,j0:j1,:,:) = glbl_array(i0:i1,j0:j1,:,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_SSEND( glbl_array(i,j,1,1), 1, ginterior(klm), klm, 1, localComm, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
-
- else
- call MPI_RECV( dist_array(i0,j0,1,1), 1, linterior, root, 1, localComm, stat, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_4D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_IBAND_1D_R
- !
- ! !DESCRIPTION: scatter a meridional real vector (1D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_IBAND_1D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:)
- integer, optional, intent(in) :: iref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%j_strt:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 1D version, along J index, of scatter_2d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_iband_1d_r'
- #ifndef MPI
- dist_array( DistGrid%j_strt:DistGrid%j_stop ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE)
- integer :: x_iref, n, klm, i0, j0, i1, j1, sz(1), i0t, j0t, i1t, j1t
- status=0
- ! ------- Check inputs
- x_iref=1
- if(present(iref)) x_iref=iref
-
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, shape(glbl_array), iband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
-
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((x_iref>=i0).and.(x_iref<=i1)) dist_array(j0:j1) = glbl_array(j0:j1)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! is klm a target processor?
- if((x_iref<i0t).or.(x_iref>i1t))cycle
- n=j1t-j0t+1
- call MPI_SSEND( glbl_array(j0t), n, my_real, klm, 1, localComm, ierr)
- end do
-
- else
-
- ! are we on a target processor?
- if((x_iref<i0).or.(x_iref>i1))return
-
- n=j1-j0+1
- call MPI_RECV( dist_array(j0), n, my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_IBAND_1D_R
- !EOC
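-
- ! Usage sketch (added illustration): distributing the meridional column at
- ! global index i=25; only PEs whose I-range contains iref receive data.
- ! mcol and gcol are placeholders:
- !
- ! real :: mcol( DistGrid%j_strt : DistGrid%j_stop )
- ! call SCATTER_IBAND_1D_R( DistGrid, mcol, gcol, status, iref=25 )
- ! IF_NOTOK_RETURN(status=1)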
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_1D_R
- !
- ! !DESCRIPTION: scatter a zonal real vector (1D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_1D_R( DistGrid, dist_array, glbl_array, status, jref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 1D version, along I index, of scatter_2d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_1d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE)
- integer :: x_jref, n, klm, i0, j0, i1, j1, sz(1), i0t, j0t, i1t, j1t
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, shape(glbl_array), jband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1) = glbl_array(i0:i1)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! is klm a target processor?
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_SSEND( glbl_array(i0t), n, my_real, klm, 1, localComm, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0), n, my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_1D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_2D_R
- !
- ! !DESCRIPTION: scatter a zonal slice (2D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_2D_R( DistGrid, dist_array, glbl_array, status, jref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 2D version, along I index, of scatter_3d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_2d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_jref, n, klm, sz(2), gsz(2)
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- !local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:) = glbl_array(i0:i1,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a target processor, pack and send
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(i0t,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0,1), n*sz(2), my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_2D_R
- !EOC
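-
- ! Added note on the packing above: for each target PE, subarr picks n cells
- ! out of every gsz(1)-long global row, so the strided section is sent in
- ! place and received as a contiguous n*sz(2) block. A conceptually
- ! equivalent, but copying, send would be:
- !
- ! call MPI_SSEND( glbl_array(i0t:i1t,:), n*sz(2), my_real, klm, 1, &
- ! localComm, ierr )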
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_IBAND_2D_R
- !
- ! !DESCRIPTION: scatter a meridional real array (2D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_IBAND_2D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:)
- integer, optional, intent(in) :: iref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%j_strt:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 2D version, along J index, of scatter_3d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_iband_2d_r'
- #ifndef MPI
- dist_array( DistGrid%j_strt:DistGrid%j_stop,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_iref, n, klm, sz(2), gsz(2)
- status=0
- ! ------- Check inputs
- x_iref=1
- if(present(iref)) x_iref=iref
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((x_iref>=i0).and.(x_iref<=i1)) dist_array(j0:j1,:) = glbl_array(j0:j1,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! is klm a target processor?
- if((x_iref<i0t).or.(x_iref>i1t))cycle
-
- n=j1t-j0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(j0t,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
-
- end do
- else
- ! are we on a target processor?
- if((x_iref<i0).or.(x_iref>i1))return
- n=j1-j0+1
- call MPI_RECV( dist_array(j0,1), n*sz(2), my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_IBAND_2D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_3D_R
- !
- ! !DESCRIPTION: scatter a zonal slice (3D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_3D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:,:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- logical, optional, intent(in) :: rowcom ! to scatter from row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- !
- ! !REMARKS: 3D version, along I index, of scatter_4d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_3d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop,:,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_jref, n, klm, sz(3), gsz(3), slab, tgt_root
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default scatter from global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(x_jref>=j0).and.(x_jref<=j1)
- tgt_root = RowRootId
- endif
- endif
-
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- !local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:,:) = glbl_array(i0:i1,:,:)
- ! send remaining chunks to other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
-
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a target processor, pack and send
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(i0t,1,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0,1,1), n*sz(2)*sz(3), my_real, tgt_root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_3D_R
- !EOC
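-
- ! Usage sketch (added illustration): with rowcom=.true., the slice is
- ! scattered from the row-root of the PE row owning jref instead of from
- ! the global root, e.g. when each row reads its own boundary data. lslice
- ! and gslice are placeholders:
- !
- ! call SCATTER_JBAND_3D_R( DistGrid, lslice, gslice, status, &
- ! jref=DistGrid%j_strt, rowcom=.true. )
- ! IF_NOTOK_RETURN(status=1)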
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_4D_R
- !
- ! !DESCRIPTION: scatter a zonal slice (4D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_4D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:,:,:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- logical, optional, intent(in) :: rowcom ! to scatter from row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 17 Feb 2015 - Ph. Le Sager - v0
- !
- ! !REMARKS: 4D version, along I index, of scatter_jband_3d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_4d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_jref, n, klm, sz(4), gsz(4), slab, tgt_root
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default scatter from global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(x_jref>=j0).and.(x_jref<=j1)
- tgt_root = RowRootId
- endif
- endif
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- !local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:,:,:) = glbl_array(i0:i1,:,:,:)
- ! send remaining chunks to other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a target processor, pack and send
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
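- ! Same datatype construction as in the 3D routine, except the 2D slab
- ! is replicated sz(3)*sz(4) times: the two trailing dimensions are
- ! flattened into a single stack of levels.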
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3)*sz(4), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(i0t,1,1,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0,1,1,1), n*sz(2)*sz(3)*sz(4), my_real, tgt_root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_4D_R
- !EOC
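-
- !--------------------------------------------------------------------------
- ! Minimal usage sketch for the J-band scatter routines above, assuming
- ! they are bound to the same generic SCATTER_J_BAND interface used for
- ! the 1D/2D versions (array names here are hypothetical):
- !
- !   real :: glb(im_region, lm, ntrac)   ! significant on root only
- !   real :: loc(i_strt:i_stop, lm, ntrac)
- !   call scatter_j_band( DistGrid, loc, glb, status, jref=1 )
- !   IF_NOTOK_RETURN(status=1)
- !--------------------------------------------------------------------------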
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_JBAND_2D_R
- !
- ! !DESCRIPTION: gather a zonal slice (2D) on root. For 2D arrays whose
- ! first dimension is distributed along I (making it a J-band)
- ! and whose second dimension is *not* distributed along J.
- ! For example: [i1:i2, lev], or [i1:i2, trac]
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_JBAND_2D_R( DistGrid, dist_array, glbl_array, status, jref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%i_strt:,:)
- integer, intent(in) :: jref ! to find sources
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_jband_2d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,: )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(2), gsz(2)
- status=0
- ! ------- Check inputs
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:) = dist_array(i0:i1,:)
- ! receive remaining chunks from other pes
- do klm=1, npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((jref<j0t).or.(jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
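- ! note that jref doubles as the message tag, so gathers for different
- ! rows cannot be confused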
- call MPI_RECV( glbl_array(i0t,1), 1, subarr, klm, jref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((jref<j0).or.(jref>j1))return
- n=i1-i0+1
- call MPI_SEND( dist_array(i0,1), n*sz(2), my_real, root, jref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_JBAND_2D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_JBAND_3D_R
- !
- ! !DESCRIPTION: Gather a zonal slab (3D) on root or rowroot(jref) [i.e. the
- ! root of the row of procs].
- ! Local arrays [i1:i2, a:b, c:d] are gathered into the root
- ! proc as [1:im, 1:b-a+1, 1:d-c+1]. Caller has to ensure that at least
- ! **ALL** the ROW procs call this routine, plus root if needed.
- ! No halo possible yet.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_JBAND_3D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%i_strt:,:,:)
- integer, intent(in) :: jref ! to find sources (defines the row we want)
- logical, optional, intent(in) :: rowcom ! to gather on row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS: use in budget for gathering fluxes, advect_cfl
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_jband_3d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(3), gsz(3), slab, tgt_root
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
-
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default gather into global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(jref>=j0).and.(jref<=j1)
- tgt_root = RowRootId
- endif
- endif
-
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
-
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- ! local case
- if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:,:) = dist_array(i0:i1,:,:)
- ! receive remaining chunks from other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
-
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((jref<j0t).or.(jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(i0t,1,1), 1, subarr, klm, jref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((jref<j0).or.(jref>j1))return
- n=i1-i0+1
- call MPI_SEND( dist_array(i0,1,1), n*sz(2)*sz(3), my_real, tgt_root, jref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_JBAND_3D_R
- !EOC
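-
- !--------------------------------------------------------------------------
- ! Minimal usage sketch, assuming the generic GATHER_J_BAND interface
- ! (names are hypothetical). To gather row jref=j on that row's root:
- !
- !   call gather_j_band( DistGrid, loc, glb, status, jref=j, rowcom=.true. )
- !   IF_NOTOK_RETURN(status=1)
- !
- ! Remember that at least all procs of that row (and the global root when
- ! rowcom is absent or false) must make the call.
- !--------------------------------------------------------------------------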
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_JBAND_4D_R
- !
- ! !DESCRIPTION: Gather a zonal slab (4D) on root or rowroot(jref) [i.e. the
- ! root of the row of procs].
- ! Local arrays [i1:i2, a:b, c:d, e:f] are gathered into the root
- ! proc as [1:im, 1:b-a+1, 1:d-c+1, 1:f-e+1]. Caller has to
- ! ensure that at least **ALL** the ROW procs call this routine,
- ! plus root if needed.
- ! No halo possible yet.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_JBAND_4D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%i_strt:,:,:,:)
- integer, intent(in) :: jref ! to find sources (defines the row we want)
- logical, optional, intent(in) :: rowcom ! to gather on row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 17 Feb 2015 - Ph. Le Sager - v0
- !
- ! !REMARKS: use in advectx
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_jband_4d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(4), gsz(4), slab, tgt_root, stack
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default gather into global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(jref>=j0).and.(jref<=j1)
- tgt_root = RowRootId
- endif
- endif
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- stack = sz(3)*sz(4)
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- ! local case
- if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:,:,:) = dist_array(i0:i1,:,:,:)
- ! receive remaining chunks from other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((jref<j0t).or.(jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (stack, 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(i0t,1,1,1), 1, subarr, klm, jref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((jref<j0).or.(jref>j1))return
- n=i1-i0+1
- call MPI_SEND( dist_array(i0,1,1,1), n*sz(2)*sz(3)*sz(4), my_real, tgt_root, jref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_JBAND_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_IBAND_1D_R
- !
- ! !DESCRIPTION: gather a meridional vector (dimension distributed along J)
- ! on root. For example: [j1:j2]
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_IBAND_1D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%j_strt:)
- integer, intent(in) :: iref ! to define source processors
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! - all processors with an i-range containing IREF are used.
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_iband_1d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%j_strt:DistGrid%j_stop )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE)
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(1), gsz(1)
- status=0
- ! ------- Check inputs
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.)
- ! write(gol,*)"lbound m",lbound(dist_array); call goPr
- ! write(gol,*)"ubound m",ubound(dist_array); call goPr
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((iref>=i0).and.(iref<=i1)) glbl_array(j0:j1) = dist_array(j0:j1)
- ! receive remaining chunks from other pes
- do klm=1, npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((iref<i0t).or.(iref>i1t))cycle
- n=j1t-j0t+1
- call MPI_RECV( glbl_array(j0t), n, my_real, klm, iref, localComm, stat, ierr)
- end do
- else
- ! are we on a src processor?
- if((iref<i0).or.(iref>i1)) return
- n=j1-j0+1
- call MPI_SEND( dist_array(j0), n, my_real, root, iref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_IBAND_1D_R
- !EOC
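- !--------------------------------------------------------------------------
- ! Sketch, assuming a generic GATHER_I_BAND interface analogous to the
- ! SCATTER_I_BAND one (hypothetical names): gather the meridional vector
- ! of the procs whose I-range contains iref=1 onto root:
- !
- !   call gather_i_band( DistGrid, loc_j, glb_j, status, iref=1 )
- !--------------------------------------------------------------------------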
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_IBAND_3D_R
- !
- ! !DESCRIPTION: gather a meridional slice (3D) on root. For arrays like:
- ! [j1:j2, lev, trac], that is without a distributed I dim.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_IBAND_3D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%j_strt:,:,:)
- integer, intent(in) :: iref ! to find sources
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS: use in budget for gathering fluxes
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_iband_3d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%j_strt:DistGrid%j_stop,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(3), gsz(3), slab
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- status=0
- ! ------- Check inputs
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((iref>=i0).and.(iref<=i1)) glbl_array(j0:j1,:,:) = dist_array(j0:j1,:,:)
- ! receive remaining chunks from other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((iref<i0t).or.(iref>i1t))cycle
- n=j1t-j0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(j0t,1,1), 1, subarr, klm, iref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((iref<i0).or.(iref>i1))return
- n=j1-j0+1
- call MPI_SEND( dist_array(j0,1,1), n*sz(2)*sz(3), my_real, root, iref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_IBAND_3D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: REDUCE_2D_R
- !
- ! !DESCRIPTION: Global reduction. Data are gathered on root, where OP is
- ! then applied, instead of applying OP on each proc and then
- ! calling MPI_REDUCE. This ensures bitwise agreement with
- ! serial-code results in the case of SUM.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE REDUCE_2D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM'
- logical, intent(in), optional :: all ! mimic mpi_allreduce instead of mpi_reduce
- logical, intent(in), optional :: debug ! print additional information: location of Min/Max
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: resultat
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'REDUCE_2D_R'
- logical :: x_debug
- integer :: shp(2)
- real, allocatable :: glbl_array(:,:)
- x_debug=.false.
- if(present(debug)) x_debug=debug
-
- #ifdef MPI
- if (isRoot) then
- allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region ))
- else
- allocate( glbl_array(1,1) )
- end if
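- ! non-root procs pass a dummy 1x1 array: the gathered array is only
- ! referenced on root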
-
- call gather(DistGrid, dist_array, glbl_array, halo, status)
- IF_NOTOK_RETURN(status=1)
- if (isRoot) then
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(glbl_array)
- case('max', 'Max', 'MAX')
- resultat = maxval(glbl_array)
- if(x_debug) then
- shp=maxloc(glbl_array)
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(glbl_array)
- if(x_debug) then
- shp=minloc(glbl_array)
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- end if
- if (present(all)) then
- if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr)
- end if
- deallocate(glbl_array)
- #else
- select case( op )
-
- case('sum', 'Sum', 'SUM')
- resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- case('max', 'Max', 'MAX')
- resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- if(x_debug) then
- shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- if(x_debug) then
- shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
-
- #endif
-
- status=0
-
- END SUBROUTINE REDUCE_2D_R
- !EOC
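-
- !--------------------------------------------------------------------------
- ! Minimal usage sketch ('mass' is a hypothetical 2D field, halo 0):
- !
- !   call reduce( DistGrid, mass, 0, 'SUM', total, status, all=.true. )
- !   IF_NOTOK_RETURN(status=1)
- !
- ! With all=.true. the root result is broadcast, mimicking MPI_ALLREDUCE;
- ! without it only root holds a defined RESULTAT.
- !--------------------------------------------------------------------------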
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: REDUCE_3D_R
- !
- ! !DESCRIPTION: same as REDUCE_2D_R, for 3D arrays.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE REDUCE_3D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM'
- logical, intent(in), optional :: all ! mimic MPI_ALLREDUCE instead of MPI_REDUCE
- logical, intent(in), optional :: debug ! print additional information: location of Min/Max
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: resultat
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'REDUCE_3D_R'
- integer :: shp(3)
- logical :: x_debug
- real, allocatable :: glbl_array(:,:,:)
- x_debug=.false.
- if(present(debug)) x_debug=debug
- #ifdef MPI
- shp = shape( dist_array )
- if (isRoot) then
- allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region, shp(3)) )
- else
- allocate( glbl_array(1,1,1) )
- end if
- call gather(DistGrid, dist_array, glbl_array, halo, status)
- IF_NOTOK_RETURN(status=1)
- if (isRoot) then
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(glbl_array)
- case('max', 'Max', 'MAX')
- resultat = maxval(glbl_array)
- if(x_debug) then
- shp=maxloc(glbl_array)
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(glbl_array)
- if(x_debug) then
- shp=minloc(glbl_array)
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- end if
- if (present(all)) then
- if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr)
- end if
- #else
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- case('max', 'Max', 'MAX')
- resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- if(x_debug) then
- shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- if(x_debug) then
- shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- #endif
- status=0
- END SUBROUTINE REDUCE_3D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: REDUCE_4D_R
- !
- ! !DESCRIPTION: same as REDUCE_2D_R, for 4D arrays.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE REDUCE_4D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM'
- logical, intent(in), optional :: all ! mimic MPI_ALLREDUCE instead of MPI_REDUCE
- logical, intent(in), optional :: debug ! print additional information: location of Min/Max
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: resultat
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'reduce_4d_r'
- integer :: shp(4)
- logical :: x_debug
- real, allocatable :: glbl_array(:,:,:,:)
- x_debug=.false.
- if(present(debug)) x_debug=debug
- #ifdef MPI
- shp = shape( dist_array )
- if (isRoot) then
- allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region, shp(3), shp(4)) )
- else
- allocate( glbl_array(1,1,1,1) )
- end if
- call gather(DistGrid, dist_array, glbl_array, halo, status)
- IF_NOTOK_RETURN(status=1)
- if (isRoot) then
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(glbl_array)
- case('max', 'Max', 'MAX')
- resultat = maxval(glbl_array)
- if(x_debug) then
- shp=maxloc(glbl_array)
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(glbl_array)
- if(x_debug) then
- shp=minloc(glbl_array)
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- end if
- if (present(all)) then
- if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr)
- end if
- #else
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- case('max', 'Max', 'MAX')
- resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- if(x_debug) then
- shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- if(x_debug) then
- shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- #endif
- status=0
- END SUBROUTINE REDUCE_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: DIST_ARR_STAT_2D_R
- !
- ! !DESCRIPTION: calculate and print the MIN, MEAN, MAX of a 2D distributed
- ! real field. This is basically a wrapper around several calls
- ! to REDUCE.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE DIST_ARR_STAT_2D_R( DistGrid, name, arr, halo, status)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- character(len=*), intent(in) :: name ! verbose helper
- integer, intent(in) :: halo
- real, intent(in) :: arr(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out):: status
- !
- ! !REVISION HISTORY:
- ! 7 Mar 2012 - P. Le Sager - adapted for lon-lat MPI domain
- ! decomposition, from DD_FIELD_STATISTICS in
- ! both DIFFUSION.F90 and DRY_DEPOSITION.F90
- !
- ! !REMARKS:
- ! (1) FIXME : does not compute the mean of only non-zero data anymore
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname=mname//'dist_arr_stat_2d_r'
- integer :: ntel_non_zero, nx, ny
- real :: maxf, minf, meanf, mean_non_zero
- ! --- begin -------------------------------
- call reduce( DistGrid, arr, halo, 'MAX', maxf, status)
- IF_NOTOK_RETURN(status=1)
- call reduce( DistGrid, arr, halo, 'MIN', minf, status)
- IF_NOTOK_RETURN(status=1)
- call reduce( DistGrid, arr, halo, 'SUM', meanf,status)
- IF_NOTOK_RETURN(status=1)
- ! get mean
- nx = DistGrid%im_region
- ny = DistGrid%jm_region
- meanf = meanf / ( nx*ny )
- if(isRoot) then
- write(gol,'(a10,3(a5,1x,1pe10.3))') name,' max=', maxf,' min=',minf,'mean=',meanf!,'mn0',mean_non_zero
- call goPr
- end if
-
- status=0
-
- END SUBROUTINE DIST_ARR_STAT_2D_R
- !EOC
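- ! Sketch, assuming the routine is reachable through a generic
- ! DIST_ARR_STAT interface ('emis' is a hypothetical field):
- !
- !   call dist_arr_stat( DistGrid, 'emis', emis, 0, status )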
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: PRINT_DISTGRID
- !
- ! !DESCRIPTION: utility that prints information about a distributed grid
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE PRINT_DISTGRID ( DistGrid )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
-
- integer, parameter :: maxrow=5
- integer, parameter :: maxcol=5
- integer :: sz1(1), i1
- ! header
- write(gol,*) "========== Start Distributed Grid ==================="; call goPr
- ! dist_grid
- write(gol,*) "im_region :" , DistGrid%im_region ; call goPr
- write(gol,*) "jm_region :" , DistGrid%jm_region ; call goPr
- write(gol,*) "i_strt :" , DistGrid%i_strt ; call goPr
- write(gol,*) "i_stop :" , DistGrid%i_stop ; call goPr
- write(gol,*) "i_strt_halo :" , DistGrid%i_strt_halo ; call goPr
- write(gol,*) "i_stop_halo :" , DistGrid%i_stop_halo ; call goPr
- write(gol,*) "j_strt :" , DistGrid%j_strt ; call goPr
- write(gol,*) "j_stop :" , DistGrid%j_stop ; call goPr
- write(gol,*) "j_strt_halo :" , DistGrid%j_strt_halo ; call goPr
- write(gol,*) "j_stop_halo :" , DistGrid%j_stop_halo ; call goPr
- write(gol,*) "has_north_pole:" , DistGrid%has_north_pole ; call goPr
- write(gol,*) "has_south_pole:" , DistGrid%has_south_pole ; call goPr
- ! physical grid
- write(gol,*) '------------- physical grid -------------------' ; call goPr
- write(gol,*) "llgrid name:", trim(DistGrid%lli%name) ; call goPr
- write(gol,*) "R[m] :", DistGrid%lli%R ; call goPr
- write(gol,*) "dlon[deg] :", DistGrid%lli%dlon_deg ; call goPr
- write(gol,*) "dlat[deg] :", DistGrid%lli%dlat_deg ; call goPr
- write(gol,*) "im :", DistGrid%lli%im ; call goPr
- write(gol,*) "jm :", DistGrid%lli%jm ; call goPr
- if (associated(DistGrid%lli%lon_deg)) then
- sz1 = shape(DistGrid%lli%lon_deg)
- i1 = min(maxcol,sz1(1))
- write(gol,*) "lon = ",DistGrid%lli%lon_deg(1:i1); call goPr
- endif
- if (associated(DistGrid%lli%lat_deg)) then
- sz1=shape(DistGrid%lli%lat_deg)
- i1=min(maxrow,sz1(1))
- write(gol,*) "lat = ",DistGrid%lli%lat_deg(1:i1); call goPr
- endif
- ! footer
- write(gol,*) "========== End Distributed Grid ===================" ; call goPr
- END SUBROUTINE PRINT_DISTGRID
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: TESTCOMM
- !
- ! !DESCRIPTION: check if the communications are working as expected.
- ! Currently checked:
- ! - GATHER, SCATTER, UPDATE_HALO (2D, 3D, 4D)
- ! - SCATTER_I_BAND, SCATTER_J_BAND (1D, 2D)
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE TESTCOMM( DistGrid, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) to run with different halo sizes
- ! (2) note that it will not catch some errors in halo_update if run with too few processors
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'testcomm'
- ! real, allocatable :: src1(:), tgt1(:), res1(:)
- ! real, allocatable :: src2(:,:), tgt2(:,:), res2(:,:)
-
- ! real, allocatable :: larray2a(:,:), larray2b(:,:), glb2a(:,:), glb2b(:,:)
- ! real, allocatable :: larray3a(:,:,:), larray3b(:,:,:), glb3a(:,:,:), glb3b(:,:,:)
- ! real, allocatable :: larray4a(:,:,:,:), larray4b(:,:,:,:)
- ! real, allocatable :: glb4a(:,:,:,:), glb4b(:,:,:,:)
- ! integer :: i0, j0, i1, j1, x_halo, k, levels, l, trac, iref(2), jref(2)
- ! logical :: south, north, west, east, test
-
- ! character(len=*), parameter :: form='(f4.0)'
- ! levels=17
- ! trac=5
- ! ! General
- ! call Get_DistGrid( DistGrid, I_STRT=i0, I_STOP=i1, J_STRT=j0, J_STOP=j1, &
- ! hasEastBorder=east, hasWestBorder=west, &
- ! hasSouthBorder=south, hasNorthBorder=north )
- ! x_halo=halo
- ! status=0
- ! if(isRoot) print*, "========= TESTING COMM FOR HALO=",x_HALO
- ! call par_barrier()
-
- ! ! *************************** SCATTER BAND ***************************
- ! iref=(/ 1, DistGrid%im_region/) ! to test i_band along west and east border
- ! jref=(/ 1, DistGrid%jm_region/) ! to test j_band along south and north border
-
- ! if(isRoot) then
- ! allocate(src1(DistGrid%im_region))
- ! else
- ! allocate(src1(1))
- ! end if
- ! allocate(tgt1(i0:i1), res1(i0:i1))
- ! if (isRoot) src1 = (/(k, k=1,DistGrid%im_region)/)
- ! res1 = (/(k, k=i0,i1)/)
- ! do trac=1,2
- ! tgt1=0
- ! call scatter_j_band(distgrid, tgt1, src1, status, jref=jref(trac))
- ! IF_NOTOK_RETURN(status=1)
-
- ! test=((trac==1).and.south).or.((trac==2).and.north)
-
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res1-tgt1)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_J_band 1D FAILED at border:"
- ! !print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! status=1
- ! !else ! Expected only if tgt1 has inout attribute in scatter routine
- ! ! print*, "test scatter_J_band 1D PASSED inside:"
- ! ! print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_J_band 1D PASSED at border"
- ! !print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! !else ! Expected only if tgt1 has inout attribute in scatter routine
- ! ! print*, "test scatter_J_band 1D FAILED inside"
- ! ! print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src1, tgt1, res1)
- ! if(isRoot) then
- ! allocate(src1(DistGrid%jm_region))
- ! else
- ! allocate(src1(1))
- ! end if
- ! allocate(tgt1(j0:j1), res1(j0:j1))
- ! if (isRoot) src1 = (/(k, k=1,DistGrid%jm_region)/)
- ! res1 = (/(k, k=j0,j1)/)
- ! do trac=1,2
- ! tgt1=0
-
- ! call scatter_i_band(distgrid, tgt1, src1, status, iref=iref(trac))
- ! IF_NOTOK_RETURN(status=1)
- ! test=((trac==1).and.west).or.((trac==2).and.east)
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res1-tgt1)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_I_band 1D FAILED at border"
- ! status=1
- ! !else
- ! ! print*, "test scatter_I_band 1D PASSED inside"
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_I_band 1D PASSED at border"
- ! !else
- ! ! print*, "test scatter_I_band 1D FAILED inside"
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src1, tgt1, res1)
- ! ! ---------------- 2D
- ! if(isRoot) then
- ! allocate(src2(DistGrid%im_region, levels))
- ! else
- ! allocate(src2(1,1))
- ! end if
- ! allocate(tgt2(i0:i1,levels), res2(i0:i1,levels))
- ! do l=1,levels
- ! if (isRoot) src2(:,l) = (/(k, k=1,DistGrid%im_region)/) * l
- ! res2(:,l) = (/(k, k=i0,i1)/)* l
- ! end do
- ! do trac=1,2
- ! tgt2=0
- ! call scatter_j_band(distgrid, tgt2, src2, status, jref=jref(trac))
- ! IF_NOTOK_RETURN(status=1)
- ! test=((trac==1).and.south).or.((trac==2).and.north)
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res2-tgt2)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_J_band 2D FAILED at border"
- ! status=1
- ! !else
- ! ! print*, "test scatter_J_band 2D PASSED inside"
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_J_band 2D PASSED at border"
- ! !else
- ! ! print*, "test scatter_J_band 2D FAILED inside"
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src2, tgt2, res2)
- ! if(isRoot) then
- ! allocate(src2(DistGrid%jm_region,levels))
- ! else
- ! allocate(src2(1,1))
- ! end if
- ! allocate(tgt2(j0:j1,levels), res2(j0:j1,levels))
- ! do l=1,levels
- ! if (isRoot) src2(:,l) = (/(k, k=1,DistGrid%jm_region)/)*l
- ! res2(:,l) = (/(k, k=j0,j1)/)*l
- ! enddo
- ! do trac=1,2
- ! tgt2=0
- ! call scatter_i_band(distgrid, tgt2, src2, status, iref=iref(trac))
- ! IF_NOTOK_RETURN(status=1)
- ! test=((trac==1).and.west).or.((trac==2).and.east)
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res2-tgt2)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_I_band 2D FAILED at border"
- ! status=1
- ! !else
- ! ! print*, "test scatter_I_band 2D PASSED inside"
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_I_band 2D PASSED at border"
- ! !else
- ! ! print*, "test scatter_I_band 2D FAILED inside"
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src2, tgt2, res2)
-
- ! ! *************************** GATHER/SCATTER ***************************
-
- ! !----------------------
- ! ! Allocate 2D
- ! !----------------------
- ! allocate( larray2a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! allocate( larray2b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! allocate( glb2a(DistGrid%im_region, DistGrid%jm_region) )
- ! ! in halo, 0, elsewhere myid
- ! larray2a=0.
- ! larray2a(i0:i1,j0:j1)=real(myid)
- ! ! glb2b is global array, used in root only
- ! if (isRoot) then
- ! allocate( glb2b( DistGrid%im_region, DistGrid%jm_region) )
- ! else
- ! allocate( glb2b(1,1) )
- ! end if
- ! !----------------------
- ! ! test GATHER_2D_R
- ! !----------------------
- ! glb2b=0.
- ! larray2b=0.
- ! ! gather
- ! call gather( DistGrid, larray2a, glb2b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! broadcast result
- ! if (isRoot) glb2a = glb2b
- ! #ifdef MPI
- ! call MPI_bcast(glb2a, size(glb2a), my_real, root, localComm, ierr)
- ! #endif
- ! larray2b(i0:i1,j0:j1) = glb2a(i0:i1,j0:j1)
- ! larray2b = larray2a-larray2b
- ! ! diff should be 0
- ! if (maxval(abs(larray2b)) /= 0.) then
- ! print*, "test gather 2d FAILED"
- ! status=1
- ! else
- ! print*, "test gather 2d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
-
- ! call par_barrier()
- ! !----------------------
- ! ! test SCATTER_2D_R
- ! !----------------------
- ! larray2b=0.
- ! call scatter( DistGrid, larray2b, glb2b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! larray2b=larray2a-larray2b
- ! ! diff should be 0
- ! if (maxval(abs(larray2b)) /= 0.) then
- ! print*, "test scatter 2d FAILED"
- ! status=1
- ! else
- ! print*, "test scatter 2d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray2a,larray2b,glb2a,glb2b)
- ! call par_barrier()
-
- ! !----------------------
- ! ! Allocate 3D
- ! !----------------------
- ! allocate( larray3a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! allocate( larray3b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! allocate( glb3a( DistGrid%im_region, DistGrid%jm_region, levels) )
- ! ! in halo, 0, elsewhere myid*level
- ! larray3a=0.
- ! do k=1,levels
- ! larray3a(i0:i1,j0:j1,k)=real(myid*k)
- ! end do
- ! ! glb2b is global array, used in root only
- ! if (isRoot) then
- ! allocate( glb3b( DistGrid%im_region, DistGrid%jm_region, levels) )
- ! else
- ! allocate( glb3b(1,1,1) )
- ! end if
- ! !----------------------
- ! ! test GATHER_3D_R
- ! !----------------------
- ! glb3b=0.
- ! larray3b=0.
- ! ! gather
- ! call gather( DistGrid, larray3a, glb3b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! broadcast result
- ! if (isRoot) glb3a = glb3b
- ! #ifdef MPI
- ! call MPI_bcast(glb3a, size(glb3a), my_real, root, localComm, ierr)
- ! #endif
- ! larray3b(i0:i1,j0:j1,:) = glb3a(i0:i1,j0:j1,:)
- ! larray3b = larray3a-larray3b
- ! ! diff should be 0
- ! if (maxval(abs(larray3b)) /= 0.) then
- ! print*, "test gather 3d FAILED"
- ! status=1
- ! else
- ! print*, "test gather 3d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! call par_barrier()
-
- ! !----------------------
- ! ! test SCATTER_3D_R
- ! !----------------------
- ! larray3b=0.
- ! call scatter( DistGrid, larray3b, glb3b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! larray3b=larray3a-larray3b
- ! ! diff should be 0
- ! if (maxval(abs(larray3b)) /= 0.) then
- ! print*, "test scatter 3d FAILED"
- ! status=1
- ! else
- ! print*, "test scatter 3d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray3a,larray3b,glb3a,glb3b)
- ! call par_barrier()
- ! !----------------------
- ! ! Allocate 4D
- ! !----------------------
- ! allocate( larray4a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! allocate( larray4b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! allocate( glb4a( DistGrid%im_region, DistGrid%jm_region, levels, trac) )
- ! ! in halo, 0, elsewhere (myid+1)*level*trac
- ! larray4a=0.
- ! do l=1,trac
- ! do k=1,levels
- ! larray4a(i0:i1,j0:j1,k,l)=real((myid+1)*k*l)
- ! end do
- ! end do
-
- ! ! glb4b is global array, used in root only
- ! if (isRoot) then
- ! allocate( glb4b( DistGrid%im_region, DistGrid%jm_region, levels, trac) )
- ! else
- ! allocate( glb4b(1,1,1,1) )
- ! end if
- ! !----------------------
- ! ! test GATHER_4D_R
- ! !----------------------
- ! glb4b=0.
- ! larray4b=0.
- ! ! gather
- ! call gather( DistGrid, larray4a, glb4b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! broadcast result
- ! if (isRoot) glb4a = glb4b
- ! #ifdef MPI
- ! call MPI_bcast(glb4a, size(glb4a), my_real, root, localComm, ierr)
- ! #endif
- ! larray4b(i0:i1,j0:j1,:,:) = glb4a(i0:i1,j0:j1,:,:)
- ! larray4b = larray4a-larray4b
- ! ! diff should be 0
- ! if (maxval(abs(larray4b)) /= 0.) then
- ! print*, "test gather 4d FAILED"
- ! status=1
- ! else
- ! print*, "test gather 4d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
-
- ! call par_barrier()
-
- ! !----------------------
- ! ! test SCATTER_4D_R
- ! !----------------------
- ! larray4b=0.
- ! call scatter( DistGrid, larray4b, glb4b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! larray4b=larray4a-larray4b
- ! ! diff should be 0
- ! if (maxval(abs(larray4b)) /= 0.) then
- ! print*, "test scatter 4d FAILED"
- ! status=1
- ! else
- ! print*, "test scatter 4d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray4a,larray4b,glb4a,glb4b)
-
- ! call par_barrier()
-
- ! ! *************************************** HALO ***************************
-
- ! !----------------------
- ! ! test UPDATE_HALO_2D_R
- ! !----------------------
- ! ! Allocate 2D
- ! allocate( larray2a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! allocate( larray2b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! ! array to update : in halo set to 0, elsewhere to myid
- ! larray2b=0.
- ! larray2b(i0:i1,j0:j1)=real(myid)
- ! ! test array : fill halo with neighbors rank
- ! larray2a=0.
- ! larray2a( i0-x_halo:i0-1, j0:j1 ) = DistGrid%neighbors(1) ! west halo
- ! larray2a( i1+1:i1+x_halo, j0:j1 ) = DistGrid%neighbors(3) ! east halo
- ! larray2a( i0:i1, j1+1:j1+x_halo ) = DistGrid%neighbors(2) ! north halo
- ! larray2a( i0:i1, j0-x_halo:j0-1 ) = DistGrid%neighbors(4) ! south halo
- ! larray2a(i0:i1,j0:j1)=real(myid)
- ! where (larray2a == MPI_PROC_NULL) larray2a=0.
-
- ! ! update
- ! CALL UPDATE_HALO( DISTGRID, larray2b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! if (isRoot.and.(x_halo==1)) then ! 32 is size to look good for 2x2 processes and halo=1
- ! print*, "----------------------------"
- ! print '(32F4.0)', larray2a
- ! print*, "----------------------------"
- ! print '(32F4.0)', larray2b
- ! print*, "----------------------------"
- ! end if
- ! ! compare (diff should be 0)
- ! larray2b=larray2a-larray2b
- ! if (maxval(abs(larray2b)) /= 0.) then
- ! print*, "test update_halo 2d FAILED"
- ! status=1
- ! else
- ! print*, "test update_halo 2d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray2a,larray2b)
- ! call par_barrier()
-
- ! !----------------------
- ! ! test UPDATE_HALO_3D_R
- ! !----------------------
- ! ! Allocate 3D
- ! allocate( larray3a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! allocate( larray3b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! ! array to update : in halo set to 0, elsewhere to myid
- ! larray3b=0.
- ! larray3b(i0:i1,j0:j1,:)=real(myid)
- ! ! test array : fill halo with neighbors rank
- ! larray3a=0.
- ! larray3a( i0-x_halo:i0-1, j0:j1, : ) = DistGrid%neighbors(1) ! west halo
- ! larray3a( i1+1:i1+x_halo, j0:j1, : ) = DistGrid%neighbors(3) ! east halo
- ! larray3a( i0:i1, j1+1:j1+x_halo, : ) = DistGrid%neighbors(2) ! north halo
- ! larray3a( i0:i1, j0-x_halo:j0-1, : ) = DistGrid%neighbors(4) ! south halo
- ! larray3a(i0:i1,j0:j1,:)=real(myid) ! interior
- ! where (larray3a == MPI_PROC_NULL) larray3a=0. !if no neighbor
- ! ! update
- ! CALL UPDATE_HALO( DISTGRID, larray3b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! compare (diff should be 0)
- ! larray3b=larray3a-larray3b
-
- ! if (maxval(abs(larray3b)) /= 0.) then
- ! print*, "test update_halo 3d FAILED"
- ! status=1
- ! else
- ! print*, "test update_halo 3d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
-
- ! ! CLEANUP
- ! deallocate(larray3a,larray3b)
-
- ! call par_barrier()
-
- ! !----------------------
- ! ! test UPDATE_HALO_4D_R
- ! !----------------------
- ! ! Allocate 4D
- ! allocate( larray4a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! allocate( larray4b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! ! array to update : in halo set to 0, elsewhere to myid
- ! larray4b=0.
- ! larray4b(i0:i1,j0:j1,:,:)=real(myid)
- ! ! test array : fill halo with neighbors rank
- ! larray4a=0.
- ! larray4a( i0-x_halo:i0-1, j0:j1, :,: ) = DistGrid%neighbors(1) ! west halo
- ! larray4a( i1+1:i1+x_halo, j0:j1, :,: ) = DistGrid%neighbors(3) ! east halo
- ! larray4a( i0:i1, j1+1:j1+x_halo, :,: ) = DistGrid%neighbors(2) ! north halo
- ! larray4a( i0:i1, j0-x_halo:j0-1, :,: ) = DistGrid%neighbors(4) ! south halo
- ! larray4a(i0:i1,j0:j1,:,:)=real(myid) ! interior
- ! where (larray4a == MPI_PROC_NULL) larray4a=0. !if no neighbor
- ! ! update
- ! CALL UPDATE_HALO( DISTGRID, larray4b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! compare (diff should be 0)
- ! larray4b=larray4a-larray4b
- ! if (maxval(abs(larray4b)) /= 0.) then
- ! print*, "test update_halo 4d FAILED"
- ! status=1
- ! else
- ! print*, "test update_halo 4d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray4a,larray4b)
-
- ! call par_barrier()
- status=0
-
- END SUBROUTINE TESTCOMM
- !EOC
- END MODULE DOMAIN_DECOMP