- !
- #define TRACEBACK write (gol,'("in ",a," (",a,", line",i5,")")') rname, __FILE__, __LINE__; call goErr
- #define IF_NOTOK_RETURN(action) if (status/=0) then; TRACEBACK; action; return; end if
- #define IF_ERROR_RETURN(action) if (status> 0) then; TRACEBACK; action; return; end if
- !
- #define IF_NOTOK_MPI(action) if (ierr/=MPI_SUCCESS) then; TRACEBACK; action; return; end if
- !
- #include "tm5.inc"
- !
- !----------------------------------------------------------------------------
- ! TM5 !
- !----------------------------------------------------------------------------
- !BOP
- !
- ! !MODULE: DOMAIN_DECOMP
- !
- ! !DESCRIPTION: Define a distributed grid object and its methods.
- ! Horizontal grid is decomposed across longitudes and latitudes.
- !\\
- !\\
- ! !INTERFACE:
- !
- MODULE DOMAIN_DECOMP
- !
- ! !USES:
- !
- use grid, only : TllGridInfo ! Type with Lon/Lat Grid Info
- use Go, only : goErr, goPr, gol ! go = general objects
- use dims, only : okdebug
- use partools ! to include mpif.h, ierr, localComm,...
-
- IMPLICIT NONE
- PRIVATE
- !
- ! !PUBLIC MEMBER FUNCTIONS:
- !
- public :: Init_DistGrid, Done_DistGrid ! life cycle routines
- public :: Get_DistGrid ! get bounds & grids
- public :: Print_DistGrid ! print utility (debug)
-
- public :: GATHER, SCATTER, UPDATE_HALO ! communication routines
- public :: GATHER_I_BAND, GATHER_J_BAND ! communication routines
- public :: SCATTER_I_BAND, SCATTER_J_BAND! for distributed slices
- public :: UPDATE_HALO_JBAND !
- public :: UPDATE_HALO_IBAND !
-
- public :: REDUCE ! mimic MPI_REDUCE / MPI_ALLREDUCE
- public :: DIST_ARR_STAT ! basic statistics of a distributed array
- public :: testcomm ! test communication (debug)
- !
- ! !PUBLIC TYPES:
- !
- TYPE, PUBLIC :: DIST_GRID
- PRIVATE
- ! parameters for global domain
- integer :: im_region ! number of longitudes
- integer :: jm_region ! number of latitudes
- ! parameters for local domain
- integer :: i_strt ! begin local domain longitude index
- integer :: i_stop ! end local domain longitude index
- integer :: i_strt_halo ! begin halo longitude index
- integer :: i_stop_halo ! end halo longitude index
- integer :: j_strt ! begin local domain latitude index
- integer :: j_stop ! end local domain latitude index
- integer :: j_strt_halo ! begin halo latitude index
- integer :: j_stop_halo ! end halo latitude index
- type(TllGridInfo) :: lli ! local Lat-Lon grid info
- type(TllGridInfo) :: glbl_lli ! global Lat-Lon grid info
- type(TllGridInfo) :: lli_z ! global meridional grid info
- integer :: neighbors(4) ! rank of (west, north, east, south) processes
- logical :: has_south_pole ! south pole is in local domain
- logical :: has_north_pole ! north pole is in local domain
- logical :: has_south_boundary ! south boundary is in local domain
- logical :: has_north_boundary ! north boundary is in local domain
- logical :: has_west_boundary ! west boundary is in local domain
- logical :: has_east_boundary ! east boundary is in local domain
- logical :: is_periodic ! east-west periodic (longitudes cover the full circle)
- ! i_start, i_stop, j_start, j_stop of all PEs: (4,npes)
- integer, pointer :: bounds(:,:)
-
- END TYPE DIST_GRID
- !
- ! !PRIVATE DATA MEMBERS:
- !
- character(len=*), parameter :: mname='Domain_Decomp_MOD_'
- !
- ! !INTERFACE:
- !
- INTERFACE Init_DistGrid
- MODULE PROCEDURE INIT_DISTGRID_FROM_REGION
- MODULE PROCEDURE INIT_DISTGRID_FROM_LLI
- END INTERFACE
-
- INTERFACE Update_halo ! for arrays distributed across both I and J (1st and 2nd dim)
- MODULE PROCEDURE Update_halo_2d_r
- MODULE PROCEDURE Update_halo_3d_r
- MODULE PROCEDURE Update_halo_4d_r
- MODULE PROCEDURE Update_halo_2d_i
- END INTERFACE
- INTERFACE Update_halo_jband ! for arrays distributed across I (1st dim)
- MODULE PROCEDURE Update_halo_jband_1d_r
- MODULE PROCEDURE Update_halo_jband_2d_r
- MODULE PROCEDURE Update_halo_jband_3d_r
- MODULE PROCEDURE Update_halo_jband_4d_r
- END INTERFACE
-
- INTERFACE Update_halo_iband ! for arrays distributed across J (1st dim)
- MODULE PROCEDURE Update_halo_iband_1d_r
- END INTERFACE
-
- INTERFACE Gather
- MODULE PROCEDURE gather_2d_i
- MODULE PROCEDURE gather_2d_r
- MODULE PROCEDURE gather_3d_r
- MODULE PROCEDURE gather_4d_r
- MODULE PROCEDURE gather_5d_r
- END INTERFACE
-
- INTERFACE Scatter
- MODULE PROCEDURE scatter_2d_r
- MODULE PROCEDURE scatter_3d_r
- MODULE PROCEDURE scatter_4d_r
- END INTERFACE
- INTERFACE Gather_I_Band
- MODULE PROCEDURE gather_iband_1d_r
- ! MODULE PROCEDURE gather_iband_2d_r
- MODULE PROCEDURE gather_iband_3d_r
- END INTERFACE
- INTERFACE Gather_J_Band
- ! MODULE PROCEDURE gather_jband_1d_r
- MODULE PROCEDURE gather_jband_2d_r
- MODULE PROCEDURE gather_jband_3d_r
- MODULE PROCEDURE gather_jband_4d_r
- END INTERFACE
- INTERFACE Scatter_I_Band
- MODULE PROCEDURE scatter_iband_1d_r
- MODULE PROCEDURE scatter_iband_2d_r
- END INTERFACE
- INTERFACE Scatter_J_Band
- MODULE PROCEDURE scatter_jband_1d_r
- MODULE PROCEDURE scatter_jband_2d_r
- MODULE PROCEDURE scatter_jband_3d_r
- MODULE PROCEDURE scatter_jband_4d_r
- END INTERFACE
- INTERFACE Reduce
- MODULE PROCEDURE reduce_2d_r
- MODULE PROCEDURE reduce_3d_r
- MODULE PROCEDURE reduce_4d_r
- END INTERFACE
- INTERFACE Dist_Arr_Stat
- MODULE PROCEDURE dist_arr_stat_2d_r
- END INTERFACE
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 13 Feb 2013 - P. Le Sager - Remove deprecated MPI_GET_EXTENT and
- ! MPI_TYPE_HVECTOR.
- ! - Extra garbage clean.
- ! 21 Jun 2013 - P. Le Sager - Replace all MPI_SEND with MPI_SSEND in all
- ! scatter routines to avoid buffering.
- !
- ! !REMARKS:
- ! (1) There are two categories of subroutines here: one to define the
- ! distributed grid objects, the other for communication between processes
- ! (update_halo for one-to-one comm, and scatter/gather/reduce for
- ! collective comm).
- ! Communication routines are for data DECOMPOSED on the domain. For
- ! more general comm, see the partools module.
- !
- ! (2) GATHER & SCATTER :
- ! - global arrays have to really be global on root only, and can be
- ! (1,1,..) on other processes.
- ! - global arrays are without halo.
- ! - if not using MPI, gather and scatter just copy arrays, so in that
- ! case it may be better not to call them, to save memory.
- !
- ! (3) Be careful when passing a slice (or a pointer to a slice) as an argument:
- !
- ! Passing a subarray will *NOT* work most of the time, unless it is a
- ! slice on the last dimension(s). This will give erroneous results:
- !
- ! allocate( gbl_arr(-3:imr, 1:jmr ))
- ! call gather( dgrid, local_arr, gbl_arr(1:imr,:), halo, status)
- !
- ! but passing full size array will work:
- !
- ! allocate( gbl_arr(-3:imr, 1:jmr ))
- ! allocate( temp(1:imr,1:jmr) )
- ! call gather( dgrid, local_arr, temp, halo, status)
- ! gbl_arr(1:imr,:) = temp
- !
- ! (4) Scatter[Gather]_I[J]_band are communications between a row or column
- ! of processors and the root processor (optionally the row_root in a few
- ! cases).
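- !
- ! (5) Typical life cycle (a sketch only, not code from this module;
- !     'dgrid', 'local_arr', 'glbl_arr', the bounds i0/i1/j0/j1 and
- !     'status' are assumed to be declared by the caller):
- !
- !       call Init_DistGrid( dgrid, region, myid, nplon, nplat, 2, status)
- !       call Get_DistGrid( dgrid, i_strt=i0, i_stop=i1, j_strt=j0, j_stop=j1)
- !       allocate( local_arr(i0:i1, j0:j1) )
- !       ! ... work on the local domain ...
- !       call Gather( dgrid, local_arr, glbl_arr, 0, status)  ! halo=0
- !       call Done_DistGrid( dgrid, status )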
- !EOP
- !------------------------------------------------------------------------
- CONTAINS
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: DISTGRID_RANGE
- !
- ! !DESCRIPTION: Give the range of indices covered by a rank when using nprocs.
- ! This is used for one dimension, and thus called twice.
- ! Distribution is done as evenly as possible. E.g., 11 boxes
- ! are distributed across 3 processes as 4, 4, and 3.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE DISTGRID_RANGE(ij, rank, nprocs, istart, iend)
- !
- ! !INPUT PARAMETERS:
- !
- integer, intent(in) :: ij ! defines range (1,..,ij) to be distributed
- integer, intent(in) :: rank ! current process, in (0,.., nprocs-1)
- integer, intent(in) :: nprocs ! total # of processes
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out):: istart, iend ! index range covered by rank
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- integer :: iwork1, iwork2
- iwork1 = ij/nprocs
- iwork2 = mod(ij,nprocs)
- istart = rank * iwork1 + 1 + min(rank, iwork2)
- iend = istart + iwork1 - 1
- if (iwork2 > rank) iend = iend + 1
- END SUBROUTINE DISTGRID_RANGE
- !EOC
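-
- ! Worked example (comment only), following the 11-boxes-on-3-processes
- ! case above (iwork1=3, iwork2=2):
- !
- !   rank=0 : istart = 0*3 + 1 + min(0,2) = 1, iend = 1+3-1 +1 = 4 (4 boxes)
- !   rank=1 : istart = 1*3 + 1 + min(1,2) = 5, iend = 5+3-1 +1 = 8 (4 boxes)
- !   rank=2 : istart = 2*3 + 1 + min(2,2) = 9, iend = 9+3-1 = 11 (3 boxes)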
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: INIT_DISTGRID_FROM_REGION
- !
- ! !DESCRIPTION: initialize a distributed grid object for a TM5 region
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE INIT_DISTGRID_FROM_REGION( DistGrid, region, rank, nplon, nplat, halo, status)
- !
- ! !USES:
- !
- use grid, only : init
- use dims, only : xbeg, xend, dx, im, xcyc
- use dims, only : ybeg, yend, dy, jm, touch_np, touch_sp
- !
- ! !RETURN VALUE:
- !
- type(dist_grid), intent(inout) :: DistGrid
- !
- ! !INPUT PARAMETERS:
- !
- integer, intent(in) :: region ! TM5 region number
- integer, intent(in) :: rank ! current process in (0,.., nprocs-1)
- integer, intent(in) :: nplon ! number of processes along longitude
- integer, intent(in) :: nplat ! number of processes along latitude
- integer, intent(in), optional :: halo ! halo size (default=0)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- !
- ! (1) Ideally this is called with halo=<the halo used in the code>... but
- ! the same halo is not used for all data sets. Hence, in many subroutines,
- ! halo has to be independent of the halo carried by the distributed grid.
- ! We could simplify things by using a halo of 2 for all distributed data,
- ! or disregard the halo in the dist_grid, but we just keep it general (as
- ! is).
- !
- ! (2) npe_lat/lon are also available through partools, but may not have
- ! been initialized yet, so we keep the nplon/nplat inputs here. Could do
- ! without them though...
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'init_distgrid_from_region'
- integer :: start, end, iwidth, i, iml, jml, iwork(2), lshape(4)
- real :: yb, xb, dlon, dlat
- character(len=39) :: name
- ! for pretty print
- integer, parameter :: maxrow=5
- integer, parameter :: maxcol=5
- integer :: i1, j1, j2, k, work
- character(len=3) :: str1
- ! for virtual topology
- integer, allocatable :: dist_map(:,:) ! 2D process topology (np_lon, np_lat)
- integer, allocatable :: dist_mapw(:,:) ! 2D process topology with neighbors (np_lon+1, np_lat+1)
- integer :: rank_lon ! index of current process in azimuthal set (1,..,np_lon)
- integer :: rank_lat ! index of current process in meridional set (1,..,np_lat)
- !---------------------------------------------
- ! 2D process topology, and mapping 1D <-> 2D
- !---------------------------------------------
- ! besides the periodicity, topology is independent of the region
- allocate(dist_map(nplon, nplat))
- npes = nplat*nplon
- dist_map = reshape( (/ (i,i=0,npes-1) /), shape=(/ nplon, nplat /))
-
- ! store
- iwork = maxloc(dist_map, mask=dist_map.eq.rank)
- rank_lon = iwork(1) ! in [1..nplon]
- rank_lat = iwork(2) ! in [1..nplat]
- ! Neighbors = 2d process map with extra neighbors
- allocate(dist_mapw(0:nplon+1, 0:nplat+1))
- dist_mapw = MPI_PROC_NULL
- dist_mapw(1:nplon,1:nplat) = dist_map
- ! East-West periodicity
- DistGrid%is_periodic =.false.
- if (xcyc(region)==1) then
- dist_mapw( 0,1:nplat) = dist_map(nplon,:)
- dist_mapw(nplon+1,1:nplat) = dist_map(1,:)
- DistGrid%is_periodic =.true.
- end if
- DistGrid%neighbors = (/ dist_mapw(rank_lon-1, rank_lat ), & ! west
- dist_mapw(rank_lon, rank_lat+1), & ! north
- dist_mapw(rank_lon+1, rank_lat ), & ! east
- dist_mapw(rank_lon, rank_lat-1) /) ! south
- !---------------------------------------------
- ! fill in distributed grid info
- !---------------------------------------------
- ! region boundaries
- DistGrid%has_south_boundary = (rank_lat == 1 )
- DistGrid%has_north_boundary = (rank_lat == nplat)
- DistGrid%has_west_boundary = (rank_lon == 1 )
- DistGrid%has_east_boundary = (rank_lon == nplon)
- ! poles
- DistGrid%has_south_pole = DistGrid%has_south_boundary .and. (touch_sp(region) == 1)
- DistGrid%has_north_pole = DistGrid%has_north_boundary .and. (touch_np(region) == 1)
-
- ! max halo that will be used in the code
- iwidth=0
- if (present(halo)) iwidth=halo
- ! index ranges covered by current process
- call DistGrid_range(im(region), rank_lon-1, nplon, start, end)
- ! check we are within the limit, i.e. we must have #boxes >= halo and at least 1.
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in X (longitude) direction! ", nplon, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
-
- DistGrid%im_region = im(region)
- DistGrid%i_strt = start
- DistGrid%i_stop = end
- DistGrid%i_strt_halo = start-iwidth
- DistGrid%i_stop_halo = end+iwidth
- ! To think about it when/if needed:
- ! if (DistGrid%has_north_pole) DistGrid%i_stop_halo = end
- ! if (DistGrid%has_south_pole) DistGrid%i_strt_halo = start
- call DistGrid_range(jm(region), rank_lat-1, nplat, start, end)
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in Y (latitude) direction! ", nplat, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
- DistGrid%jm_region = jm(region)
- DistGrid%j_strt = start
- DistGrid%j_stop = end
- DistGrid%j_strt_halo = start-iwidth
- DistGrid%j_stop_halo = end+iwidth
-
- #ifdef parallel_cplng
- if ( modulo(im(region),nplon) /= 0) then
- write(gol,'("number of X processors ",i3," does not divide evenly the number of grid boxes",i3)') &
- nplon, im(region) ; call goErr
- status=1; TRACEBACK; return
- endif
- if ( modulo(jm(region),nplat) /= 0) then
- write(gol,'("number of Y processors ",i3," does not divide evenly the number of grid boxes ",i3)') &
- nplat, jm(region) ; call goErr
- status=1; TRACEBACK; return
- endif
- #endif
-
- !---------------------------------------------
- ! geophysical grids
- !---------------------------------------------
- ! grid spacing:
- dlon = real(xend(region)-xbeg(region))/im(region)
- dlat = real(yend(region)-ybeg(region))/jm(region)
- !------ local
- iml = DistGrid%i_stop - DistGrid%i_strt + 1
- jml = DistGrid%j_stop - DistGrid%j_strt + 1
- xb = xbeg(region) + ( DistGrid%i_strt - 0.5 ) * dlon
- yb = ybeg(region) + ( DistGrid%j_strt - 0.5 ) * dlat
- write(name, '("distributed grid on process ", i5.5)') rank
-
- call INIT( DistGrid%lli, xb, dlon, iml, yb, dlat, jml, status, name=name )
- IF_NOTOK_RETURN(status=1)
- !------ global
- xb = xbeg(region) + 0.5 * dlon
- yb = ybeg(region) + 0.5 * dlat
- write(name, '("global grid on process ", i5.5)') rank
- call INIT( DistGrid%glbl_lli, xb, dlon, im(region), yb, dlat, jm(region), status, name=name )
- IF_NOTOK_RETURN(status=1)
-
- !------ global meridional grid
- name = "merid " // trim(name)
- call INIT( DistGrid%lli_z, 0.0, 360., 1, yb, dlat, jm(region), status, name=name )
- IF_NOTOK_RETURN(status=1)
- !---------------------------------------------
- ! store shape info of all PEs on all PEs (used only on root so far, but may
- ! become useful if we gather/scatter to/from non-root in the future)
- !---------------------------------------------
- #ifdef MPI
- allocate(DistGrid%bounds(4,0:npes-1))
- lshape = (/ DistGrid%i_strt, DistGrid%i_stop, DistGrid%j_strt, DistGrid%j_stop /)
- call MPI_ALLGATHER(lshape, 4, MPI_INTEGER, DistGrid%bounds, 4, MPI_INTEGER, localComm, ierr)
- #endif
- !---------------------------------------------
- ! PRINT (Debug) 2D process topology
- !---------------------------------------------
- if(okdebug)then
- write(gol,*) '------------- world view ----------------------' ; call goPr
- write(gol, '("process #", i5, " out of ", i5)') myid, npes ; call goPr
- write(gol, '("2D rank = [",i4,", ",i4,"]")') rank_lon, rank_lat ; call goPr
- i1=min(maxcol,nplon)
- str1='...'
- if (i1==nplon) str1=''
- work=nplat/2
- j1=min(work, maxrow)
- j2=max(nplat-maxrow+1, work+1)
- do k=nplat,j2,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- if ((j2-1) /= j1) then; write(gol,*)" ..."; call goPr; end if
- do k=j1,1,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- write(gol,*) ''
- write(gol,*) '-----------------------------------------------' ; call goPr
- end if
-
- ! Done
- deallocate(dist_map)
- deallocate(dist_mapw)
- status = 0
- END SUBROUTINE INIT_DISTGRID_FROM_REGION
- !EOC
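-
- ! Usage sketch (illustrative only): region 1 decomposed over a 2x3 process
- ! grid with a halo of 2 ('dgrid' and 'status' declared by the caller):
- !
- !   call Init_DistGrid( dgrid, 1, myid, 2, 3, 2, status )
- !   IF_NOTOK_RETURN(status=1)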
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: INIT_DISTGRID_FROM_LLI
- !
- ! !DESCRIPTION: initialize a distributed grid object from a lli object
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE INIT_DISTGRID_FROM_LLI( DistGrid, lli, rank, halo, status)
- !
- ! !USES:
- !
- use grid, only : init, assignment(=)
- !
- ! !INPUT/OUTPUT PARAMETERS:
- !
- type(dist_grid), intent(inout) :: DistGrid
- !
- ! !INPUT PARAMETERS:
- !
- type(TllGridInfo), intent(in) :: lli
- integer, intent(in) :: rank ! current process in (0,.., nprocs-1)
- integer, optional, intent(in) :: halo ! halo size (default=0)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 18 Nov 2013 - Ph. Le Sager - v0, adapted from init_distgrid_from_region
- !
- ! !REMARKS:
- !
- ! (1) See INIT_DISTGRID_FROM_REGION for comment about halo input.
- ! (2) Uses npe_lat/lon from the partools module: TM5_MPI_Init2 must have
- ! been called beforehand.
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'init_distgrid_from_lli'
- integer :: start, end, iwidth, i, iml, jml, iwork(2), lshape(4)
- real :: yb, xb, dlon, dlat
- character(len=39) :: name
- integer :: nplon ! number of processes along longitude
- integer :: nplat ! number of processes along latitude
- ! for pretty print
- integer, parameter :: maxrow=5
- integer, parameter :: maxcol=5
- integer :: i1, j1, j2, k, work
- character(len=3) :: str1
- ! for virtual topology
- integer, allocatable :: dist_map(:,:) ! 2D process topology (np_lon, np_lat)
- integer, allocatable :: dist_mapw(:,:) ! 2D process topology with neighbors (np_lon+1, np_lat+1)
- integer :: rank_lon ! index of current process in azimuthal set (1,..,np_lon)
- integer :: rank_lat ! index of current process in meridional set (1,..,np_lat)
- !---------------------------------------------
- ! test for valid input
- !---------------------------------------------
- if (.not.(associated( lli%lon_deg ))) then
- write(gol,*)" The LLI object has not been initialized. " ; call goErr
- write(gol,*)" A distributed grid for MPI cannot be build." ; call goErr
- status=1; TRACEBACK; return
- end if
- nplat = npe_lat
- nplon = npe_lon
-
- !---------------------------------------------
- ! 2D process topology, and mapping 1D <-> 2D
- !---------------------------------------------
- ! besides the periodicity, topology is independent of the region
- allocate(dist_map(nplon, nplat))
- npes = nplat*nplon
- dist_map = reshape( (/ (i,i=0,npes-1) /), shape=(/ nplon, nplat /))
- ! store
- iwork = maxloc(dist_map, mask=dist_map.eq.rank)
- rank_lon = iwork(1) ! in [1..nplon]
- rank_lat = iwork(2) ! in [1..nplat]
- ! Neighbors = 2d process map with extra neighbors
- allocate(dist_mapw(0:nplon+1, 0:nplat+1))
- dist_mapw = MPI_PROC_NULL
- dist_mapw(1:nplon,1:nplat) = dist_map
- ! East-West periodicity
- DistGrid%is_periodic =.false.
- if ( lli%dlon_deg*lli%im == 360. ) then
- dist_mapw( 0,1:nplat) = dist_map(nplon,:)
- dist_mapw(nplon+1,1:nplat) = dist_map(1,:)
- DistGrid%is_periodic =.true.
- end if
- DistGrid%neighbors = (/ dist_mapw(rank_lon-1, rank_lat ), & ! west
- dist_mapw(rank_lon, rank_lat+1), & ! north
- dist_mapw(rank_lon+1, rank_lat ), & ! east
- dist_mapw(rank_lon, rank_lat-1) /) ! south
- !---------------------------------------------
- ! fill in distributed grid info
- !---------------------------------------------
- ! region boundaries
- DistGrid%has_south_boundary = (rank_lat == 1 )
- DistGrid%has_north_boundary = (rank_lat == nplat)
- DistGrid%has_west_boundary = (rank_lon == 1 )
- DistGrid%has_east_boundary = (rank_lon == nplon)
- ! poles
- DistGrid%has_south_pole = DistGrid%has_south_boundary .and. (lli%blat_deg(0) == -90.0)
- DistGrid%has_north_pole = DistGrid%has_north_boundary .and. (lli%blat_deg(lli%jm) == 90.0)
- ! max halo that will be used in the code
- iwidth=0
- if (present(halo)) iwidth=halo
- ! index ranges covered by current process
- call DistGrid_range(lli%im, rank_lon-1, nplon, start, end)
- ! check we are within the limit, i.e. we must have #boxes >= halo and at least 1.
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in X (longitude) direction:", nplon, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
- DistGrid%im_region = lli%im
- DistGrid%i_strt = start
- DistGrid%i_stop = end
- DistGrid%i_strt_halo = start-iwidth
- DistGrid%i_stop_halo = end+iwidth
- ! To think about it when/if needed:
- ! if (DistGrid%has_north_pole) DistGrid%i_stop_halo = end
- ! if (DistGrid%has_south_pole) DistGrid%i_strt_halo = start
- call DistGrid_range(lli%jm, rank_lat-1, nplat, start, end)
- if ( (end-start+1) < max(1,iwidth) ) then
- write(gol,*)" Too much processors in Y (latitude) direction:", nplat, iwidth, start, end
- call goErr
- status=1; TRACEBACK; return
- end if
- DistGrid%jm_region = lli%jm
- DistGrid%j_strt = start
- DistGrid%j_stop = end
- DistGrid%j_strt_halo = start-iwidth
- DistGrid%j_stop_halo = end+iwidth
- !---------------------------------------------
- ! geophysical grids
- !---------------------------------------------
- !------ tile
- iml = DistGrid%i_stop - DistGrid%i_strt + 1
- jml = DistGrid%j_stop - DistGrid%j_strt + 1
- xb = lli%lon_deg(1) + ( DistGrid%i_strt - 1 ) * lli%dlon_deg
- yb = lli%lat_deg(1) + ( DistGrid%j_strt - 1 ) * lli%dlat_deg
- write(name, '("distributed grid on process ", i5.5)') rank
- call INIT( DistGrid%lli, xb, lli%dlon_deg, iml, yb, lli%dlat_deg, jml, status, name=name )
- IF_NOTOK_RETURN(status=1)
- !------ world
- DistGrid%glbl_lli = lli
-
- !------ world meridional grid
- name = "merid "
- call INIT( DistGrid%lli_z, 0.0, 360., 1, yb, lli%dlat_deg, lli%jm, status, name=name )
- IF_NOTOK_RETURN(status=1)
- !---------------------------------------------
- ! store shape info of all PEs on all PEs (used only on root so far, but may
- ! become useful if we gather/scatter to/from non-root in the future)
- !---------------------------------------------
- #ifdef MPI
- allocate(DistGrid%bounds(4,0:npes-1))
- lshape = (/ DistGrid%i_strt, DistGrid%i_stop, DistGrid%j_strt, DistGrid%j_stop /)
- call MPI_ALLGATHER(lshape, 4, MPI_INTEGER, DistGrid%bounds, 4, MPI_INTEGER, localComm, ierr)
- #endif
- !---------------------------------------------
- ! PRINT (Debug) 2D process topology
- !---------------------------------------------
- if(okdebug)then
- write(gol,*) '------------- world view ----------------------' ; call goPr
- write(gol, '("process #", i5, " out of ", i5)') myid, npes ; call goPr
- write(gol, '("2D rank = [",i4,", ",i4,"]")') rank_lon, rank_lat ; call goPr
- i1=min(maxcol,nplon)
- str1='...'
- if (i1==nplon) str1=''
- work=nplat/2
- j1=min(work, maxrow)
- j2=max(nplat-maxrow+1, work+1)
- do k=nplat,j2,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- if ((j2-1) /= j1) then; write(gol,*)" ..."; call goPr; end if
- do k=j1,1,-1
- write(gol,*) dist_map(1:i1,k),str1 ; call goPr
- enddo
- write(gol,*) ''
- write(gol,*) '-----------------------------------------------' ; call goPr
- end if
- ! Done
- deallocate(dist_map)
- deallocate(dist_mapw)
- status = 0
- END SUBROUTINE INIT_DISTGRID_FROM_LLI
- !EOC
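-
- ! Usage sketch (illustrative only): assumes TM5_MPI_Init2 has already set
- ! npe_lat/npe_lon, and that 'lli' is an initialized TllGridInfo object:
- !
- !   call Init_DistGrid( dgrid, lli, myid, 2, status )  ! halo of 2
- !   IF_NOTOK_RETURN(status=1)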
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: DONE_DISTGRID
- !
- ! !DESCRIPTION: deallocate distributed grid object elements
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE DONE_DISTGRID( DistGrid, STATUS )
- !
- ! !USES:
- !
- use grid, only : done
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(inout) :: DistGrid
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'Done_Distgrid'
- call done(DistGrid%lli, status)
- IF_NOTOK_RETURN(status=1)
- call done(DistGrid%lli_z, status)
- IF_NOTOK_RETURN(status=1)
- call done(DistGrid%glbl_lli, status)
- IF_NOTOK_RETURN(status=1)
-
- if (associated(DistGrid%bounds)) deallocate(DistGrid%bounds)
-
- status=0
- END SUBROUTINE DONE_DISTGRID
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GET_DISTGRID
- !
- ! !DESCRIPTION: provide quick access to object elements (bounds and grids),
- ! while keeping them private.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GET_DISTGRID(DistGrid, &
- i_strt, i_stop, i_strt_halo, i_stop_halo, &
- j_strt, j_stop, j_strt_halo, j_stop_halo, &
- i_wrld, j_wrld, &
- hasSouthPole, hasNorthPole, &
- hasSouthBorder, hasNorthBorder, &
- hasEastBorder, hasWestBorder, &
- lli, lli_z, global_lli )
- !
- ! !USES:
- !
- use grid, only : assignment(=) ! to copy lli
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, optional :: i_strt, i_stop, i_strt_halo, i_stop_halo
- integer, optional :: j_strt, j_stop, j_strt_halo, j_stop_halo
- integer, optional :: i_wrld, j_wrld
- logical, optional :: hasSouthPole, hasNorthPole
- logical, optional :: hasSouthBorder, hasNorthBorder
- logical, optional :: hasWestBorder, hasEastBorder
- type(TllGridInfo), optional :: lli ! local Lat-Lon grid info
- type(TllGridInfo), optional :: global_lli ! global Lat-Lon grid info
- type(TllGridInfo), optional :: lli_z ! global zonal grid info
- !
- ! !REMARKS: If you request lli, you must "call DONE(lli, stat)" once you
- ! are done(!) with it, to avoid a memory leak.
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- if (present(i_strt)) i_strt = DistGrid%i_strt
- if (present(i_stop)) i_stop = DistGrid%i_stop
- if (present(i_strt_halo)) i_strt_halo = DistGrid%i_strt_halo
- if (present(i_stop_halo)) i_stop_halo = DistGrid%i_stop_halo
- if (present(j_strt)) j_strt = DistGrid%j_strt
- if (present(j_stop)) j_stop = DistGrid%j_stop
- if (present(j_strt_halo)) j_strt_halo = DistGrid%j_strt_halo
- if (present(j_stop_halo)) j_stop_halo = DistGrid%j_stop_halo
- if (present(i_wrld)) i_wrld = DistGrid%im_region
- if (present(j_wrld)) j_wrld = DistGrid%jm_region
- if (present(hasSouthPole)) hasSouthPole = DistGrid%has_south_pole
- if (present(hasNorthPole)) hasNorthPole = DistGrid%has_north_pole
-
- if (present(hasSouthBorder)) hasSouthBorder = DistGrid%has_south_boundary
- if (present(hasNorthBorder)) hasNorthBorder = DistGrid%has_north_boundary
- if (present(hasEastBorder )) hasEastBorder = DistGrid%has_east_boundary
- if (present(hasWestBorder )) hasWestBorder = DistGrid%has_west_boundary
- if (present(lli)) lli = DistGrid%lli
- if (present(global_lli)) global_lli = DistGrid%glbl_lli
- if (present(lli_z)) lli_z = DistGrid%lli_z
-
- END SUBROUTINE GET_DISTGRID
- !EOC
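-
- ! Example (sketch only): get the local bounds and a copy of the local grid
- ! info; the copy must be cleaned up afterwards (see REMARKS above):
- !
- !   call Get_DistGrid( dgrid, i_strt=i0, i_stop=i1, j_strt=j0, j_stop=j1, &
- !                      lli=my_lli )
- !   ! ... use i0/i1/j0/j1 and my_lli ...
- !   call Done( my_lli, status )  ! avoids the memory leak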
-
- #ifdef MPI /* MPI TYPES */
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GET_HALO_TYPE
- !
- ! !DESCRIPTION: Returns derived MPI types needed for halo communications,
- ! and the start indices for the send & receive communications.
- ! Four of each are returned, one for each side of the domain,
- ! in the following order: West, North, East, South.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GET_HALO_TYPE( DistGrid, arr_shape, halo, datatype, srtype, ijsr, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: arr_shape(:) ! shape of local array
- integer, intent(in) :: halo ! halo size
- integer, intent(in) :: datatype ! basic MPI datatype
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: srtype(4) ! derived MPI datatypes for send/recv
- integer, intent(out) :: ijsr(4,4) ! start indices for send/recv
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) Not tested on all imaginable cases, but should work with any size
- ! GE 2, and any of the basic numerical MPI_xxx datatypes
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'get_halo_type'
- integer :: nsslice, weslice, nstype, wetype ! MPI derived datatypes
- integer :: n, m, hstride ! sizes to build datatypes
- integer :: ndims, sz, i0, j0, i1, j1
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, vstride
- sz = size(arr_shape)
- ! collapse third and above dimensions
- ndims = 1
- if (sz > 2) ndims = product(arr_shape(3:))
- ! strides
- CALL MPI_TYPE_GET_EXTENT( datatype, lb, sizeoftype, ierr)
- IF_NOTOK_MPI(status=1)
- hstride = arr_shape(1)
- vstride = arr_shape(1)*arr_shape(2)*sizeoftype
-
- ! size of data slice to carry
- n = DistGrid%I_STOP - DistGrid%I_STRT + 1
- m = DistGrid%J_STOP - DistGrid%J_STRT + 1
- ! Type for North and South halo regions
- ! --------------------------------------
- ! halo lines of length N, separated by hstride
- call MPI_TYPE_VECTOR (halo, n, hstride, datatype, nsslice, ierr)
- IF_NOTOK_MPI(status=1)
- ! stack 3rd (and above) dimension if any
- if (ndims == 1) then
- nstype = nsslice
- else
- ! note : also works with ndims=1
- call MPI_TYPE_CREATE_HVECTOR (ndims, 1, vstride, nsslice, nstype, ierr)
- IF_NOTOK_MPI(status=1)
-
- call MPI_TYPE_FREE(nsslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
- call MPI_TYPE_COMMIT (nstype, ierr)
- IF_NOTOK_MPI(status=1)
- ! Type for West and East halo regions
- ! ------------------------------------
- ! M lines of length 'halo', separated by hstride
- call MPI_TYPE_VECTOR (m, halo, hstride, datatype, weslice, ierr)
- IF_NOTOK_MPI(status=1)
- ! stack 3rd (and above) dimension
- if (ndims == 1) then
- wetype = weslice
- else
- ! note : also works with ndims=1
- call MPI_TYPE_CREATE_HVECTOR (ndims, 1, vstride, weslice, wetype, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(weslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
-
- call MPI_TYPE_COMMIT (wetype, ierr)
- IF_NOTOK_MPI(status=1)
- ! Buffers anchors
- ! ----------------
- ! Assuming that neighbors are stored in (West, North, East, South) order
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- srtype = (/ wetype, nstype, wetype, nstype /)
- ijsr(:,1) = (/ i0, i0, i1+1-halo, i0 /) ! i-start location of buffer to send
- ijsr(:,2) = (/ j0, j1-halo+1, j0, j0 /) ! j-start location of buffer to send
- ijsr(:,3) = (/ i0-halo, i0, i1+1, i0 /) ! i-start location of buffer to receive
- ijsr(:,4) = (/ j0, j1+1, j0, j0-halo /) ! j-start location of buffer to receive
- status=0
-
- END SUBROUTINE GET_HALO_TYPE
- !EOC
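-
- ! Sketch of intended use (illustrative only, not the actual UPDATE_HALO
- ! code): 'arr' is a 2D local array with halo, k scans the (West, North,
- ! East, South) neighbors, and 'tag' is any message tag:
- !
- !   call GET_HALO_TYPE( DistGrid, shape(arr), halo, MPI_REAL8, srtype, ijsr, status )
- !   do k=1,4
- !      call MPI_SENDRECV( arr(ijsr(k,1),ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag, &
- !                         arr(ijsr(k,3),ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), tag, &
- !                         localComm, MPI_STATUS_IGNORE, ierr )
- !   end do
- !   call FREE_DERIVED_TYPE( srtype )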
-
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GET_INTERIOR_TYPE
- !
- ! !DESCRIPTION: Returns derived MPI types that describe the interior domains
- ! needed for collective communications.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GET_INTERIOR_TYPE( DistGrid, shp, datatype, linterior, ginterior, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: shp(:) ! shape of local array
- integer, intent(in) :: datatype ! basic MPI datatype
- !
- ! !OUTPUT PARAMETERS:
- !
- ! derived MPI datatypes describing distributed interior domains:
- integer, intent(out) :: ginterior(npes-1) ! within global array (used by root, as many as NPES-1)
- integer, intent(out) :: linterior ! within local array (used by non-root)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) inputs must be checked beforehand, by calling CHECK_DIST_ARR first
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'get_interior_type'
- integer :: gslice, lslice ! intermediate datatypes
- integer :: n, m ! sizes to build datatypes
- integer :: hlstride, hgstride ! strides to build datatypes
- integer :: stack, sz, klm
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, vlstride, vgstride
- ! init : number of dimensions, default value
- sz = size(shp)
- ginterior = MPI_DATATYPE_NULL
- linterior = MPI_DATATYPE_NULL
- ! collapse third and above dimensions
- stack = 1
- if (sz > 2) stack = product(shp(3:))
-
- ! size of data slice to carry
- n = DistGrid%I_STOP - DistGrid%I_STRT + 1
- m = DistGrid%J_STOP - DistGrid%J_STRT + 1
- CALL MPI_TYPE_GET_EXTENT( datatype, lb, sizeoftype, ierr)
- IF_NOTOK_MPI(status=1)
-
- ! horizontal global stride (in data)
- hgstride = DistGrid%im_region
- ! vertical global stride (in byte)
- vgstride = DistGrid%im_region * DistGrid%jm_region * sizeoftype
- ! local strides (may differ from n and n*m because of the halo)
- hlstride = shp(1) ! in data
- vlstride = shp(1)*shp(2)*sizeoftype ! in byte
- if ( isRoot ) then
- do klm=1,npes-1
- ! horizontal chunk is M lines of length N, separated by a global
- ! stride
- n = DistGrid%bounds(2,klm) - DistGrid%bounds(1,klm) + 1
- m = DistGrid%bounds(4,klm) - DistGrid%bounds(3,klm) + 1
- call MPI_TYPE_VECTOR (m, n, hgstride, datatype, gslice, ierr)
- IF_NOTOK_MPI(status=1)
-
- ! stack 3rd and above dimensions
- if (stack == 1) then
- ginterior(klm) = gslice
- else
- ! note : also works with stack=1
- call MPI_TYPE_CREATE_HVECTOR(stack, 1, vgstride, gslice, ginterior(klm), ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(gslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
-
- call MPI_TYPE_COMMIT (ginterior(klm), ierr)
- IF_NOTOK_MPI(status=1)
-
- end do
- else
-
- ! local interior is basically M lines of length N, separated by a local
- ! stride
- call MPI_TYPE_VECTOR (m, n, hlstride, datatype, lslice, ierr)
- IF_NOTOK_MPI(status=1)
-
- ! stack 3rd and above dimensions
- if (stack == 1) then
- linterior = lslice
- else
- ! note : also works with stack=1
- call MPI_TYPE_CREATE_HVECTOR (stack, 1, vlstride, lslice, linterior, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(lslice, ierr)
- IF_NOTOK_MPI(status=1)
- end if
-
- call MPI_TYPE_COMMIT (linterior, ierr)
- IF_NOTOK_MPI(status=1)
-
- end if
- status=0
-
- END SUBROUTINE GET_INTERIOR_TYPE
- !EOC
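-
- ! Sketch (illustrative only) of how a gather could use these types: root
- ! receives each PE's interior straight into the global array, other PEs
- ! send their own interior ('glbl', 'local', 'tag' and 'root' are assumed):
- !
- !   if ( isRoot ) then   ! (root copies its own interior into glbl directly)
- !      do klm=1,npes-1
- !         call MPI_RECV( glbl(DistGrid%bounds(1,klm),DistGrid%bounds(3,klm)), 1, &
- !                        ginterior(klm), klm, tag, localComm, MPI_STATUS_IGNORE, ierr )
- !      end do
- !   else
- !      call MPI_SSEND( local(DistGrid%i_strt,DistGrid%j_strt), 1, linterior, &
- !                      root, tag, localComm, ierr )
- !   end if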
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: FREE_DERIVED_TYPE
- !
- ! !DESCRIPTION: free all MPI derived datatypes in a vector
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE FREE_DERIVED_TYPE( datatype )
- !
- ! !INPUT/OUTPUT PARAMETERS:
- !
- integer, intent(inout) :: datatype(:) ! set of derived MPI datatypes
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- integer :: i, j
- integer :: res(size(datatype)) ! hold unique elements
- integer :: k ! number of unique elements
-
- ! Get list of unique handle(s)
- ! ----------------------------
- k = 1
- res(1) = 1
-
- outer: do i=2,size(datatype)
- ! look for a match
- do j=1,k
- if (datatype(res(j)) == datatype(i)) cycle outer ! match
- end do
-
- ! no match : add it to the list
- k = k + 1
- res(k) = i
-
- end do outer
- ! Free handle(s)
- ! ---------------------------
- do i=1,k
- if (datatype(res(i)) /= MPI_DATATYPE_NULL) &
- call MPI_TYPE_FREE(datatype(res(i)), ierr)
- end do
- END SUBROUTINE FREE_DERIVED_TYPE
- !EOC
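-
- ! E.g., after the collective comm, the vector of handles obtained from
- ! GET_INTERIOR_TYPE can be freed in one call; duplicated handles and
- ! MPI_DATATYPE_NULL entries are handled safely:
- !
- !   call FREE_DERIVED_TYPE( ginterior )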
- #endif /* MPI TYPES */
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: CHECK_DIST_ARR
- !
- ! !DESCRIPTION: Check that the shape of a local array corresponds to an
- ! array distributed on the current process. This check is
- ! done on the first 2 dimensions only, along which the domain
- ! decomposition is done.
- !
- ! Optionally: check shape of a global array. If arrays are 3D
- ! or more, the 3rd and above dimensions of local and global
- ! arrays are also compared. (This becomes "2D or more" for I-
- ! and J-Slices.)
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE CHECK_DIST_ARR( DistGrid, shp, halo, status, glbl_shp, jband, iband, has_global )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: shp(:) ! shape of local array
- integer, intent(in) :: halo ! halo size
- !
- integer, intent(in), optional :: glbl_shp(:) ! shape of global array
- logical, intent(in), optional :: jband, iband ! is it a zonal or meridional slice?
- logical, intent(in), optional :: has_global ! current proc holds the global array (default is root)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS: i-band refers to a unique i value, i.e. a meridional dataset.
- ! j-band refers to a unique j value, i.e. a zonal dataset.
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'check_dist_arr '
- integer :: n, m, sz
- integer, allocatable :: glbsz(:)
- logical :: x_jband, x_iband, hold_global
- status = 0
- ! check inputs
- x_jband=.false.
- x_iband=.false.
- if(present(iband))x_iband=iband
- if(present(jband))x_jband=jband
- if(x_iband.and.x_jband)then
- write (gol,*) "CHECK_DIST_ARR: cannot have **both** I- and J-Slices" ; call goErr
- TRACEBACK; status=1; return
- end if
- ! by default global array is expected on root
- hold_global = isRoot
- if (present(has_global)) hold_global=has_global
-
- ! required size w/o halo
- n = DistGrid%I_STOP - DistGrid%I_STRT + 1
- m = DistGrid%J_STOP - DistGrid%J_STRT + 1
- ! check shape of array
- if (x_iband) then
- if (shp(1) /= m+2*halo) then
- write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr
- write (gol,'(" local array : ", i4)') shp(1) ; call goErr
- write (gol,'(" should be : ", i4)') m+2*halo ; call goErr
- write (gol,'(" with J-stop : ", i4)') DistGrid%J_STOP ; call goErr
- write (gol,'(" J-start: ", i4)') DistGrid%J_STRT ; call goErr
- TRACEBACK; status=1; return
- end if
- else if (x_jband) then
- if (shp(1) /= n+2*halo) then
- write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr
- write (gol,'(" local array : ", i4)') shp(1) ; call goErr
- write (gol,'(" should be : ", i4)') n+2*halo ; call goErr
- write (gol,'(" with I-stop : ", i4)') DistGrid%I_STOP ; call goErr
- write (gol,'(" I-start: ", i4)') DistGrid%I_STRT ; call goErr
- TRACEBACK; status=1; return
- end if
- else
- if ((shp(1) /= n+2*halo).or.(shp(2) /= m+2*halo)) then
- write (gol,*) "CHECK_DIST_ARR: local array shape does not conform" ; call goErr
- write (gol,'(" local array : ",2i4)') shp(1:2) ; call goErr
- write (gol,'(" should be : ",2i4)') n+2*halo, m+2*halo ; call goErr
- write (gol,'(" w/ I/J-stop : ", i4)') DistGrid%I_STOP, DistGrid%J_STOP ; call goErr
- write (gol,'(" I/J-start: ", i4)') DistGrid%I_STRT, DistGrid%J_STRT ; call goErr
- TRACEBACK; status=1; return
- end if
- end if
- ! check shape of global array on root
- if (present(glbl_shp) .and. hold_global) then
- sz = size(shp)
-
- if (sz /= size(glbl_shp)) then
- write (gol,'("CHECK_DIST_ARR : global and local arrays have different rank:")') ; call goErr
- write (gol,'(" local rank : ", i4)') sz ; call goErr
- write (gol,'(" global rank : ", i4)') size(glbl_shp) ; call goErr
- TRACEBACK; status=1; return
- end if
- allocate(glbsz(sz))
- glbsz = shp
- if (x_iband) then
- glbsz(1) = DistGrid%jm_region
- else if (x_jband) then
- glbsz(1) = DistGrid%im_region
- else
- glbsz(1:2) = (/ DistGrid%im_region, DistGrid%jm_region /)
- end if
-
- if ( any (glbl_shp /= glbsz) ) then
- write (gol,'("CHECK_DIST_ARR : global array has wrong shape:")') ; call goErr
- write (gol,*) " shape(global) : ", glbl_shp ; call goErr
- write (gol,*) " im [and/or] jm_region/-extra dims- : ", glbsz ; call goErr
- TRACEBACK; status=1; return
- end if
- deallocate(glbsz)
- end if
- END SUBROUTINE CHECK_DIST_ARR
- !EOC
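- !
- ! Illustrative sketch (hypothetical sizes): on a process owning i=11..20
- ! and j=1..15, a local array with halo=2 must span (10+2*2) x (15+2*2):
- !
- ! real :: a(9:22,-1:17)
- ! call CHECK_DIST_ARR( DistGrid, shape(a), 2, status )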
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_IBAND_1D_R
- !
- ! !DESCRIPTION: update halo cells of a vector distributed along latitudes
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_IBAND_1D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 7 Jan 2016 - Ph. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_iband_1d_r'
- integer :: j0, j1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4)
- integer :: k, sz(1), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! degenerate case
- if (npe_lat==1) return
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, iband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ j1-halo+1, j0 /) ! j-start location of north and south buffers to send
- ijsr(:,2) = (/ j1+1, j0-halo /) ! j-start location of north and south buffers to receive
- ! Communicate (only south and north)
- ! ----------------
- tag_snd = (/2,4/)
- tag_rcv = (/4,2/)
- nghbr = (/2,4/)
- neigh : do k=1,2
- call MPI_ISEND( dist_array( ijsr(k,1)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,2)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_IBAND_1D_R
- !EOC
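- !
- ! Usage sketch (hypothetical array v): refresh a one-cell halo of a
- ! latitude-distributed vector.
- !
- ! real :: v(DistGrid%j_strt-1:DistGrid%j_stop+1)
- ! call UPDATE_HALO_IBAND_1D_R( DistGrid, v, 1, status )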
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_1D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal vector
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_1D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_1d_r'
- integer :: i0, i1
-
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4)
- integer :: k, sz(1), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
-
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1) = dist_array(i1-halo+1:i1)
- dist_array(i1+1:i1+halo) = dist_array(i0:i0+halo-1)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
-
- ! Communicate (only east and west)
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
-
- neigh : do k=1,2
- call MPI_ISEND( dist_array( ijsr(k,1)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,2)), halo, my_real, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1) = dist_array(i1-halo+1:i1)
- dist_array(i1+1:i1+halo) = dist_array(i0:i0+halo-1)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_1D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_2D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal 2d slice
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_2D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_2d_r'
- integer :: i0, i1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype
- integer :: k, sz(2), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
-
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1,:) = dist_array(i1-halo+1:i1,:)
- dist_array(i1+1:i1+halo,:) = dist_array(i0:i0+halo-1,:)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
- ! pack data
- !----------
- call MPI_TYPE_VECTOR (sz(2), halo, sz(1), my_real, wetype, ierr)
- call MPI_TYPE_COMMIT (wetype, ierr)
-
- ! Communicate
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
- neigh : do k=1,2 ! only east and west
- call MPI_ISEND( dist_array(ijsr(k,1),1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array(ijsr(k,2),1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(wetype, ierr)
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1,:) = dist_array(i1-halo+1:i1,:)
- dist_array(i1+1:i1+halo,:) = dist_array(i0:i0+halo-1,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_2D_R
- !EOC
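- !
- ! Note (illustrative only; a, buf, istart are hypothetical names): "wetype"
- ! above selects, inside the (sz(1),sz(2)) local array, a band of width
- ! "halo" from every row, i.e. sz(2) blocks of halo reals with stride sz(1).
- ! It is equivalent to hand-packing:
- !
- ! do j = 1, sz(2)
- ! buf((j-1)*halo+1:j*halo) = a(istart:istart+halo-1, j)
- ! end do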
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_3D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal 3d slice
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_3D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_3d_r'
- integer :: i0, i1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype
- integer :: k, sz(3), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1,:,:) = dist_array(i1-halo+1:i1,:,:)
- dist_array(i1+1:i1+halo,:,:) = dist_array(i0:i0+halo-1,:,:)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
- ! pack data
- !----------
- call MPI_TYPE_VECTOR (sz(2)*sz(3), halo, sz(1), my_real, wetype, ierr)
- call MPI_TYPE_COMMIT (wetype, ierr)
- ! Communicate
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
- neigh : do k=1,2 ! only east and west
- call MPI_ISEND( dist_array(ijsr(k,1),1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array(ijsr(k,2),1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(wetype, ierr)
- #else
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1,:,:) = dist_array(i1-halo+1:i1,:,:)
- dist_array(i1+1:i1+halo,:,:) = dist_array(i0:i0+halo-1,:,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_3D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_JBAND_4D_R
- !
- ! !DESCRIPTION: update halo cells of a decomposed zonal 4d slice
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_JBAND_4D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 20 Feb 2012 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_jband_4d_r'
- integer :: i0, i1
- #ifdef MPI
- integer :: stat(MPI_STATUS_SIZE,4), req(4), wetype
- integer :: k, sz(4), tag_snd(2), tag_rcv(2)
- integer :: ijsr(2,2), nghbr(2)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- ! degenerate case
- if (npe_lon==1) then
- if (DistGrid%is_periodic) then
- dist_array(i0-halo:i0-1,:,:,:) = dist_array(i1-halo+1:i1,:,:,:)
- dist_array(i1+1:i1+halo,:,:,:) = dist_array(i0:i0+halo-1,:,:,:)
- end if
- return
- end if
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, jband=.true. )
- IF_NOTOK_RETURN(status=1)
- end if
- ! Buffer anchors
- ! ----------------
- ijsr(:,1) = (/ i0, i1+1-halo /) ! i-start location of buffer to send
- ijsr(:,2) = (/ i0-halo, i1+1 /) ! i-start location of buffer to receive
- ! pack data
- !----------
- call MPI_TYPE_VECTOR (sz(2)*sz(3)*sz(4), halo, sz(1), my_real, wetype, ierr)
- call MPI_TYPE_COMMIT (wetype, ierr)
- ! Communicate
- ! ----------------
- tag_snd = (/1,3/)
- tag_rcv = (/3,1/)
- nghbr = (/1,3/)
- neigh : do k=1,2 ! only east and west
- call MPI_ISEND( dist_array(ijsr(k,1),1,1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array(ijsr(k,2),1,1,1), 1, wetype, DistGrid%neighbors(nghbr(k)), tag_rcv(k), localComm, req(k+2), ierr)
- end do neigh
- call MPI_WAITALL(4, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_TYPE_FREE(wetype, ierr)
- #else
- if ((halo/=0).and.DistGrid%is_periodic) then
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- dist_array(i0-halo:i0-1,:,:,:) = dist_array(i1-halo+1:i1,:,:,:)
- dist_array(i1+1:i1+halo,:,:,:) = dist_array(i0:i0+halo-1,:,:,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_JBAND_4D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_2D_R
- !
- ! !DESCRIPTION: update halo cells of a distributed 2D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_2D_R( DistGrid, dist_array, halo, status, i_only, j_only )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- logical, optional, intent(in) :: i_only ! update East & West halo only
- logical, optional, intent(in) :: j_only ! update North & South halo only
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_2d_r'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8)
- integer :: k, sz(2), tag_snd(4), tag_rcv(4)
- integer :: srtype(4), ijsr(4,4)
-
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
-
- ! get types and I/J start
- CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
-
- neigh : do k=1,4
-
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), &
- tag_rcv(k), localComm, req(k+4), ierr)
- end do neigh
- call MPI_WAITALL(8, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call FREE_DERIVED_TYPE( srtype )
- #else
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
-
- dist_array(i1-halo:i1-1,j1:j2) = dist_array(i2-halo+1:i2,j1:j2)
- dist_array(i2+1:i2+halo,j1:j2) = dist_array(i1:i1+halo-1,j1:j2)
- end if
- #endif
- status = 0
-
- END SUBROUTINE UPDATE_HALO_2D_R
- !EOC
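- !
- ! Usage sketch (hypothetical field p, with i1/i2/j1/j2 the local I/J start
- ! and stop): exchange a 3-cell halo with the four neighbors; in serial
- ! builds only the periodic east-west wrap is applied.
- !
- ! real :: p(i1-3:i2+3, j1-3:j2+3)
- ! call UPDATE_HALO_2D_R( DistGrid, p, 3, status )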
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_2D_I
- !
- ! !DESCRIPTION: update halo cells of a distributed 2D integer array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_2D_I( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- integer, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) not tested yet, but the version for 'real' has been...
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_2D_i'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(2)
- integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
- ! get types and I/J start
- CALL GET_HALO_TYPE( distGrid, sz, halo, MPI_INTEGER, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
- neigh : do k=1,4
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2)), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4)), 1, srtype(k), DistGrid%neighbors(k), &
- tag_rcv(k), localComm, req(k+4), ierr)
- end do neigh
- call MPI_WAITALL(8, req, stat, ierr)
- call FREE_DERIVED_TYPE( srtype )
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
- dist_array(i1-halo:i1-1,j1:j2) = dist_array(i2-halo+1:i2,j1:j2)
- dist_array(i2+1:i2+halo,j1:j2) = dist_array(i1:i1+halo-1,j1:j2)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_2D_I
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_3D_R
- !
- ! !DESCRIPTION: update halo cells of a distributed 3D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_3D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_3d_r'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(3)
- integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
- ! get types and I/J start
- CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
- neigh : do k=1,4
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2), 1), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), localComm, &
- req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4), 1), 1, srtype(k), DistGrid%neighbors(k), tag_rcv(k), localComm, &
- req(k+4), ierr)
- end do neigh
-
- call MPI_WAITALL(8, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
- call FREE_DERIVED_TYPE( srtype )
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
- dist_array(i1-halo:i1-1,j1:j2,:) = dist_array(i2-halo+1:i2,j1:j2,:)
- dist_array(i2+1:i2+halo,j1:j2,:) = dist_array(i1:i1+halo-1,j1:j2,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_3D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: UPDATE_HALO_4D_R
- !
- ! !DESCRIPTION: update halo cells of a distributed 4D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE UPDATE_HALO_4D_R( DistGrid, dist_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(inout) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'update_halo_4D_r'
- integer :: i1, i2, j1, j2
- #ifdef MPI
-
- integer :: stat(MPI_STATUS_SIZE,8), req(8), k, sz(4)
- integer :: srtype(4), ijsr(4,4), tag_snd(4), tag_rcv(4)
- status = 0
- ! check input
- if ( halo == 0 ) return
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status )
- IF_NOTOK_RETURN(status=1)
- end if
- ! get types and I/J start
- CALL GET_HALO_TYPE( DistGrid, sz, halo, my_real, srtype, ijsr, status )
- IF_NOTOK_RETURN(status=1)
- ! Communicate
- tag_snd = (/1,2,3,4/)
- tag_rcv = (/3,4,1,2/)
- neigh : do k=1,4
- call MPI_ISEND( dist_array( ijsr(k,1), ijsr(k,2), 1, 1), 1, srtype(k), DistGrid%neighbors(k), tag_snd(k), &
- localComm, req(k), ierr)
- call MPI_IRECV( dist_array( ijsr(k,3), ijsr(k,4), 1, 1), 1, srtype(k), DistGrid%neighbors(k), tag_rcv(k), &
- localComm, req(k+4), ierr)
- end do neigh
- call MPI_WAITALL(8, req, stat, ierr)
- IF_NOTOK_MPI(status=1)
-
- call FREE_DERIVED_TYPE( srtype )
- #else
-
- if ((halo/=0).and.DistGrid%is_periodic) then
- i1 = DistGrid%i_strt
- i2 = DistGrid%i_stop
- j1 = DistGrid%j_strt
- j2 = DistGrid%j_stop
- dist_array(i1-halo:i1-1,j1:j2,:,:) = dist_array(i2-halo+1:i2,j1:j2,:,:)
- dist_array(i2+1:i2+halo,j1:j2,:,:) = dist_array(i1:i1+halo-1,j1:j2,:,:)
- end if
- #endif
- status = 0
- END SUBROUTINE UPDATE_HALO_4D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_2D_R
- !
- ! !DESCRIPTION: gather local 2D arrays into a global 2D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_2D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) I have not used mpi_gatherv because of the varying *receiving* sizes
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_2d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(2)
- status=0
-
- ! check input, get derived types
- !-------------------------------
- sz = shape(dist_array)
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
-
- if ( isRoot ) then
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1) = dist_array(i0:i1,j0:j1)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_SEND( dist_array(i0,j0), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
-
- END SUBROUTINE GATHER_2D_R
- !EOC
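- !
- ! Usage sketch (hypothetical arrays): collect a decomposed field on root;
- ! glb needs its full (im_region,jm_region) shape on root only.
- !
- ! real :: loc(i1-1:i2+1, j1-1:j2+1), glb(DistGrid%im_region, DistGrid%jm_region)
- ! call GATHER_2D_R( DistGrid, loc, glb, 1, status )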
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_2D_I
- !
- ! !DESCRIPTION: gather local 2D arrays into a global 2D array (integer version)
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_2D_I( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- integer, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: glbl_array(:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_2d_i'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(2)
- status=0
- ! check input, get derived types
- !-------------------------------
- sz = shape(dist_array)
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, MPI_INTEGER, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1) = dist_array(i0:i1,j0:j1)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_SEND( dist_array(i0,j0), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_2D_I
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_3D_R
- !
- ! !DESCRIPTION: gather local 3D arrays into a global 3D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_3D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_3d_r'
-
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, : )
- status = 0
- #else
-
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(3)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1,:) = dist_array(i0:i1,j0:j1,:)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j,1), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_SEND( dist_array(i0,j0,1), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_3D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_4D_R
- !
- ! !DESCRIPTION: gather local 4D arrays into a global 4D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_4D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) the global array needs to be truly global on root only
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_4d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :, :)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(4)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then ! RECV
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1,:,:) = dist_array(i0:i1,j0:j1,:,:)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j,1,1), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else !SEND
- call MPI_SEND( dist_array(i0,j0,1,1), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_5D_R
- !
- ! !DESCRIPTION: gather local 5D arrays into a global 5D array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_5D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) the global array needs to be truly global on root only
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_5d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(5)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- GATHER -------------
- if ( isRoot ) then ! RECV
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- glbl_array(i0:i1,j0:j1,:,:,:) = dist_array(i0:i1,j0:j1,:,:,:)
- ! get remaining chunks from other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_RECV( glbl_array(i,j,1,1,1), 1, ginterior(klm), klm, 1, &
- localComm, stat, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else !SEND
- call MPI_SEND( dist_array(i0,j0,1,1,1), 1, linterior, root, 1, localComm, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_5D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_2D_R
- !
- ! !DESCRIPTION: scatter a 2D global real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_2D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:)
- integer, intent(in) :: halo
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: exactly the same as GATHER_2D_R, but inverting send/recv
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_2d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop ) = glbl_array
- status = 0
- #else
-
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(2)
- status=0
-
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- SCATTER -------------
- if ( isRoot ) then ! send
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- dist_array(i0:i1,j0:j1) = glbl_array(i0:i1,j0:j1)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_SSEND( glbl_array(i,j), 1, ginterior(klm), klm, klm, localComm, ierr)
- IF_NOTOK_MPI(status=1)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_COMM_RANK(localComm, klm, ierr)
- IF_NOTOK_MPI(status=1)
- call MPI_RECV( dist_array(i0,j0), 1, linterior, root, klm, localComm, stat, ierr)
- IF_NOTOK_MPI(status=1)
-
- call MPI_TYPE_FREE(linterior, ierr)
- IF_NOTOK_MPI(status=1)
- end if
- #endif
- END SUBROUTINE SCATTER_2D_R
- !EOC
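- !
- ! Usage sketch (hypothetical arrays): distribute a root-only field. Only
- ! the interior of loc is filled, so halo cells should be refreshed
- ! afterwards, e.g. with UPDATE_HALO_2D_R.
- !
- ! call SCATTER_2D_R( DistGrid, loc, glb, 1, status )
- ! call UPDATE_HALO_2D_R( DistGrid, loc, 1, status )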
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_3D_R
- !
- ! !DESCRIPTION: scatter 3D global real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_3D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: glbl_array(:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: the global array needs to be truly global on root only
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_3d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop, :) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(3)
- status=0
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
-
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- SCATTER -------------
- if ( isRoot ) then ! send
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
- dist_array(i0:i1,j0:j1,:) = glbl_array(i0:i1,j0:j1,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_SSEND( glbl_array(i,j,1), 1, ginterior(klm), klm, 1, localComm, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
- else
- call MPI_RECV( dist_array(i0,j0,1), 1, linterior, root, 1, localComm, stat, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_3D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_4D_R
- !
- ! !DESCRIPTION: scatter 4D real array
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_4D_R( DistGrid, dist_array, glbl_array, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: glbl_array(:,:,:,:)
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_4d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop, DistGrid%j_strt:DistGrid%j_stop,:,:) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), linterior, ginterior(npes-1)
- integer :: i, j, klm, i0, j0, i1, j1, sz(4)
- status=0
-
- ! ------- Check input & get derived types
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, halo, status, shape(glbl_array))
- IF_NOTOK_RETURN(status=1)
- end if
- CALL GET_INTERIOR_TYPE( DistGrid, sz, my_real, linterior, ginterior, status )
- IF_NOTOK_RETURN(status=1)
- i0 = DistGrid%i_strt
- j0 = DistGrid%j_strt
- ! ------- SCATTER -------------
- if ( isRoot ) then ! send
- ! get first chunk locally
- i1 = DistGrid%i_stop
- j1 = DistGrid%j_stop
-
- dist_array(i0:i1,j0:j1,:,:) = glbl_array(i0:i1,j0:j1,:,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i = DistGrid%bounds(1,klm)
- j = DistGrid%bounds(3,klm)
- call MPI_SSEND( glbl_array(i,j,1,1), 1, ginterior(klm), klm, 1, localComm, ierr)
- end do
- call FREE_DERIVED_TYPE( ginterior )
-
- else
- call MPI_RECV( dist_array(i0,j0,1,1), 1, linterior, root, 1, localComm, stat, ierr)
- call MPI_TYPE_FREE(linterior, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_4D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_IBAND_1D_R
- !
- ! !DESCRIPTION: scatter a meridional real vector (1D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_IBAND_1D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:)
- integer, optional, intent(in) :: iref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%j_strt:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 1D version, along J index, of scatter_2d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_iband_1d_r'
- #ifndef MPI
- dist_array( DistGrid%j_strt:DistGrid%j_stop ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE)
- integer :: x_iref, n, klm, i0, j0, i1, j1, sz(1), i0t, j0t, i1t, j1t
- status=0
- ! ------- Check inputs
- x_iref=1
- if(present(iref)) x_iref=iref
-
- sz = shape(dist_array)
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
-
- ! ------- SEND/RECV -------------
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, shape(glbl_array), iband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- if ( isRoot ) then
- ! local case
- if((x_iref>=i0).and.(x_iref<=i1)) dist_array(j0:j1) = glbl_array(j0:j1)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! is klm a target processor?
- if((x_iref<i0t).or.(x_iref>i1t))cycle
- n=j1t-j0t+1
- call MPI_SSEND( glbl_array(j0t), n, my_real, klm, 1, localComm, ierr)
- end do
-
- else
-
- ! are we on a target processor?
- if((x_iref<i0).or.(x_iref>i1))return
-
- n=j1-j0+1
- call MPI_RECV( dist_array(j0), n, my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_IBAND_1D_R
- !EOC
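- !
- ! Usage sketch (hypothetical arrays): only processes whose I-range contains
- ! iref take part; merid spans the local J-range, glbl is jm_region long on
- ! root.
- !
- ! call SCATTER_IBAND_1D_R( DistGrid, merid, glbl, status, iref=5 )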
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_1D_R
- !
- ! !DESCRIPTION: scatter a zonal real vector (1D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_1D_R( DistGrid, dist_array, glbl_array, status, jref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 1D version, along I index, of scatter_2d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_1d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE)
- integer :: x_jref, n, klm, i0, j0, i1, j1, sz(1), i0t, j0t, i1t, j1t
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- sz = shape(dist_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, shape(glbl_array), jband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1) = glbl_array(i0:i1)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! is klm a target processor?
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_SSEND( glbl_array(i0t), n, my_real, klm, 1, localComm, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0), n, my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_1D_R
- !EOC
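- !
- ! Usage sketch (hypothetical arrays): the zonal counterpart; only processes
- ! whose J-range contains jref receive a piece of glbl.
- !
- ! call SCATTER_JBAND_1D_R( DistGrid, zonal, glbl, status, jref=1 )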
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_2D_R
- !
- ! !DESCRIPTION: scatter a zonal slice (2D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_2D_R( DistGrid, dist_array, glbl_array, status, jref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 2D version, along I index, of scatter_3d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_2d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_jref, n, klm, sz(2), gsz(2)
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- !local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:) = glbl_array(i0:i1,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a target processor, pack and send
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(i0t,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0,1), n*sz(2), my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_2D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_IBAND_2D_R
- !
- ! !DESCRIPTION: scatter a meridional real array (2D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_IBAND_2D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:)
- integer, optional, intent(in) :: iref ! to find targets, default=1
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%j_strt:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- ! 21 Jun 2013 - P. Le Sager - MPI_SEND -> MPI_SSEND to avoid buffering
- !
- ! !REMARKS: 2D version, along J index, of scatter_3d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_iband_2d_r'
- #ifndef MPI
- dist_array( DistGrid%j_strt:DistGrid%j_stop,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_iref, n, klm, sz(2), gsz(2)
- status=0
- ! ------- Check inputs
- x_iref=1
- if(present(iref)) x_iref=iref
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((x_iref>=i0).and.(x_iref<=i1)) dist_array(j0:j1,:) = glbl_array(j0:j1,:)
- ! send remaining chunks to other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! is klm a target processor?
- if((x_iref<i0t).or.(x_iref>i1t))cycle
-
- n=j1t-j0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(j0t,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
-
- end do
- else
- ! are we on a target processor?
- if((x_iref<i0).or.(x_iref>i1))return
- n=j1-j0+1
- call MPI_RECV( dist_array(j0,1), n*sz(2), my_real, root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_IBAND_2D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_3D_R
- !
- ! !DESCRIPTION: scatter a zonal slice (3D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_3D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:,:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- logical, optional, intent(in) :: rowcom ! to scatter from row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- !
- ! !REMARKS: 3D version, along I index, of scatter_4d_r
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_3d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop,:,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_jref, n, klm, sz(3), gsz(3), slab, tgt_root
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default scatter from global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(x_jref>=j0).and.(x_jref<=j1)
- tgt_root = RowRootId
- endif
- endif
-
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- !local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:,:) = glbl_array(i0:i1,:,:)
- ! send remaining chunks to other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
-
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a target processor, pack and send
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(i0t,1,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0,1,1), n*sz(2)*sz(3), my_real, tgt_root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_3D_R
- !EOC
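- !
- ! Usage sketch (hypothetical arrays): with rowcom=.true. the slab is sent
- ! by the root of the process row that owns jref (instead of the global
- ! root), so glbl only needs to be valid on that row root.
- !
- ! call SCATTER_JBAND_3D_R( DistGrid, slab, glbl, status, jref=j0, rowcom=.true. )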
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: SCATTER_JBAND_4D_R
- !
- ! !DESCRIPTION: scatter a zonal slice (4D) from root
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE SCATTER_JBAND_4D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: glbl_array(:,:,:,:)
- integer, optional, intent(in) :: jref ! to find targets, default=1
- logical, optional, intent(in) :: rowcom ! to scatter from row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: dist_array(DistGrid%i_strt:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 17 Feb 2015 - Ph. Le Sager - v0
- !
- ! !REMARKS: 4D analogue, along I index, of SCATTER_JBAND_3D_R
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'scatter_jband_4d_r'
- #ifndef MPI
- dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:,: ) = glbl_array
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: x_jref, n, klm, sz(4), gsz(4), slab, tgt_root
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
- x_jref=1
- if(present(jref)) x_jref=jref
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default scatter from global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(x_jref>=j0).and.(x_jref<=j1)
- tgt_root = RowRootId
- endif
- endif
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- !local case
- if((x_jref>=j0).and.(x_jref<=j1)) dist_array(i0:i1,:,:,:) = glbl_array(i0:i1,:,:,:)
- ! send remaining chunks to other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a target processor, pack and send
- if((x_jref<j0t).or.(x_jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3)*sz(4), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_SSEND( glbl_array(i0t,1,1,1), 1, subarr, klm, 1, localComm, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a target processor?
- if((x_jref<j0).or.(x_jref>j1))return
- n=i1-i0+1
- call MPI_RECV( dist_array(i0,1,1,1), n*sz(2)*sz(3)*sz(4), my_real, tgt_root, 1, localComm, stat, ierr)
- end if
- #endif
- END SUBROUTINE SCATTER_JBAND_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_JBAND_2D_R
- !
- ! !DESCRIPTION: gather a zonal slice (2D) on root: for 2D arrays whose first
- ! dimension is distributed along I (making it a J-band) and whose
- ! second dimension is not distributed. For example:
- ! [i1:i2, lev] or [i1:i2, trac]
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_JBAND_2D_R( DistGrid, dist_array, glbl_array, status, jref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%i_strt:,:)
- integer, intent(in) :: jref ! to find sources
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_jband_2d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,: )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(2), gsz(2)
- status=0
- ! ------- Check inputs
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:) = dist_array(i0:i1,:)
- ! receive remaining chunks from other pes
- do klm=1, npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((jref<j0t).or.(jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, subarr, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(i0t,1), 1, subarr, klm, jref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((jref<j0).or.(jref>j1))return
- n=i1-i0+1
- call MPI_SEND( dist_array(i0,1), n*sz(2), my_real, root, jref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_JBAND_2D_R
- !EOC
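-
- ! A minimal usage sketch (hypothetical names; assumes a GATHER_J_BAND
- ! generic interface analogous to SCATTER_J_BAND): gather the local band
- ! flux(i_strt:i_stop,lm) of the row holding jref into glb(im_region,lm) on
- ! root. Root and at least all procs of that row must make the call:
- !
- ! call gather_j_band( dgrid, flux, glb, status, jref )
- ! IF_NOTOK_RETURN(status=1)
-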
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_JBAND_3D_R
- !
- ! !DESCRIPTION: Gather a zonal slab (3D) on root, or on the row root (i.e.
- ! the root of the row of procs holding JREF) if ROWCOM is set.
- ! Local arrays [i1:i2, a:b, c:d] are gathered into the root
- ! proc as [1:im, 1:b-a+1, 1:d-c+1]. Caller has to ensure that at least
- ! **ALL** the ROW procs call this routine, plus root if needed.
- ! Halos are not supported yet.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_JBAND_3D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%i_strt:,:,:)
- integer, intent(in) :: jref ! to find sources (defines the row we want)
- logical, optional, intent(in) :: rowcom ! to gather on row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS: used in budget for gathering fluxes, and in advect_cfl
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_jband_3d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(3), gsz(3), slab, tgt_root
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
-
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default gather into global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(jref>=j0).and.(jref<=j1)
- tgt_root = RowRootId
- endif
- endif
-
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
-
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- ! local case
- if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:,:) = dist_array(i0:i1,:,:)
- ! receive remaining chunks from other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
-
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((jref<j0t).or.(jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(i0t,1,1), 1, subarr, klm, jref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((jref<j0).or.(jref>j1))return
- n=i1-i0+1
- call MPI_SEND( dist_array(i0,1,1), n*sz(2)*sz(3), my_real, tgt_root, jref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_JBAND_3D_R
- !EOC
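-
- ! A minimal sketch of the ROWCOM option (hypothetical names): gather on the
- ! row root instead of the global root, e.g. when only the row needs the
- ! result (as in advect_cfl). Only the row root needs row_glb with its full
- ! global shape:
- !
- ! call gather_j_band( dgrid, loc3d, row_glb, status, jref, rowcom=.true. )
- ! IF_NOTOK_RETURN(status=1)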
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_JBAND_4D_R
- !
- ! !DESCRIPTION: Gather a zonal slab (4D) on root, or on the row root (i.e.
- ! the root of the row of procs holding JREF) if ROWCOM is set.
- ! Local arrays [i1:i2, a:b, c:d, e:f] are gathered into the root
- ! proc as [1:im, 1:b-a+1, 1:d-c+1, 1:f-e+1]. Caller has to
- ! ensure that at least **ALL** the ROW procs call this routine,
- ! plus root if needed.
- ! Halos are not supported yet.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_JBAND_4D_R( DistGrid, dist_array, glbl_array, status, jref, rowcom)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%i_strt:,:,:,:)
- integer, intent(in) :: jref ! to find sources (defines the row we want)
- logical, optional, intent(in) :: rowcom ! to gather on row_root instead of global root
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 17 Feb 2015 - Ph. Le Sager - v0
- !
- ! !REMARKS: used in advectx
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_jband_4d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%i_strt:DistGrid%i_stop,:,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(4), gsz(4), slab, tgt_root, stack
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- logical :: selectRoot
- status=0
- ! ------- Check inputs
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! by default gather into global root
- selectRoot = isRoot
- tgt_root = root
- if (present(rowcom)) then
- if (rowcom) then
- selectRoot = isRowRoot.and.(jref>=j0).and.(jref<=j1)
- tgt_root = RowRootId
- endif
- endif
- sz = shape(dist_array)
- gsz = shape(glbl_array)
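- ! total number of 2D slabs (dims 1-2) stacked along dims 3 and 4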
- stack = sz(3)*sz(4)
-
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, jband=.true., has_global=selectRoot)
- IF_NOTOK_RETURN(status=1)
- end if
- ! ------- SEND/RECV -------------
- if ( selectRoot ) then
- ! local case
- if((jref>=j0).and.(jref<=j1)) glbl_array(i0:i1,:,:,:) = dist_array(i0:i1,:,:,:)
- ! receive remaining chunks from other pes
- do klm=0,npes-1
- if (klm==myid) cycle ! skip local case
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((jref<j0t).or.(jref>j1t))cycle
- n=i1t-i0t+1
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (stack, 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(i0t,1,1,1), 1, subarr, klm, jref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((jref<j0).or.(jref>j1))return
- n=i1-i0+1
- call MPI_SEND( dist_array(i0,1,1,1), n*sz(2)*sz(3)*sz(4), my_real, tgt_root, jref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_JBAND_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_IBAND_1D_R
- !
- ! !DESCRIPTION: gather a meridional vector (its only dimension distributed
- ! along J) on root. For example: [j1:j2]
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_IBAND_1D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%j_strt:)
- integer, intent(in) :: iref ! to define source processors
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! - all processors with an i-range containing IREF are used.
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_iband_1d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%j_strt:DistGrid%j_stop )
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(1), gsz(1)
- status=0
- ! ------- Check inputs
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.)
- ! write(gol,*)"lbound m",lbound(dist_array); call goPr
- ! write(gol,*)"ubound m",ubound(dist_array); call goPr
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((iref>=i0).and.(iref<=i1)) glbl_array(j0:j1) = dist_array(j0:j1)
- ! receive remaining chunks from other pes
- do klm=1, npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((iref<i0t).or.(iref>i1t))cycle
- n=j1t-j0t+1
- call MPI_RECV( glbl_array(j0t), n, my_real, klm, iref, localComm, stat, ierr)
- end do
- else
- ! are we on a src processor?
- if((iref<i0).or.(iref>i1)) return
- n=j1-j0+1
- call MPI_SEND( dist_array(j0), n, my_real, root, iref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_IBAND_1D_R
- !EOC
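-
- ! A minimal usage sketch (hypothetical names; assumes a GATHER_I_BAND
- ! generic interface analogous to SCATTER_I_BAND): gather the meridional
- ! column at i=iref into glb(jm_region) on root. Root and all procs whose
- ! i-range contains iref must make the call:
- !
- ! call gather_i_band( dgrid, loc1d, glb, status, iref=1 )
- ! IF_NOTOK_RETURN(status=1)
-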
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: GATHER_IBAND_3D_R
- !
- ! !DESCRIPTION: gather a meridional slice (3D) on root, for arrays like
- ! [j1:j2, lev, trac], i.e. without a distributed I dimension.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE GATHER_IBAND_3D_R( DistGrid, dist_array, glbl_array, status, iref )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- real, intent(in) :: dist_array(DistGrid%j_strt:,:,:)
- integer, intent(in) :: iref ! to find sources
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: glbl_array(:,:,:)
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS: used in budget for gathering fluxes
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'gather_iband_3d_r'
- #ifndef MPI
- glbl_array = dist_array( DistGrid%j_strt:DistGrid%j_stop,:,:)
- status = 0
- #else
- integer :: stat(MPI_STATUS_SIZE), subarr
- integer :: i0, j0, i1, j1, i0t, j0t, i1t, j1t
- integer :: n, klm, sz(3), gsz(3), slab
- integer(kind=MPI_ADDRESS_KIND) :: sizeoftype, lb, stride
- status=0
- ! ------- Check inputs
- sz = shape(dist_array)
- gsz = shape(glbl_array)
- if(okdebug)then
- CALL CHECK_DIST_ARR( DistGrid, sz, 0, status, gsz, iband=.true.)
- IF_NOTOK_RETURN(status=1)
- end if
- i0 = DistGrid%i_strt
- i1 = DistGrid%i_stop
- j0 = DistGrid%j_strt
- j1 = DistGrid%j_stop
- ! ------- SEND/RECV -------------
- if ( isRoot ) then
- ! local case
- if((iref>=i0).and.(iref<=i1)) glbl_array(j0:j1,:,:) = dist_array(j0:j1,:,:)
- ! receive remaining chunks from other pes
- do klm=1,npes-1
- i0t = DistGrid%bounds(1,klm)
- i1t = DistGrid%bounds(2,klm)
- j0t = DistGrid%bounds(3,klm)
- j1t = DistGrid%bounds(4,klm)
- ! if klm is a source processor, receive from it
- if((iref<i0t).or.(iref>i1t))cycle
- n=j1t-j0t+1
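- ! same datatype construction as in the J-band routines, but the vector now
- ! runs along J: sz(2) blocks of n contiguous reals spaced by the global
- ! leading dimension gsz(1); sz(3) such slabs are then stacked with a byte
- ! stride of gsz(1)*gsz(2)*sizeoftype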
- call MPI_TYPE_VECTOR (sz(2), n, gsz(1), my_real, slab, ierr)
- CALL MPI_TYPE_GET_EXTENT( my_real, lb, sizeoftype, ierr)
- stride = gsz(1)*gsz(2)*sizeoftype
- call MPI_TYPE_CREATE_HVECTOR (sz(3), 1, stride, slab, subarr, ierr)
- call MPI_TYPE_FREE (slab, ierr)
- call MPI_TYPE_COMMIT (subarr, ierr)
- call MPI_RECV( glbl_array(j0t,1,1), 1, subarr, klm, iref, localComm, stat, ierr)
- call MPI_TYPE_FREE (subarr, ierr)
- end do
- else
- ! are we on a src processor?
- if((iref<i0).or.(iref>i1))return
- n=j1-j0+1
- call MPI_SEND( dist_array(j0,1,1), n*sz(2)*sz(3), my_real, root, iref, localComm, ierr)
- end if
- #endif
- END SUBROUTINE GATHER_IBAND_3D_R
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: REDUCE_2D_R
- !
- ! !DESCRIPTION: Global reduction. Data are gathered on root, where the
- ! operation is then applied, instead of applying it on each
- ! proc and combining the partial results with MPI_REDUCE. This
- ! ensures bitwise agreement with the serial result in the case of SUM.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE REDUCE_2D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM'
- logical, intent(in), optional :: all ! mimic mpi_allreduce instead of mpi_reduce
- logical, intent(in), optional :: debug ! print additional information: location of Min/Max
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: resultat
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'REDUCE_2D_R'
- logical :: x_debug
- integer :: shp(2)
- real, allocatable :: glbl_array(:,:)
- x_debug=.false.
- if(present(debug)) x_debug=debug
-
- #ifdef MPI
- if (isRoot) then
- allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region ))
- else
- allocate( glbl_array(1,1) )
- end if
-
- call gather(DistGrid, dist_array, glbl_array, halo, status)
- IF_NOTOK_RETURN(status=1)
- if (isRoot) then
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(glbl_array)
- case('max', 'Max', 'MAX')
- resultat = maxval(glbl_array)
- if(x_debug) then
- shp=maxloc(glbl_array)
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(glbl_array)
- if(x_debug) then
- shp=minloc(glbl_array)
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- end if
- if (present(all)) then
- if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr)
- end if
- deallocate(glbl_array)
- #else
- select case( op )
-
- case('sum', 'Sum', 'SUM')
- resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- case('max', 'Max', 'MAX')
- resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- if(x_debug) then
- shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- if(x_debug) then
- shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region))
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
-
- #endif
-
- status=0
-
- END SUBROUTINE REDUCE_2D_R
- !EOC
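-
- ! A minimal usage sketch (hypothetical names; REDUCE is the generic
- ! interface of the REDUCE_*_R routines): bitwise-reproducible global sum,
- ! made available on all procs:
- !
- ! call reduce( dgrid, field, halo, 'SUM', total, status, all=.true. )
- ! IF_NOTOK_RETURN(status=1)
- !
- ! Design note: gathering first and applying the operation on root costs
- ! extra memory and communication, but guarantees the same answer as the
- ! serial code, which an MPI_REDUCE of per-proc partial results would not.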
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: REDUCE_3D_R
- !
- ! !DESCRIPTION: same as REDUCE_2D_R, for 3D arrays.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE REDUCE_3D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:)
- character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM'
- logical, intent(in), optional :: all ! mimic MPI_ALLREDUCE instead of MPI_REDUCE
- logical, intent(in), optional :: debug ! print additional information: location of Min/Max
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: resultat
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'REDUCE_3D_R'
- integer :: shp(3)
- logical :: x_debug
- real, allocatable :: glbl_array(:,:,:)
- x_debug=.false.
- if(present(debug)) x_debug=debug
- #ifdef MPI
- shp = shape( dist_array )
- if (isRoot) then
- allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region, shp(3)) )
- else
- allocate( glbl_array(1,1,1) )
- end if
- call gather(DistGrid, dist_array, glbl_array, halo, status)
- IF_NOTOK_RETURN(status=1)
- if (isRoot) then
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(glbl_array)
- case('max', 'Max', 'MAX')
- resultat = maxval(glbl_array)
- if(x_debug) then
- shp=maxloc(glbl_array)
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(glbl_array)
- if(x_debug) then
- shp=minloc(glbl_array)
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- end if
- if (present(all)) then
- if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr)
- end if
- #else
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- case('max', 'Max', 'MAX')
- resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- if(x_debug) then
- shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- if(x_debug) then
- shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:))
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- #endif
- status=0
- END SUBROUTINE REDUCE_3D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: REDUCE_4D_R
- !
- ! !DESCRIPTION: same as REDUCE_2D_R, for 4D arrays.
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE REDUCE_4D_R( DistGrid, dist_array, halo, op, resultat, status, all, debug)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- real, intent(in) :: dist_array(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:,:,:)
- character(len=3), intent(in) :: op ! 'MAX', 'MIN' or 'SUM'
- logical, intent(in), optional :: all ! mimic MPI_ALLREDUCE instead of MPI_REDUCE
- logical, intent(in), optional :: debug ! print additional information: location of Min/Max
- !
- ! !OUTPUT PARAMETERS:
- !
- real, intent(out) :: resultat
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'reduce_4d_r'
- integer :: shp(4)
- logical :: x_debug
- real, allocatable :: glbl_array(:,:,:,:)
- x_debug=.false.
- if(present(debug)) x_debug=debug
- #ifdef MPI
- shp = shape( dist_array )
- if (isRoot) then
- allocate( glbl_array( DistGrid%im_region, DistGrid%jm_region, shp(3), shp(4)) )
- else
- allocate( glbl_array(1,1,1,1) )
- end if
- call gather(DistGrid, dist_array, glbl_array, halo, status)
- IF_NOTOK_RETURN(status=1)
- if (isRoot) then
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(glbl_array)
- case('max', 'Max', 'MAX')
- resultat = maxval(glbl_array)
- if(x_debug) then
- shp=maxloc(glbl_array)
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(glbl_array)
- if(x_debug) then
- shp=minloc(glbl_array)
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- end if
- if (present(all)) then
- if (all) call MPI_bcast(resultat, 1, my_real, root, localComm, ierr)
- end if
- #else
- select case( op )
- case('sum', 'Sum', 'SUM')
- resultat = sum(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- case('max', 'Max', 'MAX')
- resultat = maxval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- if(x_debug) then
- shp=maxloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- write(gol,*) rname //": Max location =", shp; call goPr
- end if
- case('min', 'Min', 'MIN')
- resultat = minval(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- if(x_debug) then
- shp=minloc(dist_array(1:DistGrid%im_region, 1:DistGrid%jm_region,:,:))
- write(gol,*) rname //": Min location =", shp; call goPr
- end if
- case default
- write(gol,*) 'UNSUPPORTED OPERATION'; call goPr; status=1
- IF_NOTOK_RETURN(status=1)
- end select
- #endif
- status=0
- END SUBROUTINE REDUCE_4D_R
- !EOC
-
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: DIST_ARR_STAT_2D_R
- !
- ! !DESCRIPTION: calculate and print the MIN, MEAN, MAX of a 2D distributed
- ! real field. This is basically a wrapper around several calls
- ! to REDUCE.
- !
- ! *** SHOULD BE CALLED ONLY FOR DEBUGGING PURPOSES ***
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE DIST_ARR_STAT_2D_R( DistGrid, name, arr, halo, status)
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- character(len=*), intent(in) :: name ! verbose helper
- integer, intent(in) :: halo
- real, intent(in) :: arr(DistGrid%i_strt-halo:,DistGrid%j_strt-halo:)
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out):: status
- !
- ! !REVISION HISTORY:
- ! 7 Mar 2012 - P. Le Sager - adapted for lon-lat MPI domain
- ! decomposition, from DD_FIELD_STATISTICS in
- ! both DIFFUSION.F90 and DRY_DEPOSITION.F90
- ! !REMARKS:
- ! (1) FIXME : does not compute the mean of only non-zero data anymore
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname=mname//'dist_arr_stat_2d_r'
- integer :: ntel_non_zero, nx, ny
- real :: maxf, minf, meanf, mean_non_zero
- call reduce( DistGrid, arr, halo, 'MAX', maxf, status)
- IF_NOTOK_RETURN(status=1)
- call reduce( DistGrid, arr, halo, 'MIN', minf, status)
- IF_NOTOK_RETURN(status=1)
- call reduce( DistGrid, arr, halo, 'SUM', meanf, status)
- IF_NOTOK_RETURN(status=1)
- if(isRoot) then
- nx = DistGrid%im_region
- ny = DistGrid%jm_region
- meanf = meanf / ( nx*ny )
- write(gol,'(a10,3(a5,1x,1pe10.3))') name,' max=', maxf,' min=',minf,'mean=',meanf!,'mn0',mean_non_zero
- call goPr
- end if
-
- status=0
-
- END SUBROUTINE DIST_ARR_STAT_2D_R
- !EOC
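-
- ! A minimal usage sketch (hypothetical field name; assumes DIST_ARR_STAT is
- ! the generic interface of this routine), for debugging only:
- !
- ! call dist_arr_stat( dgrid, 'emis', emis, 0, status )
- ! IF_NOTOK_RETURN(status=1)
- !
- ! which prints one "emis max= ... min= ... mean= ..." line on root.
-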
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: PRINT_DISTGRID
- !
- ! !DESCRIPTION: utility that prints information about a distributed grid
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE PRINT_DISTGRID ( DistGrid )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
-
- integer, parameter :: maxrow=5
- integer, parameter :: maxcol=5
- integer :: sz1(1), i1
- ! header
- write(gol,*) "========== Start Distributed Grid ==================="; call goPr
- ! dist_grid
- write(gol,*) "im_region :" , DistGrid%im_region ; call goPr
- write(gol,*) "jm_region :" , DistGrid%jm_region ; call goPr
- write(gol,*) "i_strt :" , DistGrid%i_strt ; call goPr
- write(gol,*) "i_stop :" , DistGrid%i_stop ; call goPr
- write(gol,*) "i_strt_halo :" , DistGrid%i_strt_halo ; call goPr
- write(gol,*) "i_stop_halo :" , DistGrid%i_stop_halo ; call goPr
- write(gol,*) "j_strt :" , DistGrid%j_strt ; call goPr
- write(gol,*) "j_stop :" , DistGrid%j_stop ; call goPr
- write(gol,*) "j_strt_halo :" , DistGrid%j_strt_halo ; call goPr
- write(gol,*) "j_stop_halo :" , DistGrid%j_stop_halo ; call goPr
- write(gol,*) "has_north_pole:" , DistGrid%has_north_pole ; call goPr
- write(gol,*) "has_south_pole:" , DistGrid%has_south_pole ; call goPr
- ! physical grid
- write(gol,*) '------------- physical grid -------------------' ; call goPr
- write(gol,*) "llgrid name:", trim(DistGrid%lli%name) ; call goPr
- write(gol,*) "R[m] :", DistGrid%lli%R ; call goPr
- write(gol,*) "dlon[deg] :", DistGrid%lli%dlon_deg ; call goPr
- write(gol,*) "dlat[deg] :", DistGrid%lli%dlat_deg ; call goPr
- write(gol,*) "im :", DistGrid%lli%im ; call goPr
- write(gol,*) "jm :", DistGrid%lli%jm ; call goPr
- if (associated(DistGrid%lli%lon_deg)) then
- sz1 = shape(DistGrid%lli%lon_deg)
- i1 = min(maxcol,sz1(1))
- write(gol,*) "lon = ",DistGrid%lli%lon_deg(1:i1); call goPr
- endif
- if (associated(DistGrid%lli%lat_deg)) then
- sz1=shape(DistGrid%lli%lat_deg)
- i1=min(maxrow,sz1(1))
- write(gol,*) "lat = ",DistGrid%lli%lat_deg(1:i1); call goPr
- endif
- ! footer
- write(gol,*) "========== End Distributed Grid ===================" ; call goPr
- END SUBROUTINE PRINT_DISTGRID
- !EOC
- !--------------------------------------------------------------------------
- ! TM5 !
- !--------------------------------------------------------------------------
- !BOP
- !
- ! !IROUTINE: TESTCOMM
- !
- ! !DESCRIPTION: check if the communications are working as expected.
- ! Currently checked:
- ! - GATHER, SCATTER, UPDATE_HALO (2D, 3D, 4D)
- ! - SCATTER_I_BAND, SCATTER_J_BAND (1D, 2D)
- !\\
- !\\
- ! !INTERFACE:
- !
- SUBROUTINE TESTCOMM( DistGrid, halo, status )
- !
- ! !INPUT PARAMETERS:
- !
- type(dist_grid), intent(in) :: DistGrid
- integer, intent(in) :: halo
- !
- ! !OUTPUT PARAMETERS:
- !
- integer, intent(out) :: status
- !
- ! !REVISION HISTORY:
- ! 01 Nov 2011 - P. Le Sager - v0
- !
- ! !REMARKS:
- ! (1) to be run with different halo sizes
- ! (2) note that it will not catch some errors in halo_update if too few processors are used
- ! (3) the whole test body below is currently commented out, so this routine is a no-op returning status=0
- !
- !EOP
- !------------------------------------------------------------------------
- !BOC
- character(len=*), parameter :: rname = mname//'testcomm'
- ! real, allocatable :: src1(:), tgt1(:), res1(:)
- ! real, allocatable :: src2(:,:), tgt2(:,:), res2(:,:)
-
- ! real, allocatable :: larray2a(:,:), larray2b(:,:), glb2a(:,:), glb2b(:,:)
- ! real, allocatable :: larray3a(:,:,:), larray3b(:,:,:), glb3a(:,:,:), glb3b(:,:,:)
- ! real, allocatable :: larray4a(:,:,:,:), larray4b(:,:,:,:)
- ! real, allocatable :: glb4a(:,:,:,:), glb4b(:,:,:,:)
- ! integer :: i0, j0, i1, j1, x_halo, k, levels, l, trac, iref(2), jref(2)
- ! logical :: south, north, west, east, test
-
- ! character(len=*), parameter :: form='(f4.0)'
- ! levels=17
- ! trac=5
- ! ! General
- ! call Get_DistGrid( DistGrid, I_STRT=i0, I_STOP=i1, J_STRT=j0, J_STOP=j1, &
- ! hasEastBorder=east, hasWestBorder=west, &
- ! hasSouthBorder=south, hasNorthBorder=north )
- ! x_halo=halo
- ! status=0
- ! if(isRoot) print*, "========= TESTING COMM FOR HALO=",x_HALO
- ! call par_barrier()
-
- ! ! *************************** SCATTER BAND ***************************
- ! iref=(/ 1, DistGrid%im_region/) ! to test i_band along west and east border
- ! jref=(/ 1, DistGrid%jm_region/) ! to test j_band along south and north border
-
- ! if(isRoot) then
- ! allocate(src1(DistGrid%im_region))
- ! else
- ! allocate(src1(1))
- ! end if
- ! allocate(tgt1(i0:i1), res1(i0:i1))
- ! if (isRoot) src1 = (/(k, k=1,DistGrid%im_region)/)
- ! res1 = (/(k, k=i0,i1)/)
- ! do trac=1,2
- ! tgt1=0
- ! call scatter_j_band(distgrid, tgt1, src1, status, jref=jref(trac))
- ! IF_NOTOK_RETURN(status=1)
-
- ! test=((trac==1).and.south).or.((trac==2).and.north)
-
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res1-tgt1)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_J_band 1D FAILED at border:"
- ! !print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! status=1
- ! !else ! Expected only if tgt1 has inout attribute in scatter routine
- ! ! print*, "test scatter_J_band 1D PASSED inside:"
- ! ! print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_J_band 1D PASSED at border"
- ! !print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! !else ! Expected only if tgt1 has inout attribute in scatter routine
- ! ! print*, "test scatter_J_band 1D FAILED inside"
- ! ! print*, i0,i1,tgt1(i0), tgt1(i1), res1(i0), res1(i1)
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src1, tgt1, res1)
- ! if(isRoot) then
- ! allocate(src1(DistGrid%jm_region))
- ! else
- ! allocate(src1(1))
- ! end if
- ! allocate(tgt1(j0:j1), res1(j0:j1))
- ! if (isRoot) src1 = (/(k, k=1,DistGrid%jm_region)/)
- ! res1 = (/(k, k=j0,j1)/)
- ! do trac=1,2
- ! tgt1=0
-
- ! call scatter_i_band(distgrid, tgt1, src1, status, iref=iref(trac))
- ! IF_NOTOK_RETURN(status=1)
- ! test=((trac==1).and.west).or.((trac==2).and.east)
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res1-tgt1)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_I_band 1D FAILED at border"
- ! status=1
- ! !else
- ! ! print*, "test scatter_I_band 1D PASSED inside"
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_I_band 1D PASSED at border"
- ! !else
- ! ! print*, "test scatter_I_band 1D FAILED inside"
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src1, tgt1, res1)
- ! ! ---------------- 2D
- ! if(isRoot) then
- ! allocate(src2(DistGrid%im_region, levels))
- ! else
- ! allocate(src2(1,1))
- ! end if
- ! allocate(tgt2(i0:i1,levels), res2(i0:i1,levels))
- ! do l=1,levels
- ! if (isRoot) src2(:,l) = (/(k, k=1,DistGrid%im_region)/) * l
- ! res2(:,l) = (/(k, k=i0,i1)/)* l
- ! end do
- ! do trac=1,2
- ! tgt2=0
- ! call scatter_j_band(distgrid, tgt2, src2, status, jref=jref(trac))
- ! IF_NOTOK_RETURN(status=1)
- ! test=((trac==1).and.south).or.((trac==2).and.north)
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res2-tgt2)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_J_band 2D FAILED at border"
- ! status=1
- ! !else
- ! ! print*, "test scatter_J_band 2D PASSED inside"
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_J_band 2D PASSED at border"
- ! !else
- ! ! print*, "test scatter_J_band 2D FAILED inside"
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src2, tgt2, res2)
- ! if(isRoot) then
- ! allocate(src2(DistGrid%jm_region,levels))
- ! else
- ! allocate(src2(1,1))
- ! end if
- ! allocate(tgt2(j0:j1,levels), res2(j0:j1,levels))
- ! do l=1,levels
- ! if (isRoot) src2(:,l) = (/(k, k=1,DistGrid%jm_region)/)*l
- ! res2(:,l) = (/(k, k=j0,j1)/)*l
- ! enddo
- ! do trac=1,2
- ! tgt2=0
- ! call scatter_i_band(distgrid, tgt2, src2, status, iref=iref(trac))
- ! IF_NOTOK_RETURN(status=1)
- ! test=((trac==1).and.west).or.((trac==2).and.east)
- ! ! diff should be 0 along borders only
- ! if (maxval(abs(res2-tgt2)) /= 0.) then
- ! if(test) then
- ! print*, "test scatter_I_band 2D FAILED at border"
- ! status=1
- ! !else
- ! ! print*, "test scatter_I_band 2D PASSED inside"
- ! end if
- ! else
- ! if(test) then
- ! print*, "test scatter_I_band 2D PASSED at border"
- ! !else
- ! ! print*, "test scatter_I_band 2D FAILED inside"
- ! ! status=1
- ! end if
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! end do
- ! deallocate(src2, tgt2, res2)
-
- ! ! *************************** GATHER/SCATTER ***************************
-
- ! !----------------------
- ! ! Allocate 2D
- ! !----------------------
- ! allocate( larray2a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! allocate( larray2b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! allocate( glb2a(DistGrid%im_region, DistGrid%jm_region) )
- ! ! in halo, 0, elsewhere myid
- ! larray2a=0.
- ! larray2a(i0:i1,j0:j1)=real(myid)
- ! ! glb2b is global array, used in root only
- ! if (isRoot) then
- ! allocate( glb2b( DistGrid%im_region, DistGrid%jm_region) )
- ! else
- ! allocate( glb2b(1,1) )
- ! end if
- ! !----------------------
- ! ! test GATHER_2D_R
- ! !----------------------
- ! glb2b=0.
- ! larray2b=0.
- ! ! gather
- ! call gather( DistGrid, larray2a, glb2b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! broadcast result
- ! if (isRoot) glb2a = glb2b
- ! #ifdef MPI
- ! call MPI_bcast(glb2a, size(glb2a), my_real, root, localComm, ierr)
- ! #endif
- ! larray2b(i0:i1,j0:j1) = glb2a(i0:i1,j0:j1)
- ! larray2b = larray2a-larray2b
- ! ! diff should be 0
- ! if (maxval(abs(larray2b)) /= 0.) then
- ! print*, "test gather 2d FAILED"
- ! status=1
- ! else
- ! print*, "test gather 2d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
-
- ! call par_barrier()
- ! !----------------------
- ! ! test SCATTER_2D_R
- ! !----------------------
- ! larray2b=0.
- ! call scatter( DistGrid, larray2b, glb2b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! larray2b=larray2a-larray2b
- ! ! diff should be 0
- ! if (maxval(abs(larray2b)) /= 0.) then
- ! print*, "test scatter 2d FAILED"
- ! status=1
- ! else
- ! print*, "test scatter 2d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray2a,larray2b,glb2a,glb2b)
- ! call par_barrier()
-
- ! !----------------------
- ! ! Allocate 3D
- ! !----------------------
- ! allocate( larray3a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! allocate( larray3b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! allocate( glb3a( DistGrid%im_region, DistGrid%jm_region, levels) )
- ! ! in halo, 0, elsewhere myid*level
- ! larray3a=0.
- ! do k=1,levels
- ! larray3a(i0:i1,j0:j1,k)=real(myid*k)
- ! end do
- ! ! glb2b is global array, used in root only
- ! if (isRoot) then
- ! allocate( glb3b( DistGrid%im_region, DistGrid%jm_region, levels) )
- ! else
- ! allocate( glb3b(1,1,1) )
- ! end if
- ! !----------------------
- ! ! test GATHER_3D_R
- ! !----------------------
- ! glb3b=0.
- ! larray3b=0.
- ! ! gather
- ! call gather( DistGrid, larray3a, glb3b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! broadcast result
- ! if (isRoot) glb3a = glb3b
- ! #ifdef MPI
- ! call MPI_bcast(glb3a, size(glb3a), my_real, root, localComm, ierr)
- ! #endif
- ! larray3b(i0:i1,j0:j1,:) = glb3a(i0:i1,j0:j1,:)
- ! larray3b = larray3a-larray3b
- ! ! diff should be 0
- ! if (maxval(abs(larray3b)) /= 0.) then
- ! print*, "test gather 3d FAILED"
- ! status=1
- ! else
- ! print*, "test gather 3d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! call par_barrier()
-
- ! !----------------------
- ! ! test SCATTER_3D_R
- ! !----------------------
- ! larray3b=0.
- ! call scatter( DistGrid, larray3b, glb3b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! larray3b=larray3a-larray3b
- ! ! diff should be 0
- ! if (maxval(abs(larray3b)) /= 0.) then
- ! print*, "test scatter 3d FAILED"
- ! status=1
- ! else
- ! print*, "test scatter 3d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray3a,larray3b,glb3a,glb3b)
- ! call par_barrier()
- ! !----------------------
- ! ! Allocate 4D
- ! !----------------------
- ! allocate( larray4a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! allocate( larray4b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! allocate( glb4a( DistGrid%im_region, DistGrid%jm_region, levels, trac) )
- ! ! in halo, 0, elsewhere (myid+1)*level*trac
- ! larray4a=0.
- ! do l=1,trac
- ! do k=1,levels
- ! larray4a(i0:i1,j0:j1,k,l)=real((myid+1)*k*l)
- ! end do
- ! end do
-
- ! ! glb4b is global array, used in root only
- ! if (isRoot) then
- ! allocate( glb4b( DistGrid%im_region, DistGrid%jm_region, levels, trac) )
- ! else
- ! allocate( glb4b(1,1,1,1) )
- ! end if
- ! !----------------------
- ! ! test GATHER_4D_R
- ! !----------------------
- ! glb4b=0.
- ! larray4b=0.
- ! ! gather
- ! call gather( DistGrid, larray4a, glb4b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! broadcast result
- ! if (isRoot) glb4a = glb4b
- ! #ifdef MPI
- ! call MPI_bcast(glb4a, size(glb4a), my_real, root, localComm, ierr)
- ! #endif
- ! larray4b(i0:i1,j0:j1,:,:) = glb4a(i0:i1,j0:j1,:,:)
- ! larray4b = larray4a-larray4b
- ! ! diff should be 0
- ! if (maxval(abs(larray4b)) /= 0.) then
- ! print*, "test gather 4d FAILED"
- ! status=1
- ! else
- ! print*, "test gather 4d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
-
- ! call par_barrier()
-
- ! !----------------------
- ! ! test SCATTER_4D_R
- ! !----------------------
- ! larray4b=0.
- ! call scatter( DistGrid, larray4b, glb4b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! larray4b=larray4a-larray4b
- ! ! diff should be 0
- ! if (maxval(abs(larray4b)) /= 0.) then
- ! print*, "test scatter 4d FAILED"
- ! status=1
- ! else
- ! print*, "test scatter 4d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray4a,larray4b,glb4a,glb4b)
-
- ! call par_barrier()
-
- ! ! *************************************** HALO ***************************
-
- ! !----------------------
- ! ! test UPDATE_HALO_2D_R
- ! !----------------------
- ! ! Allocate 2D
- ! allocate( larray2a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! allocate( larray2b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo) )
- ! ! array to update : in halo set to 0, elsewhere to myid
- ! larray2b=0.
- ! larray2b(i0:i1,j0:j1)=real(myid)
- ! ! test array : fill halo with neighbors rank
- ! larray2a=0.
- ! larray2a( i0-x_halo:i0-1, j0:j1 ) = DistGrid%neighbors(1) ! west halo
- ! larray2a( i1+1:i1+x_halo, j0:j1 ) = DistGrid%neighbors(3) ! east halo
- ! larray2a( i0:i1, j1+1:j1+x_halo ) = DistGrid%neighbors(2) ! north halo
- ! larray2a( i0:i1, j0-x_halo:j0-1 ) = DistGrid%neighbors(4) ! south halo
- ! larray2a(i0:i1,j0:j1)=real(myid)
- ! where (larray2a == MPI_PROC_NULL) larray2a=0.
-
- ! ! update
- ! CALL UPDATE_HALO( DISTGRID, larray2b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! if (isRoot.and.(x_halo==1)) then ! 32 is size to look good for 2x2 processes and halo=1
- ! print*, "----------------------------"
- ! print '(32F4.0)', larray2a
- ! print*, "----------------------------"
- ! print '(32F4.0)', larray2b
- ! print*, "----------------------------"
- ! end if
- ! ! compare (diff should be 0)
- ! larray2b=larray2a-larray2b
- ! if (maxval(abs(larray2b)) /= 0.) then
- ! print*, "test update_halo 2d FAILED"
- ! status=1
- ! else
- ! print*, "test update_halo 2d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray2a,larray2b)
- ! call par_barrier()
-
- ! !----------------------
- ! ! test UPDATE_HALO_3D_R
- ! !----------------------
- ! ! Allocate 3D
- ! allocate( larray3a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! allocate( larray3b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels) )
- ! ! array to update : in halo set to 0, elsewhere to myid
- ! larray3b=0.
- ! larray3b(i0:i1,j0:j1,:)=real(myid)
- ! ! test array : fill halo with neighbors rank
- ! larray3a=0.
- ! larray3a( i0-x_halo:i0-1, j0:j1, : ) = DistGrid%neighbors(1) ! west halo
- ! larray3a( i1+1:i1+x_halo, j0:j1, : ) = DistGrid%neighbors(3) ! east halo
- ! larray3a( i0:i1, j1+1:j1+x_halo, : ) = DistGrid%neighbors(2) ! north halo
- ! larray3a( i0:i1, j0-x_halo:j0-1, : ) = DistGrid%neighbors(4) ! south halo
- ! larray3a(i0:i1,j0:j1,:)=real(myid) ! interior
- ! where (larray3a == MPI_PROC_NULL) larray3a=0. !if no neighbor
- ! ! update
- ! CALL UPDATE_HALO( DISTGRID, larray3b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! compare (diff should be 0)
- ! larray3b=larray3a-larray3b
-
- ! if (maxval(abs(larray3b)) /= 0.) then
- ! print*, "test update_halo 3d FAILED"
- ! status=1
- ! else
- ! print*, "test update_halo 3d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
-
- ! ! CLEANUP
- ! deallocate(larray3a,larray3b)
-
- ! call par_barrier()
-
- ! !----------------------
- ! ! test UPDATE_HALO_4D_R
- ! !----------------------
- ! ! Allocate 4D
- ! allocate( larray4a( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! allocate( larray4b( i0-x_halo:i1+x_halo, j0-x_halo:j1+x_halo, levels, trac) )
- ! ! array to update : in halo set to 0, elsewhere to myid
- ! larray4b=0.
- ! larray4b(i0:i1,j0:j1,:,:)=real(myid)
- ! ! test array : fill halo with neighbors rank
- ! larray4a=0.
- ! larray4a( i0-x_halo:i0-1, j0:j1, :,: ) = DistGrid%neighbors(1) ! west halo
- ! larray4a( i1+1:i1+x_halo, j0:j1, :,: ) = DistGrid%neighbors(3) ! east halo
- ! larray4a( i0:i1, j1+1:j1+x_halo, :,: ) = DistGrid%neighbors(2) ! north halo
- ! larray4a( i0:i1, j0-x_halo:j0-1, :,: ) = DistGrid%neighbors(4) ! south halo
- ! larray4a(i0:i1,j0:j1,:,:)=real(myid) ! interior
- ! where (larray4a == MPI_PROC_NULL) larray4a=0. !if no neighbor
- ! ! update
- ! CALL UPDATE_HALO( DISTGRID, larray4b, x_halo, status)
- ! IF_NOTOK_RETURN(status=1)
- ! ! compare (diff should be 0)
- ! larray4b=larray4a-larray4b
- ! if (maxval(abs(larray4b)) /= 0.) then
- ! print*, "test update_halo 4d FAILED"
- ! status=1
- ! else
- ! print*, "test update_halo 4d PASSED"
- ! end if
- ! IF_NOTOK_RETURN(status=1)
- ! ! CLEANUP
- ! deallocate(larray4a,larray4b)
-
- ! call par_barrier()
- status=0
-
- END SUBROUTINE TESTCOMM
- !EOC
- END MODULE DOMAIN_DECOMP
|