machine-ecmwf-cca-cray.rc 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350
  1. !===============================================
  2. ! compiler information (CRAY compiler)
  3. !===============================================
  4. ! Cray compiler: do not include a compiler.rc, since CRAY uses ftn
  5. ! wrapper. Just set flag here. Flags dedicated to hardware are normally
  6. ! already included in the ftn wrapper.
  7. compiler.fc : ftn
  8. compiler.f77 : ${compiler.fc}
  9. compiler.fc.openmp : ${compiler.fc}
  10. mpi.compiler.fc : ${compiler.fc}
  11. mpi.compiler.fc.openmp : ${compiler.fc}
  12. ! Cray Fortran : Version 8.2.2 Tue Jun 24, 2014 09:47:40
  13. compiler.getversion_flag : -V
  14. ! openMP is on by default
  15. #if "${par.openmp}" in ["T","True"] :
  16. my.default.fflags :
  17. #else
  18. my.default.fflags : -hnoomp
  19. #endif
  20. compiler.flags.default.fflags : ${my.default.fflags}
  21. compiler.flags.default.ldflags :
  22. compiler.flags.real8.fflags : -s real64
  23. compiler.flags.real8.ldflags :
  24. compiler.flags.mpi.fflags :
  25. compiler.flags.mpi.ldflags :
  26. compiler.flags.openmp.fflags :
  27. compiler.flags.openmp.ldflags :
  28. ! Not recommended by Cray
  29. compiler.flags.optim-none.fflags : -O0
  30. compiler.flags.optim-none.ldflags :
  31. ! Optim-strict:
  32. ! It uses the -O2 default optimization, which is equivalent to -O3 in most other compilers.
  33. !
  34. ! Note that -hflex_mp=intolerant,strict,conservative,default, OR tolerant.
  35. !
  36. ! Note that "hflex_mp=intolerant is the option most certain to provide bit reproducibility,
  37. ! although it also has the highest impact on performance. hflex_mp=conservative has
  38. ! comparatively little impact on performance, but is not strict enough for some applications"
  39. !
  40. ! Conclusion: replace 'conservative' with 'intolerant' for bit reproducibility
  41. compiler.flags.optim-strict.fflags : -hflex_mp=conservative -hadd_paren -hfp1
  42. compiler.flags.optim-strict.ldflags :
  43. compiler.flags.optim-fast.fflags : -O3 -hfp3
  44. compiler.flags.optim-fast.ldflags : -O3
  45. compiler.flags.optim-vfast.fflags :
  46. compiler.flags.optim-vfast.ldflags :
  47. ! runtime checks: floating point operations and check bounds
  48. compiler.flags.check-all.fflags : -Ktrap=fp -Rb
  49. compiler.flags.check-all.ldflags :
  50. ! debug: "-eD" is same as "-g -m2 -rl -R bcdsp". Add -m0 for exhaustive messaging.
  51. compiler.flags.debug.fflags : -eD
  52. compiler.flags.debug.ldflags :
  53. !===============================================
  54. ! libraries
  55. !===============================================
  56. ! ftn is a wrapper, which already knows all libraries locations. Then we just need to specify the flags here.
  57. ! Z library (used for compression in HDF)
  58. compiler.lib.z.fflags :
  59. compiler.lib.z.libs : -lz
  60. ! JPEG library (used for compression in HDF)
  61. compiler.lib.jpeg.fflags :
  62. compiler.lib.jpeg.libs : -ljpeg
  63. ! SZ library (used for compression in HDF)
  64. SZIP_VERSION :
  65. SZIP_HOME :
  66. compiler.lib.sz.fflags :
  67. compiler.lib.sz.libs :
  68. ! HDF4 library:
  69. HDF_VERSION :
  70. HDF_HOME :
  71. compiler.lib.hdf4.fflags :
  72. compiler.lib.hdf4.libs :
  73. ! HDF5 library:
  74. HDF5_VERSION :
  75. HDF5_HOME :
  76. compiler.lib.hdf5.fflags :
  77. compiler.lib.hdf5.libs : -lhdf5_hl -lhdf5
  78. ! HDF5 library with parallel features enabled:
  79. HDF5_PAR_VERSION :
  80. HDF5_PAR_HOME :
  81. compiler.lib.hdf5_par.fflags :
  82. compiler.lib.hdf5_par.libs : -lhdf5_hl -lhdf5
  83. ! NetCDF library:
  84. NETCDF_VERSION :
  85. NETCDF_HOME :
  86. compiler.lib.netcdf.fflags :
  87. compiler.lib.netcdf.libs : -lnetcdff -lnetcdf
  88. ! NetCDF4 library:
  89. NETCDF4_VERSION :
  90. NETCDF4_HOME :
  91. compiler.lib.netcdf4.fflags :
  92. compiler.lib.netcdf4.libs : -lnetcdff -lnetcdf
  93. ! NetCDF4 library with parallel features enabled:
  94. NETCDF4_PAR_VERSION :
  95. NETCDF4_PAR_HOME :
  96. compiler.lib.netcdf4_par.fflags :
  97. compiler.lib.netcdf4_par.libs : -lnetcdff -lnetcdf
  98. ! UDUNITS v1.x library (not available)
  99. UDUNITS_HOME :
  100. compiler.lib.udunits1.fflags : -I${UDUNITS_HOME}/include
  101. compiler.lib.udunits1.libs : -L${UDUNITS_HOME}/lib -ludunits
  102. ! UDUNITS v2.x library (available through module, nothing to specify here)
  103. compiler.lib.udunits2.fflags :
  104. compiler.lib.udunits2.libs :
  105. ! OASIS3 library ( ${ECEARTH} is defined in your environment )
  106. ! Bother with this only if we are within EC-Earth
  107. #if "oasis3" in "${my.tm5.define}":
  108. OASIS3_ARCH : aix
  109. OASIS3_MPIV : MPI1
  110. OASIS3_HOME : ${ECEARTH}/oasis3/prism_2-5/prism/${OASIS3_ARCH}
  111. compiler.lib.oasis3.fflags : -I${OASIS3_HOME}/build/lib/psmile.${OASIS3_MPIV}
  112. compiler.lib.oasis3.libs : -L${OASIS3_HOME}/lib -lpsmile.${OASIS3_MPIV} -lmpp_io
  113. #endif
  114. ! MPI library
  115. compiler.lib.mpi.fflags :
  116. compiler.lib.mpi.libs :
  117. ! GRIBEX library
  118. compiler.lib.gribex.fflags :
  119. compiler.lib.gribex.libs :
  120. ! GRIB-API library
  121. compiler.lib.grib_api.fflags : ${GRIB_API_INCLUDE}
  122. compiler.lib.grib_api.libs : ${GRIB_API_LIB}
  123. ! LAPACK library:
  124. compiler.lib.lapack.fflags :
  125. compiler.lib.lapack.libs :
  126. !===============================================================
  127. ! SETTINGS FOR BATCH SCHEDULER (qsub,bsub,loadleveler,slurm,pbs)
  128. !===============================================================
  129. queue : pbs
  130. ! passed directly to qsub command
  131. queue.pbs.submit.options :
  132. ! If set to 2, we have access to 72 (logical) cores per node, if set to 1,
  133. ! only to the 36 physical cores. In both cases, 120 GB per node are available.
  134. hyperthread : 2
  135. ! number of tasks (trap non-MPI cases)
  136. #if "${par.mpi}" in ["T","True"] :
  137. my.queue.ntask : ${par.ntask}
  138. #else
  139. my.queue.ntask : 1
  140. #endif
  141. ! number of threads (trap non-OpenMP cases)
  142. #if "${par.openmp}" in ["T","True"] :
  143. my.queue.nthread : ${par.nthread}
  144. #else
  145. my.queue.nthread : 1
  146. #endif
  147. !------------------------------------------------------------------
  148. ! PBS queue options: if set to <none>, it is skipped, to <auto> it is set in
  149. ! the python script. If empty it is used as 'option without
  150. ! argument'.
  151. !
  152. ! "l" option has special treatment: it is seen as a space-
  153. ! separated list of options itself, and is skipped if empty.
  154. !
  155. ! See bin/submit_tm5_tools.py/QueueOptions_PBS for details.
  156. !-----------------------------------------------
  157. ! ** queue options with same value for ALL steps
  158. !-----------------------------------------------
  159. ! space separated list:
  160. queue.pbs.options.all : j m l
  161. ! shell (DO NOT USE IT! KEPT HERE TO REMEMBER)
  162. queue.pbs.option.all.S : /opt/pbs/default/bin/pbs_python
  163. ! join output/error log: oe (joined in out) | eo (joined in err) | n (not joined)
  164. queue.pbs.option.all.j : oe
  165. ! when to send emails : a (abort) | b (begin) | e (end) | n (never)
  166. queue.pbs.option.all.m : n
  167. ! pass environment (ECMWF doc says to be careful)
  168. queue.pbs.option.all.V :
  169. ! account (to overwrite default one attached to $USER)
  170. #if ("${my.queue.account}" == ""):
  171. queue.pbs.option.all.l :
  172. #else
  173. queue.pbs.option.all.l : EC_billing_account=${my.queue.account}
  174. #endif
  175. !------------------------------------------------
  176. ! ** queue options with different values per step
  177. !
  178. ! To set a walltime request (which can lower the waiting in the queue), add walltime=<hh:mm:ss> to the list *run.l:
  179. !
  180. !------------------------------------------------
  181. ! list options per step:
  182. queue.pbs.options.init : N q o l
  183. queue.pbs.options.run : N q o l
  184. queue.pbs.options.done : N q o
  185. ! ~~~~~ step init options
  186. queue.pbs.option.init.q : ns
  187. queue.pbs.option.init.N : ${job.name}_init
  188. queue.pbs.option.init.o : <auto>
  189. queue.pbs.option.init.l : EC_ecfs=1
  190. ! ~~~~~ step done options
  191. queue.pbs.option.done.q : ns
  192. queue.pbs.option.done.N : ${job.name}_done
  193. queue.pbs.option.done.o : <auto>
  194. queue.pbs.option.done.W : block=true
  195. ! ~~~~~ step run options
  196. queue.pbs.option.run.N : ${job.name}_run
  197. queue.pbs.option.run.o : <auto>
  198. ! check if run step's gonna be fractional (nf) or full (nq) node occupancy,
  199. ! which depends on: hyperthreading (1 or 2), ntasks and nthreads
  200. #if (${my.queue.ntask}*${my.queue.nthread} == 1 ) :
  201. queue.pbs.option.run.q : ns
  202. module.cmd:
  203. #elif (${my.queue.ntask}*${my.queue.nthread} > ${hyperthread}*18 ) :
  204. queue.pbs.option.run.q : np
  205. module.cmd:
  206. #else
  207. queue.pbs.option.run.q : nf
  208. ! NOTE: the module command, even with the qualified path, is not found in the
  209. ! python script. It is here just to put mpiexec on the $PATH - so we revert to
  210. ! using the fully qualified path for mpiexec hereafter.
  211. module.cmd:
  212. !module.cmd: /opt/modules/3.2.6.7/bin/modulecmd bash load cray-snplauncher
  213. #endif
  214. #if (${my.queue.nthread} > 1):
  215. queue.pbs.option.run.l : EC_total_tasks=${my.queue.ntask} EC_threads_per_task=${my.queue.nthread} EC_hyperthreads=${hyperthread}
  216. #else
  217. queue.pbs.option.run.l : EC_total_tasks=${my.queue.ntask} EC_hyperthreads=${hyperthread}
  218. #endif
  219. queue.pbs.option.run.W : block=true
  220. ! TODO if needed: Resources (thru -l EC_memory_per_task)
  221. !queue.pbs.option.run.resources : ${my.resource.memory}
  222. !===============================================
  223. ! make
  224. !===============================================
  225. ! the setup script will insert the 'build.jobs' specified in the expert.rc
  226. ! or passed as argument to the setup script, and add Makefile and target to
  227. ! the command:
  228. maker : gmake -j %{build.jobs}
  229. ! Submit compilation with a job script?
  230. ! - if F (default), pycasso calls the ${maker}
  231. ! - if T, then compilation is submitted according to "submit.to" key (default
  232. ! of which is set in expert.rc, and can be overwritten at the CLI): either "queue", or foreground.
  233. ! THEN you SHOULD call "setup_tm5 -m..." to wait that compilation is done.
  234. !
  235. ! "my.queue.make" is set in the main rc file (so user can switch it on/off from there).
  236. !
  237. build.make.submit : ${my.queue.make}
  238. ! sub options: assume that %{build.jobs} is less than 24 (default is set in expert, probably to 8)
  239. queue.pbs.options.build : N q o l
  240. queue.pbs.option.build.q : nf
  241. queue.pbs.option.build.N : build-tm
  242. queue.pbs.option.build.o : <auto>
  243. queue.pbs.option.build.l : EC_total_tasks=${build.jobs} EC_hyperthreads=1
  244. !==========================================================================
  245. ! MPI runner (LUCKILY, THE ENVIRONMENT VARIABLES ARE **NOT** EXPANDED UNTIL
  246. ! SCRIPTS ARE SUBMITTED). Depends on:
  247. ! (1) pure openMP, pure MPI or hybrid runs
  248. ! (2) node occupancy: fractional or full (see above)
  249. !==========================================================================
  250. #if "${queue.pbs.option.run.q}" == "np":
  251. mpirun.command : aprun
  252. #if (${my.queue.nthread} > 1):
  253. ! Hybrid (TODO: does it cover pure openMP?)
  254. mpirun.args : -N $EC_tasks_per_node -n $EC_total_tasks -d $OMP_NUM_THREADS -j $EC_hyperthreads
  255. #else
  256. !pure MPI
  257. mpirun.args : -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads
  258. #endif
  259. #else
  260. mpirun.command : /opt/cray/snplauncher/6.3.1/bin/mpiexec
  261. ! pure MPI only. TODO: hybrid, and pure openMP
  262. mpirun.args : -n $EC_total_tasks
  263. #endif
  264. ! name of command and host files (leave empty to not write):
  265. mpirun.cmdfile :
  266. mpirun.hostfile :
  267. !===============================================
  268. ! debugger (FIXME)
  269. !===============================================
  270. ! debugger type: totalview | idb | kdbg
  271. debugger : totalview
  272. ! command for debugger:
  273. debugger.command : totalview -searchPath=${build.sourcedir}
  274. !debugger.command : totalview
  275. !===============================================
  276. ! model data
  277. !===============================================
  278. ! the user scratch directory:
  279. my.scratch : ${SCRATCH}
  280. ! base path to various data files (static):
  281. my.data.dir : /perm/ms/nl/nm6/TM5_INPUT
  282. ! permanent archives to search for meteo files:
  283. my.meteo.search : ecfs:/nlh/TM/meteo-nc
  284. ! extra install tasks:
  285. my.install.tasks :