! machine-ecmwf-cca-ifort.rc
!===============================================
! compiler information (Intel compiler [ifort])
!===============================================
! Ifort compiler: do not include a compiler.rc, since Cray provides the ftn
! wrapper. Just set flags here. Hardware-specific flags are normally
! already included in the ftn wrapper.
compiler.fc : ftn
compiler.f77 : ${compiler.fc}
compiler.fc.openmp : ${compiler.fc}
mpi.compiler.fc : ${compiler.fc}
mpi.compiler.fc.openmp : ${compiler.fc}
! ifort (IFORT) 14.0.1 20131008 or 15.0.1 20141023
compiler.getversion_flag : --version
compiler.flags.default.fflags : -warn declarations -traceback
compiler.flags.default.ldflags :
compiler.flags.real8.fflags : -real-size 64
compiler.flags.real8.ldflags :
compiler.flags.mpi.fflags :
compiler.flags.mpi.ldflags :
compiler.flags.openmp.fflags : -fopenmp
compiler.flags.openmp.ldflags :
compiler.flags.optim-none.fflags : -O0
compiler.flags.optim-none.ldflags :
! for bit reproducibility (note: '-xCORE-AVX-I' is on by default, since hardware related)
compiler.flags.optim-strict.fflags : -O2 -fp-model strict -fp-model source
compiler.flags.optim-strict.ldflags :
compiler.flags.optim-fast.fflags : -fast
compiler.flags.optim-fast.ldflags : -fast
compiler.flags.optim-vfast.fflags :
compiler.flags.optim-vfast.ldflags :
! runtime checks
compiler.flags.check-all.fflags : -check bounds,uninit -ftrapuv -fpe0 -fp-model strict -fp-model source
!compiler.flags.check-all.fflags : -check bounds -ftrapuv -fpe0
compiler.flags.check-all.ldflags :
! copied from pycasso-compiler-ifort-12.1.rc
compiler.flags.debug.fflags : -g -debug all -fp-model strict -fp-model source
compiler.flags.debug.ldflags :
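!
! For illustration only: with the 'real8' and 'optim-strict' groups selected,
! the effective compile command is expected to combine the flag groups above
! along these lines (a sketch, not verbatim pycasso output; 'source.F90' is a
! hypothetical file name):
!
!   ftn -warn declarations -traceback -real-size 64 -O2 -fp-model strict -fp-model source -c source.F90
!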
!===============================================
! libraries
!===============================================
! ftn is a wrapper that already knows all library locations, so we just need to specify the flags here.
! Z library (used for compression in HDF)
compiler.lib.z.fflags :
compiler.lib.z.libs : -lz
! JPEG library (used for compression in HDF)
compiler.lib.jpeg.fflags :
compiler.lib.jpeg.libs : -ljpeg
! SZ library (used for compression in HDF)
SZIP_VERSION :
SZIP_HOME :
compiler.lib.sz.fflags :
compiler.lib.sz.libs :
! HDF4 library (without netcdf interface)
HDF_VERSION :
HDF_HOME : ${HDF_DIR}
compiler.lib.hdf4.fflags : -I${HDF_HOME}/include
compiler.lib.hdf4.libs : -L${HDF_HOME}/lib -lmfhdf -ldf
! HDF5 library:
HDF5_VERSION :
HDF5_HOME :
compiler.lib.hdf5.fflags :
compiler.lib.hdf5.libs : -lhdf5_hl -lhdf5
! HDF5 library with parallel features enabled:
HDF5_PAR_VERSION :
HDF5_PAR_HOME :
compiler.lib.hdf5_par.fflags :
compiler.lib.hdf5_par.libs : -lhdf5_hl -lhdf5
! NetCDF library:
NETCDF_VERSION :
NETCDF_HOME :
compiler.lib.netcdf.fflags :
compiler.lib.netcdf.libs : -lnetcdff -lnetcdf
! NetCDF4 library:
NETCDF4_VERSION :
NETCDF4_HOME :
compiler.lib.netcdf4.fflags :
compiler.lib.netcdf4.libs : -lnetcdff -lnetcdf
! NetCDF4 library with parallel features enabled:
NETCDF4_PAR_VERSION :
NETCDF4_PAR_HOME :
compiler.lib.netcdf4_par.fflags :
compiler.lib.netcdf4_par.libs : -lnetcdff -lnetcdf
! UDUNITS v1.x library (not available)
UDUNITS_HOME :
compiler.lib.udunits1.fflags : -I${UDUNITS_HOME}/include
compiler.lib.udunits1.libs : -L${UDUNITS_HOME}/lib -ludunits
! UDUNITS v2.x library (available through module, nothing to specify here)
compiler.lib.udunits2.fflags :
compiler.lib.udunits2.libs :
! OASIS3 library ( ${ECEARTH} is defined in your environment )
! Bother with this only if we are within EC-Earth
#if "oasis3" in "${my.tm5.define}":
OASIS3_ARCH : aix
OASIS3_MPIV : MPI1
OASIS3_HOME : ${ECEARTH}/oasis3/prism_2-5/prism/${OASIS3_ARCH}
compiler.lib.oasis3.fflags : -I${OASIS3_HOME}/build/lib/psmile.${OASIS3_MPIV}
compiler.lib.oasis3.libs : -L${OASIS3_HOME}/lib -lpsmile.${OASIS3_MPIV} -lmpp_io
#endif
! MPI library
compiler.lib.mpi.fflags :
compiler.lib.mpi.libs :
! GRIBEX library
compiler.lib.gribex.fflags :
compiler.lib.gribex.libs :
! GRIB-API library
compiler.lib.grib_api.fflags : ${GRIB_API_INCLUDE}
compiler.lib.grib_api.libs : ${GRIB_API_LIB}
! LAPACK library:
LAPACK_LIB_DIR : /opt/intel/composer_xe_2015.1.133/mkl/lib/intel64
compiler.lib.lapack.fflags :
compiler.lib.lapack.libs : -L${LAPACK_LIB_DIR} -lmkl_intel_lp64 -lmkl_core -lmkl_sequential
!===============================================================
! SETTINGS FOR BATCH SCHEDULER (qsub,bsub,loadleveler,slurm,pbs)
!===============================================================
queue : pbs
! passed directly to qsub command
queue.pbs.submit.options :
! If set to 2, 72 (logical) cores are available per node; if set to 1,
! only the 36 physical cores. In both cases, 120 GB per node are available.
hyperthread : 2
! number of tasks (trap non-MPI cases)
#if "${par.mpi}" in ["T","True"] :
my.queue.ntask : ${par.ntask}
#else
my.queue.ntask : 1
#endif
! number of threads (trap non-OpenMP cases)
#if "${par.openmp}" in ["T","True"] :
my.queue.nthread : ${par.nthread}
#else
my.queue.nthread : 1
#endif
!------------------------------------------------------------------
! PBS queue options: if set to <none>, an option is skipped; if set to
! <auto>, it is filled in by the python script; if empty, it is used as an
! 'option without argument'.
!
! The "l" option gets special treatment: it is itself seen as a space-
! separated list of options, and is skipped if empty.
!
! See bin/submit_tm5_tools.py/QueueOptions_PBS for details.
!------------------------------------------------------------------
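!
! For illustration (a sketch of what QueueOptions_PBS is expected to emit, not
! verbatim output): with 'j : oe' and 'm : n' below, the job script would
! carry directives along the lines of:
!
!   #PBS -j oe
!   #PBS -m n
!
! while an empty value such as 'V :' (if listed among the options) would
! yield the bare '#PBS -V'.
!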
  143. ! ** queue options with same value for ALL steps
  144. !-----------------------------------------------
  145. ! space seperated list:
  146. queue.pbs.options.all : j m l
  147. ! shell (DO NOT USE IT! KEPT HERE TO REMEMBER)
  148. queue.pbs.option.all.S : /opt/pbs/default/bin/pbs_python
  149. ! join output/error log: oe (joined in out) | eo (joined in err) | n (not joined)
  150. queue.pbs.option.all.j : oe
  151. ! when to send emails : a (abort) | b (begin) | e (end) | n (never)
  152. queue.pbs.option.all.m : n
  153. ! pass environment (ECMWF doc says to be careful)
  154. queue.pbs.option.all.V :
  155. ! account (to overwrite default one attached to $USER)
  156. #if ("${my.queue.account}" == ""):
  157. queue.pbs.option.all.l :
  158. #else
  159. queue.pbs.option.all.l : EC_billing_account=${my.queue.account}
  160. #endif
!------------------------------------------------
! ** queue options with different values per step
!
! To request a walltime limit (which can lower the waiting time in the
! queue), add walltime=<hh:mm:ss> to the *run.l list, as illustrated right
! below.
!------------------------------------------------
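! For example (commented out, an illustration only; the actual *run.l lists
! are set further below):
!queue.pbs.option.run.l : walltime=06:00:00 EC_total_tasks=${my.queue.ntask} EC_hyperthreads=${hyperthread}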
! list options per step:
queue.pbs.options.init : N q o l
queue.pbs.options.run : N q o l
queue.pbs.options.done : N q o
! ~~~~~ step init options
queue.pbs.option.init.q : ns
queue.pbs.option.init.N : ${job.name}_init
queue.pbs.option.init.o : <auto>
queue.pbs.option.init.l : EC_ecfs=1
! ~~~~~ step done options
queue.pbs.option.done.q : ns
queue.pbs.option.done.N : ${job.name}_done
queue.pbs.option.done.o : <auto>
queue.pbs.option.done.W : block=true
! ~~~~~ step run options
queue.pbs.option.run.N : ${job.name}_run
queue.pbs.option.run.o : <auto>
! check whether the run step needs fractional (nf) or full (np) node occupancy,
! which depends on: hyperthreading (1 or 2), ntask and nthread
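! For example, assuming hyperthread=2 (threshold 2*18 = 36 cores):
!   ntask=1,  nthread=1  ->   1 core         -> ns (serial)
!   ntask=4,  nthread=2  ->   8 <= 36 cores  -> nf (fractional node)
!   ntask=36, nthread=2  ->  72 >  36 cores  -> np (full node)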
#if (${my.queue.ntask}*${my.queue.nthread} == 1 ) :
queue.pbs.option.run.q : ns
module.cmd:
#elif (${my.queue.ntask}*${my.queue.nthread} > ${hyperthread}*18 ) :
queue.pbs.option.run.q : np
module.cmd:
#else
queue.pbs.option.run.q : nf
! NOTE: the module command, even with its fully qualified path, is not found
! from within the python script. It is needed here only to put mpiexec on the
! $PATH, so we revert to using the fully qualified path of mpiexec below.
module.cmd:
!module.cmd: /opt/modules/3.2.6.7/bin/modulecmd bash load cray-snplauncher
#endif
#if (${my.queue.nthread} > 1):
queue.pbs.option.run.l : EC_total_tasks=${my.queue.ntask} EC_threads_per_task=${my.queue.nthread} EC_hyperthreads=${hyperthread}
#else
queue.pbs.option.run.l : EC_total_tasks=${my.queue.ntask} EC_hyperthreads=${hyperthread}
#endif
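! For example, with ntask=4, nthread=2 and hyperthread=2, the run step is
! expected to be submitted with (a sketch, assuming QueueOptions_PBS turns
! each list item into a '-l' resource request):
!   #PBS -l EC_total_tasks=4
!   #PBS -l EC_threads_per_task=2
!   #PBS -l EC_hyperthreads=2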
queue.pbs.option.run.W : block=true
! TODO if needed: resources (through -l EC_memory_per_task)
!queue.pbs.option.run.resources : ${my.resource.memory}
!===============================================
! make
!===============================================
! the setup script will insert the 'build.jobs' value specified in expert.rc,
! or the one passed as an argument to the setup script:
maker : gmake -j %{build.jobs}
! Submit compilation as a job script?
! - if F (default), pycasso calls ${maker} directly
! - if T, compilation is submitted according to the "submit.to" key (whose
!   default is set in expert.rc and can be overwritten at the CLI): either
!   "queue" or foreground.
!
! "my.queue.make" is set in the main rc file (so the user can switch it
! on/off from there), for example as shown below.
!
build.make.submit : ${my.queue.make}
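! For example, in the main rc file (an illustration only):
!my.queue.make : T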
! sub options: assume that %{build.jobs} is less than 24 (the default is set in expert.rc, probably to 8)
queue.pbs.options.build : N q o l
queue.pbs.option.build.q : nf
queue.pbs.option.build.N : build-tm
queue.pbs.option.build.o : <auto>
queue.pbs.option.build.l : EC_total_tasks=${build.jobs} EC_hyperthreads=1
!==========================================================================
! MPI runner (LUCKILY, THE ENVIRONMENT VARIABLES ARE **NOT** EXPANDED UNTIL
! SCRIPTS ARE SUBMITTED). Depends on:
! (1) pure OpenMP, pure MPI, or hybrid runs
! (2) node occupancy: fractional or full (see above)
!==========================================================================
#if "${queue.pbs.option.run.q}" == "np":
mpirun.command : aprun
#if (${my.queue.nthread} > 1):
! hybrid (TODO: does it cover pure OpenMP?)
mpirun.args : -N $EC_tasks_per_node -n $EC_total_tasks -d $OMP_NUM_THREADS -j $EC_hyperthreads
#else
! pure MPI
mpirun.args : -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads
#endif
#else
mpirun.command : /opt/cray/snplauncher/6.3.1/bin/mpiexec
! pure MPI only. TODO: hybrid and pure OpenMP
mpirun.args : -n $EC_total_tasks
#endif
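! For illustration (a sketch; the EC_* variables are set by PBS at run time):
! a full-node run with 36 tasks, 2 threads per task and hyperthread=2 would
! expand to something like:
!   aprun -N 36 -n 36 -d 2 -j 2 <executable>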
! name of command and host files (leave empty to not write):
mpirun.cmdfile :
mpirun.hostfile :
!===============================================
! debugger (FIXME)
!===============================================
! debugger type: totalview | idb | kdbg
debugger : totalview
! command for debugger:
debugger.command : totalview -searchPath=${build.sourcedir}
!debugger.command : totalview
!===============================================
! model data
!===============================================
! the user scratch directory:
my.scratch : ${SCRATCH}
! base path to various data files (static):
my.data.dir : /perm/ms/nl/nm6/TM5_INPUT
! permanent archives to search for meteo files:
#if "${my.meteo.format}" == "tm5-nc"
my.meteo.search : ecfs:/nlh/TM/meteo-nc
#else
my.meteo.search : ecfs:/nlh/TM/meteo
#endif
! extra install tasks:
my.install.tasks :