pycasso-machine-cineca-sp6.rc
  1. !
  2. ! compiler information
  3. !
  4. ! template settings for IBM xlf compiler:
  5. #include base/${my.branch}/rc/pycasso-compiler-xlf-12.1.rc
  6. ! Suppress warnings on Cineca/SP6:
  7. my.default.fflags : -qsuppress=1501-245
  8. !
  9. ! libraries
  10. !
  11. ! Z library (used for compression in HDF4)
  12. compiler.lib.z.fflags :
  13. compiler.lib.z.libs : -L${ZLIB_LIB} -lz
  14. ! JPEG library (used for compression in HDF4)
  15. compiler.lib.jpeg.fflags :
  16. compiler.lib.jpeg.libs : -L${LIBJPEG_LIB} -ljpeg
  17. ! SZ library (used for compression in HDF4)
  18. compiler.lib.sz.fflags :
  19. compiler.lib.sz.libs : -L${SZLIB_HOME}/lib -lsz
  20. ! HDF4 library:
  21. compiler.lib.hdf4.fflags : -I${HDF4_INC}
  22. compiler.lib.hdf4.libs : -L${HDF4_LIB} -lmfhdf -ldf
  23. ! NetCDF libraries:
  24. NETCDF_VERSION : 4.0.1
  25. !
  26. NETCDF_HOME : /cineca/prod/libraries/netcdf/${NETCDF_VERSION}/xl--10.1
  27. compiler.lib.netcdf.fflags : -I${NETCDF_HOME}/include
  28. compiler.lib.netcdf.libs : -L${NETCDF_HOME}/lib -lnetcdf
  29. !
  30. NETCDF4_HOME : /cineca/prod/libraries/netcdf/${NETCDF_VERSION}_ser/xl--10.1
  31. compiler.lib.netcdf4.fflags : -I${NETCDF4_HOME}/include
  32. compiler.lib.netcdf4.libs : -L${NETCDF4_HOME}/lib -lnetcdf
  33. !
  34. NETCDF4_PAR_HOME : /cineca/prod/libraries/netcdf/${NETCDF_VERSION}_gpfs/xl--10.1
  35. compiler.lib.netcdf4_par.fflags : -I${NETCDF4_PAR_HOME}/include
  36. compiler.lib.netcdf4_par.libs : -L${NETCDF4_PAR_HOME}/lib -lnetcdf
  37. ! HDF5 libraries:
  38. HDF5_VERSION : 1.8.4
  39. !
  40. HDF5_HOME : /cineca/prod/libraries/hdf5/${HDF5_VERSION}_ser/xl--10.1
  41. compiler.lib.hdf5.fflags :
  42. compiler.lib.hdf5.libs : -L${HDF5_HOME}/lib -lhdf5_hl -lhdf5
  43. !
  44. HDF5_PAR_HOME : /cineca/prod/libraries/hdf5/${HDF5_VERSION}_par/xl--10.1
  45. compiler.lib.hdf5_par.fflags :
  46. compiler.lib.hdf5_par.libs : -L${HDF5_PAR_HOME}/lib -lhdf5_hl -lhdf5
  47. ! MPI library: automatically included using 'mpxlf' wrapper
  48. compiler.lib.mpi.fflags :
  49. compiler.lib.mpi.libs :
  50. ! OpenMP library: automatically included using 'xlf_r' wrapper
  51. compiler.lib.openmp.fflags :
  52. compiler.lib.openmp.libs :
  53. !! GRIB library:
  54. !compiler.lib.grib.fflags :
  55. !compiler.lib.grib.libs : ${EMOS_LIB}
  56. !! Lapack library:
  57. !compiler.lib.lapack.fflags :
  58. !compiler.lib.lapack.libs : ${LAPACK_LIB}
  59. ! TotalView memory debugging: (DOES NOT WORK ON CINECA YET)
  60. !TV_VERSION : 8.7.0-4
  61. !TV_ARCH : rs6000
  62. !TV_HOME : /cineca/prod/tools/totalview/${TV_VERSION}/binary/toolworks/totalview.${TV_VERSION}/${TV_ARCH}
  63. !TV_HOME_MR : ${CCU_DATA}/opt/totalview.${TV_VERSION}/${TV_ARCH}
  64. !compiler.lib.tv.fflags :
  65. !compiler.lib.tv.libs : -L${TV_HOME_MR}/lib -L${TV_HOME}/lib ${TV_HOME}/lib/aix_malloctype64_5.o
  66. !
  67. ! settings for LoadLeveler queue
  68. !
  69. !
  70. ! loadleveler classes
  71. !
  72. ! o llclass
  73. ! o https://hpc.cineca.it/docs/startup-guides/SpClasses
  74. !
  75. ! +------------+--------+-----------+----------+-------------+--------------------+
  76. ! | queue name | max    | max       | max      | default     | possible           |
  77. ! |            | tasks  | wall time | mem (Gb) | wait time   |                    |
  78. ! +------------+--------+-----------+----------+-------------+--------------------+
  79. ! | debug      |     32 | 10 min    |        7 | 0.5 minutes |                    |
  80. ! |            |        |           |          |             |                    |
  81. ! | serial     |      4 | 6 days    |       96 | 2 days      |                    |
  82. ! | small      |     64 | 6 hours   |       56 | 0.5 hours   |                    |
  83. ! | parallel   |   9728 | 6 hours   |      112 | 1 hours     |                    |
  84. ! | longpar    |   1024 | 1 day     |       56 | 1 days      |                    |
  85. ! |            |        |           |          |             |                    |
  86. ! | monster    |   9728 | 6 days    |      112 | 1           | only special       |
  87. ! |            |        |           |          |             |                    |
  88. ! | longdebug  |        |           |          |             |                    |
  89. ! | archive    |      1 | 4 hours   |        - | -           | for "cart" only    |
  90. ! | private    |        |           |          |             | only internal use  |
  91. ! +------------+--------+-----------+----------+-------------+--------------------+
  92. !
  93. ! If not defined, the class is set automatically given the requested
  94. ! number of cpu's and the wall_clock_limit .
  95. ! However, for TM5 we explicitly need to define the class 'archive'
  96. ! for the 'init' and 'done' steps of the jobs, therefore we need
  97. ! to define a 'class' for the 'run' step explicitly too since it
  98. ! remains 'archive' otherwise. Now set to 'serial', but the submit
  99. ! command will probably re-set it to something else if the number
  100. ! of cpu's or the wall_clock_limit do not fit for this class.
  101. !
  102. ! Architecture:
  103. ! 168 nodes ; per node:
  104. ! - 128 Gb memory (4 Gb/core ?)
  105. ! - 32 cores ;
  106. ! - ST (Single Thread) mode : 1 'cpu'/core
  107. ! - SMT (Symmetric Multi-Threading) mode : 2 cpu/core
  108. !
  109. ! In theory 2 Gb memory per cpu, but errors about limit of 650 Mb .
  110. !
  111. ! MPI/OpenMP following the "User Guide 2010" :
  112. ! o serial job:
  113. ! # @ job_type = serial
  114. ! o MPI job: 32 tasks, each task on 1 core
  115. ! # @ job_type = parallel
  116. ! # @ total_tasks = 32
  117. ! # @ task_affinity = core(1)
  118. ! o OpenMP job: 1 task on 32 cpu's, 32 threads
  119. ! # @ job_type = serial
  120. ! # @ task_affinity = cpu(32)
  121. ! # @ parallel_threads = 32
  122. ! o MPI/OpenMP job: 8 MPI tasks, each task running on
  123. ! 4 cores in ST mode providing 4 OpenMP threads/task :
  124. ! # @ job_type = parallel
  125. ! # @ total_tasks = 8
  126. ! # @ task_affinity = core(4)
  127. ! # @ parallel_threads = 4
  128. ! o MPI/OpenMP job: 8 MPI tasks, each task running on
  129. ! 4 cores in SMT mode (8 cpu's) providing 8 OpenMP threads/task :
  130. ! # @ job_type = parallel
  131. ! # @ total_tasks = 8
  132. ! # @ task_affinity = cpu(8)
  133. ! # @ parallel_threads = 8
  134. !
  135. ! queue system (bsub,loadleveler)
  136. queue : loadleveler
  137. ! passed directly:
  138. queue.ll.submit.options :
  139. ! number of processors per node on this machine:
  140. queue.ll.npe_per_node : 32
  141. ! ** queue options with same value for all steps:
  142. ! space separated list:
  143. queue.ll.options.default : job_name notification initialdir environment
  144. ! job name:
  145. queue.ll.option.default.job_name : ${job.name}
  146. ! when to send emails : always | error | start | never | complete*
  147. !queue.ll.option.default.notification : never
  148. queue.ll.option.default.notification : error
  149. ! run directory:
  150. queue.ll.option.default.initialdir : ${rundir}
  151. ! current path:
  152. queue.ll.option.default.environment : PATH = ${PATH} ; DISPLAY = ${DISPLAY}
  153. ! ** queue options with different values per step:
  154. ! same for all:
  155. my.ll.options.steps : output error wall_clock_limit class job_type task_affinity parallel_threads
  156. ! list options per step:
  157. queue.ll.options.init : step_name ${my.ll.options.steps} queue
  158. queue.ll.options.run : step_name dependency ${my.ll.options.steps} total_tasks resources queue
  159. queue.ll.options.done : step_name dependency ${my.ll.options.steps} queue
  160. ! ~ step init options
  161. queue.ll.option.init.step_name : init
  162. queue.ll.option.init.class : archive
  163. queue.ll.option.init.output : <auto>
  164. queue.ll.option.init.error : <auto>
  165. queue.ll.option.init.job_type : serial
  166. queue.ll.option.init.task_affinity : cpu(1) ! serial runs always in SMT mode
  167. queue.ll.option.init.parallel_threads : 1
  168. queue.ll.option.init.wall_clock_limit : 0:30:00
  169. queue.ll.option.init.queue :
  170. ! ~ step run options
  171. queue.ll.option.run.step_name : run
  172. #if 'init' in "${job.steps}".split() :
  173. queue.ll.option.run.dependency : (init == 0)
  174. #else
  175. queue.ll.option.run.dependency : <none>
  176. #endif
  177. queue.ll.option.run.output : <auto>
  178. queue.ll.option.run.error : <auto>
  179. !
  180. ! parallel (MPI) run ?
  181. #if "${par.mpi}" in ["T","True"] :
  182. queue.ll.option.run.class : serial ! up to 4 tasks is still called 'serial' ...
  183. queue.ll.option.run.job_type : parallel
  184. queue.ll.option.run.total_tasks : ${par.ntask}
  185. #else
  186. queue.ll.option.run.class : serial
  187. queue.ll.option.run.job_type : serial
  188. queue.ll.option.run.total_tasks : <none>
  189. #endif
  190. !
  191. ! SMT or ST mode ?
  192. queue.ll.option.run.task_affinity : cpu(${par.nthread}) ! SMT mode
  193. !queue.ll.option.run.task_affinity : core(${par.nthread}) ! ST mode
  194. !
  195. ! number of OpenMP threads:
  196. queue.ll.option.run.parallel_threads : ${par.nthread}
  197. !
  198. queue.ll.option.run.wall_clock_limit : 6:00:00
  199. queue.ll.option.run.resources : ConsumableMemory(1Gb)
  200. queue.ll.option.run.queue :
  201. ! ~ step done options
  202. queue.ll.option.done.step_name : done
  203. #if 'run' in "${job.steps}".split() :
  204. queue.ll.option.done.dependency : (run == 0)
  205. #else
  206. queue.ll.option.done.dependency : <none>
  207. #endif
  208. queue.ll.option.done.output : <auto>
  209. queue.ll.option.done.error : <auto>
  210. queue.ll.option.done.class : archive
  211. queue.ll.option.done.job_type : serial
  212. queue.ll.option.done.task_affinity : cpu(1) ! serial runs always in SMT mode
  213. queue.ll.option.done.parallel_threads : 1
  214. queue.ll.option.done.wall_clock_limit : 0:30:00
  215. queue.ll.option.done.queue :
  216. !
  217. ! maker
  218. !
  219. ! make command;
  220. ! the setup script will insert the 'build.jobs' specified in the expert.rc
  221. ! or passed as argument to the setup script:
  222. !
  223. maker : gmake -j %{build.jobs}
  224. !
  225. ! MPI runner
  226. !
  227. ! Parallel Operating Environment (POE)
  228. ! Common arguments:
  229. ! -procs procs # number of processors
  230. ! -cmdfile <commandfile>
  231. ! -hostfile <hostfile>
  232. ! -labelio {yes | no} # label standard output lines with pe id
  233. !
  234. mpirun.command : poe
  235. mpirun.args : -labelio yes -procs ${par.ntask} -cmdfile ${mpirun.cmdfile}
  236. ! name of command and host files (empty for not written):
  237. ! (hostfile is not necessary on cineca)
  238. mpirun.cmdfile : ${my.basename}.cmdfile
  239. mpirun.hostfile :
  240. !
  241. ! debugger
  242. !
  243. ! debugger type: totalview | idb | kdbg
  244. debugger : totalview
  245. ! command for debugger:
  246. debugger.command : totalview -searchPath=${build.sourcedir}
  247. !
  248. ! model data
  249. !
  250. ! per-user scratch directory:
  251. my.scratch : ${CINECA_SCRATCH}
  252. ! base path to various data files:
  253. !my.data.dir : ${TM5_DATA}/TM
  254. ! when running in queue, only scratch dir is available:
  255. my.data.dir : ${CINECA_SCRATCH}/TM
  256. ! local temporary meteo archive, shared by all users:
  257. my.meteo.dir : ${my.scratch}/tmm-buf/${my.meteo.class}
  258. ! permanent archives to search for meteo files:
  259. !my.meteo.search : ${TM5_DATA}/TM/meteo
  260. my.meteo.search : cart:TM-meteo
  261. ! extra install tasks:
  262. my.install.tasks : TM_input
  263. ! specify how to install TM_input on runtime scratch:
  264. TM_input.install.dir : ${my.data.dir}
  265. TM_input.install.arch : ${TM5_DATA}/TM
  266. TM_input.install.rsync : input