- !
- ! compiler information
- !
- ! template settings for IBM xlf compiler:
- #include base/${my.branch}/rc/pycasso-compiler-xlf-12.1.rc
- ! Suppress warnings on Cineca/SP6:
- my.default.fflags : -qsuppress=1501-245
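- ! This flag is simply appended to every compile line to silence
- ! compiler message 1501-245; schematically (the source file name is an
- ! illustration only):
- !   xlf90 -qsuppress=1501-245 -c tracer.F90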
- !
- ! libraries
- !
- ! Z library (used for compression in HDF4)
- compiler.lib.z.fflags :
- compiler.lib.z.libs : -L${ZLIB_LIB} -lz
- ! JPEG library (used for compression in HDF4)
- compiler.lib.jpeg.fflags :
- compiler.lib.jpeg.libs : -L${LIBJPEG_LIB} -ljpeg
- ! SZ library (used for compression in HDF4)
- compiler.lib.sz.fflags :
- compiler.lib.sz.libs : -L${SZLIB_HOME}/lib -lsz
- ! HDF4 library:
- compiler.lib.hdf4.fflags : -I${HDF4_INC}
- compiler.lib.hdf4.libs : -L${HDF4_LIB} -lmfhdf -ldf
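- ! Each 'compiler.lib.<name>' pair is used by the build system: the fflags
- ! are added to the compile lines, the libs to the final link line. For an
- ! HDF4 build this gives, schematically (program and file names are
- ! illustrations only):
- !   xlf90 -I${HDF4_INC} -c tm5_io.F90
- !   xlf90 tm5_io.o -L${HDF4_LIB} -lmfhdf -ldf -L${LIBJPEG_LIB} -ljpeg -L${ZLIB_LIB} -lz -o tm5.x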
- ! NetCDF libraries:
- NETCDF_VERSION : 4.0.1
- !
- NETCDF_HOME : /cineca/prod/libraries/netcdf/${NETCDF_VERSION}/xl--10.1
- compiler.lib.netcdf.fflags : -I${NETCDF_HOME}/include
- compiler.lib.netcdf.libs : -L${NETCDF_HOME}/lib -lnetcdf
- !
- NETCDF4_HOME : /cineca/prod/libraries/netcdf/${NETCDF_VERSION}_ser/xl--10.1
- compiler.lib.netcdf4.fflags : -I${NETCDF4_HOME}/include
- compiler.lib.netcdf4.libs : -L${NETCDF4_HOME}/lib -lnetcdf
- !
- NETCDF4_PAR_HOME : /cineca/prod/libraries/netcdf/${NETCDF_VERSION}_gpfs/xl--10.1
- compiler.lib.netcdf4_par.fflags : -I${NETCDF4_PAR_HOME}/include
- compiler.lib.netcdf4_par.libs : -L${NETCDF4_PAR_HOME}/lib -lnetcdf
- ! HDF5 libraries:
- HDF5_VERSION : 1.8.4
- !
- HDF5_HOME : /cineca/prod/libraries/hdf5/${HDF5_VERSION}_ser/xl--10.1
- compiler.lib.hdf5.fflags :
- compiler.lib.hdf5.libs : -L${HDF5_HOME}/lib -lhdf5_hl -lhdf5
- !
- HDF5_PAR_HOME : /cineca/prod/libraries/hdf5/${HDF5_VERSION}_par/xl--10.1
- compiler.lib.hdf5_par.fflags :
- compiler.lib.hdf5_par.libs : -L${HDF5_PAR_HOME}/lib -lhdf5_hl -lhdf5
- ! MPI library: automatically linked by the 'mpxlf' compiler wrapper:
- compiler.lib.mpi.fflags :
- compiler.lib.mpi.libs :
- ! OpenMP runtime: automatically linked by the thread-safe 'xlf_r' wrapper:
- compiler.lib.openmp.fflags :
- compiler.lib.openmp.libs :
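- ! In practice the two are combined via the thread-safe MPI wrapper, so a
- ! compile line for an MPI plus OpenMP build looks schematically like
- ! (the source file name is an illustration only):
- !   mpxlf95_r -qsmp=omp -c chemistry.F90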
- !! GRIB library:
- !compiler.lib.grib.fflags :
- !compiler.lib.grib.libs : ${EMOS_LIB}
- !! Lapack library:
- !compiler.lib.lapack.fflags :
- !compiler.lib.lapack.libs : ${LAPACK_LIB}
- ! TotalView memory debugging: (DOES NOT WORK ON CINECA YET)
- !TV_VERSION : 8.7.0-4
- !TV_ARCH : rs6000
- !TV_HOME : /cineca/prod/tools/totalview/${TV_VERSION}/binary/toolworks/totalview.${TV_VERSION}/${TV_ARCH}
- !TV_HOME_MR : ${CCU_DATA}/opt/totalview.${TV_VERSION}/${TV_ARCH}
- !compiler.lib.tv.fflags :
- !compiler.lib.tv.libs : -L${TV_HOME_MR}/lib -L${TV_HOME}/lib ${TV_HOME}/lib/aix_malloctype64_5.o
- !
- ! settings for LoadLeveler queue
- !
- !
- ! loadleveler classes
- !
- ! o llclass
- ! o https://hpc.cineca.it/docs/startup-guides/SpClasses
- !
- ! +------------+-----------+---------------+--------------+-------------------+----------------------+
- ! | queue name | max tasks | max wall time | max mem (Gb) | default wait time | notes                |
- ! +------------+-----------+---------------+--------------+-------------------+----------------------+
- ! | debug      |        32 | 10 min        |            7 | 0.5 minutes       |                      |
- ! | serial     |         4 | 6 days        |           96 | 2 days            |                      |
- ! | small      |        64 | 6 hours       |           56 | 0.5 hours         |                      |
- ! | parallel   |      9728 | 6 hours       |          112 | 1 hour            |                      |
- ! | longpar    |      1024 | 1 day         |           56 | 1 day             |                      |
- ! | monster    |      9728 | 6 days        |          112 | 1                 | special request only |
- ! | longdebug  |           |               |              |                   |                      |
- ! | archive    |         1 | 4 hours       |            - | -                 | for "cart" only      |
- ! | private    |           |               |              |                   | internal use only    |
- ! +------------+-----------+---------------+--------------+-------------------+----------------------+
- !
- ! If not defined, the class is set automatically given the requested
- ! number of CPUs and the wall_clock_limit .
- ! However, for TM5 we explicitly need to define the class 'archive'
- ! for the 'init' and 'done' steps of the jobs, and therefore also an
- ! explicit 'class' for the 'run' step, since it would otherwise remain
- ! 'archive'. It is set to 'serial' here, but the submit command will
- ! probably re-set it to something else if the number of CPUs or the
- ! wall_clock_limit does not fit this class.
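- ! For example, a 'run' step requesting 64 tasks and a 6 hour
- ! wall_clock_limit would, following the table above, typically be
- ! re-assigned to:
- !   # @ class = small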
- !
- ! Architecture:
- ! 168 nodes ; per node:
- ! - 128 Gb memory (4 Gb/core ?)
- ! - 32 cores ;
- ! - ST (Single Thread) mode : 1 'cpu'/core
- ! - SMT (Symmetric Multi-Threading) mode : 2 cpu/core
- !
- ! In theory there is 2 Gb of memory per CPU, but in practice jobs
- ! report errors about a 650 Mb limit.
- !
- ! MPI/OpenMP following the "User Guide 2010" :
- ! o serial job:
- ! # @ job_type = serial
- ! o MPI job: 32 tasks, each task on 1 core
- ! # @ job_type = parallel
- ! # @ total_tasks = 32
- ! # @ task_affinity = core(1)
- ! o OpenMP job: 1 task on 32 CPUs, 32 threads
- ! # @ job_type = serial
- ! # @ task_affinity = cpu(32)
- ! # @ parallel_threads = 32
- ! o MPI/OpenMP job: 8 MPI tasks, each task running on
- ! 4 cores in ST mode providing 4 OpenMP threads/task :
- ! # @ job_type = parallel
- ! # @ total_tasks = 8
- ! # @ task_affinity = core(4)
- ! # @ parallel_threads = 4
- ! o MPI/OpenMP job: 8 MPI tasks, each task running on
- ! 4 cores in SMT mode (8 cpu's) providing 8 OpenMP threads/task :
- ! # @ job_type = parallel
- ! # @ total_tasks = 8
- ! # @ task_affinity = cpu(8)
- ! # @ parallel_threads = 8
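- ! Combining these keywords, a complete job script header could look
- ! like this (a sketch only; the limits, class, and executable name are
- ! assumptions):
- !   # @ job_type         = parallel
- !   # @ total_tasks      = 8
- !   # @ task_affinity    = core(4)
- !   # @ parallel_threads = 4
- !   # @ wall_clock_limit = 1:00:00
- !   # @ class            = small
- !   # @ queue
- !   poe ./tm5.x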
- !
- ! queue system (bsub,loadleveler)
- queue : loadleveler
- ! passed directly:
- queue.ll.submit.options :
- ! number of processors per node on this machine:
- queue.ll.npe_per_node : 32
- ! ** queue options with same value for all steps:
- ! space separated list:
- queue.ll.options.default : job_name notification initialdir environment
- ! job name:
- queue.ll.option.default.job_name : ${job.name}
- ! when to send emails : always | error | start | never | complete*
- !queue.ll.option.default.notification : never
- queue.ll.option.default.notification : error
- ! run directory:
- queue.ll.option.default.initialdir : ${rundir}
- ! pass the current search path and display settings:
- queue.ll.option.default.environment : PATH = ${PATH} ; DISPLAY = ${DISPLAY}
- ! ** queue options with different values per step:
- ! same for all:
- my.ll.options.steps : output error wall_clock_limit class job_type task_affinity parallel_threads
- ! list options per step:
- queue.ll.options.init : step_name ${my.ll.options.steps} queue
- queue.ll.options.run : step_name dependency ${my.ll.options.steps} total_tasks resources queue
- queue.ll.options.done : step_name dependency ${my.ll.options.steps} queue
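- ! The generated job file is a multi-step LoadLeveler script: each step
- ! gets its own block of keywords terminated by '# @ queue', schematically:
- !   # @ step_name  = init
- !   # @ class      = archive
- !   # @ queue
- !   # @ step_name  = run
- !   # @ dependency = (init == 0)
- !   # @ queue
- !   # @ step_name  = done
- !   # @ dependency = (run == 0)
- !   # @ queue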
- ! ~ step init options
- queue.ll.option.init.step_name : init
- queue.ll.option.init.class : archive
- queue.ll.option.init.output : <auto>
- queue.ll.option.init.error : <auto>
- queue.ll.option.init.job_type : serial
- queue.ll.option.init.task_affinity : cpu(1) ! serial runs always in SMT mode
- queue.ll.option.init.parallel_threads : 1
- queue.ll.option.init.wall_clock_limit : 0:30:00
- queue.ll.option.init.queue :
- ! ~ step run options
- queue.ll.option.run.step_name : run
- #if 'init' in "${job.steps}".split() :
- queue.ll.option.run.dependency : (init == 0)
- #else
- queue.ll.option.run.dependency : <none>
- #endif
- queue.ll.option.run.output : <auto>
- queue.ll.option.run.error : <auto>
- !
- ! parallel (MPI) run ?
- #if "${par.mpi}" in ["T","True"] :
- queue.ll.option.run.class : serial ! jobs with up to 4 tasks are still classed as 'serial' ...
- queue.ll.option.run.job_type : parallel
- queue.ll.option.run.total_tasks : ${par.ntask}
- #else
- queue.ll.option.run.class : serial
- queue.ll.option.run.job_type : serial
- queue.ll.option.run.total_tasks : <none>
- #endif
- !
- ! SMT or ST mode ?
- queue.ll.option.run.task_affinity : cpu(${par.nthread}) ! SMT mode
- !queue.ll.option.run.task_affinity : core(${par.nthread}) ! ST mode
- !
- ! number of OpenMP threads:
- queue.ll.option.run.parallel_threads : ${par.nthread}
- !
- queue.ll.option.run.wall_clock_limit : 6:00:00
- queue.ll.option.run.resources : ConsumableMemory(1Gb)
- queue.ll.option.run.queue :
- ! ~ step done options
- queue.ll.option.done.step_name : done
- #if 'run' in "${job.steps}".split() :
- queue.ll.option.done.dependency : (run == 0)
- #else
- queue.ll.option.done.dependency : <none>
- #endif
- queue.ll.option.done.output : <auto>
- queue.ll.option.done.error : <auto>
- queue.ll.option.done.class : archive
- queue.ll.option.done.job_type : serial
- queue.ll.option.done.task_affinity : cpu(1) ! serial runs always in SMT mode
- queue.ll.option.done.parallel_threads : 1
- queue.ll.option.done.wall_clock_limit : 0:30:00
- queue.ll.option.done.queue :
- !
- ! maker
- !
- ! make command;
- ! the setup script inserts the 'build.jobs' value specified in the
- ! expert.rc or passed as an argument to the setup script:
- !
- maker : gmake -j %{build.jobs}
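- ! For example, with 'build.jobs : 4' the actual command becomes:
- !   gmake -j 4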
- !
- ! MPI runner
- !
- ! Parallel Operating Environment (POE)
- ! Common arguments:
- !   -procs <n>              # number of tasks (processes)
- !   -cmdfile <commandfile>  # file listing the command to run per task
- !   -hostfile <hostfile>    # file listing the hosts to run on
- !   -labelio {yes|no}       # label standard output lines with the task id
- !
- mpirun.command : poe
- mpirun.args : -labelio yes -procs ${par.ntask} -cmdfile ${mpirun.cmdfile}
- ! names of the command and host files (leave empty to not write them):
- ! (a hostfile is not necessary on Cineca)
- mpirun.cmdfile : ${my.basename}.cmdfile
- mpirun.hostfile :
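- ! For a 4-task run the expanded command is, schematically (the
- ! executable and command-file names are illustrations only):
- !   poe -labelio yes -procs 4 -cmdfile tm5.cmdfile
- ! where the command file repeats the executable, one line per task:
- !   tm5.x
- !   tm5.x
- !   tm5.x
- !   tm5.x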
- !
- ! debugger
- !
- ! debugger type: totalview | idb | kdbg
- debugger : totalview
- ! command for debugger:
- debugger.command : totalview -searchPath=${build.sourcedir}
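- ! An interactive debug session is then started as, schematically
- ! (the executable name is an illustration only):
- !   totalview -searchPath=${build.sourcedir} tm5.x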
- !
- ! model data
- !
- ! per-user scratch directory:
- my.scratch : ${CINECA_SCRATCH}
- ! base path to various data files:
- !my.data.dir : ${TM5_DATA}/TM
- ! when running in queue, only scratch dir is available:
- my.data.dir : ${CINECA_SCRATCH}/TM
- ! local temporary meteo archive, shared by all users:
- my.meteo.dir : ${my.scratch}/tmm-buf/${my.meteo.class}
- ! permanent archives to search for meteo files:
- !my.meteo.search : ${TM5_DATA}/TM/meteo
- my.meteo.search : cart:TM-meteo
- ! extra install tasks:
- my.install.tasks : TM_input
- ! specify how to install TM_input on runtime scratch:
- TM_input.install.dir : ${my.data.dir}
- TM_input.install.arch : ${TM5_DATA}/TM
- TM_input.install.rsync : input
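- ! This corresponds roughly to an archive-to-scratch copy of the form
- ! (illustration only):
- !   rsync -a ${TM5_DATA}/TM/input ${my.data.dir}/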