- !===============================================
- ! compiler information
- !===============================================
- ! template settings for GCC suite:
- #include base/${my.branch}/rc/pycasso-compiler-gfortran-4.4.7.rc
- ! Problem with the (non-standard) system routines 'Exit_' and 'Sleep_' on ECMWF:
- ! these do not have the underscores here. The flag '-qnoextname' restores the
- ! official names, but makes linking with HDF fail.
- ! Therefore, a macro __ecmwf__ is now defined to distinguish in the code
- ! between the various XLF implementations.
- !
- my.default.fflags : -WF,-D__ecmwf__
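- !
- ! As an illustration only (a sketch; the actual guards in the TM5 sources
- ! may differ), code calling these system routines is assumed to switch on
- ! this macro roughly as follows:
- !
- !   #ifdef __ecmwf__
- !     call Exit( status )     ! ECMWF: official name without trailing underscore
- !   #else
- !     call Exit_( status )    ! other XLF implementations: underscored name
- !   #endif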
- !===============================================
- ! libraries
- !===============================================
- ! location of installed user libraries:
- APPS_HOME : /usr/local/apps
- ! addressing mode:
- !AMODE : ILP32
- AMODE : LP64
- ! Z library (used for compression in HDF)
- ZLIB_VERSION : 1.2.7
- ZLIB_HOME : ${APPS_HOME}/zlib/${ZLIB_VERSION}/${AMODE}
- compiler.lib.z.fflags :
- compiler.lib.z.libs : -L${ZLIB_HOME}/lib -lz
- ! JPEG library (used for compression in HDF)
- compiler.lib.jpeg.fflags : ${JASPER_INCLUDE}
- compiler.lib.jpeg.libs : ${JASPER_LIB}
- ! SZ library (used for compression in HDF)
- SZIP_VERSION : 2.1
- SZIP_HOME : ${APPS_HOME}/szip/${SZIP_VERSION}/${AMODE}
- compiler.lib.sz.fflags : -I${SZIP_HOME}/include
- compiler.lib.sz.libs : -L${SZIP_HOME}/lib -lsz -Wl,-rpath -Wl,${SZIP_HOME}/lib
- ! HDF4 library:
- HDF_VERSION : 4.2.9
- HDF_HOME : ${APPS_HOME}/hdf/${HDF_VERSION}/${AMODE}
- compiler.lib.hdf4.fflags : -I${HDF_HOME}/include
- compiler.lib.hdf4.libs : -L${HDF_HOME}/lib -lmfhdf -ldf
- ! HDF5 library:
- HDF5_VERSION : 1.8.10p1
- HDF5_HOME : ${APPS_HOME}/hdf5/${HDF5_VERSION}/${AMODE}
- compiler.lib.hdf5.fflags : -I${HDF5_HOME}/include
- compiler.lib.hdf5.libs : -L${HDF5_HOME}/lib -lhdf5_hl -lhdf5
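- !
- ! For illustration only: the build system is assumed to collect the
- ! '*.fflags' values into the compile flags and the '*.libs' values into
- ! the link line of the selected libraries, so the resulting commands look
- ! roughly like (compiler name, target name and ordering are examples only):
- !
- !   <f90> -c ... -I${HDF5_HOME}/include -I${HDF_HOME}/include -I${SZIP_HOME}/include
- !   <f90> -o tm5.x ... -L${HDF5_HOME}/lib -lhdf5_hl -lhdf5 \
- !         -L${HDF_HOME}/lib -lmfhdf -ldf \
- !         -L${SZIP_HOME}/lib -lsz -L${ZLIB_HOME}/lib -lz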
- ! NetCDF library:
- !NETCDF_VERSION : netCDF/4.0.1
- !NETCDF_HOME : ${APPS_HOME}/${NETCDF_VERSION}/${AMODE}
- !compiler.lib.netcdf.fflags : -I${NETCDF_HOME}/include
- !compiler.lib.netcdf.libs : -L${NETCDF_HOME}/lib -lnetcdf
- compiler.lib.netcdf.fflags : -I${NETCDF_INCLUDE}
- compiler.lib.netcdf.libs : -L${NETCDF_LIB}
- ! NetCDF4 library:
- !NETCDF4_VERSION : netcdf4/4.0.1
- !NETCDF4_HOME : ${APPS_HOME}/${NETCDF4_VERSION}/${AMODE}
- !compiler.lib.netcdf4.fflags : -I${NETCDF4_HOME}/include
- !compiler.lib.netcdf4.libs : -L${NETCDF4_HOME}/lib -lnetcdf
- compiler.lib.netcdf4.fflags : -I${NETCDF_INCLUDE}
- compiler.lib.netcdf4.libs : -L${NETCDF_LIB}
- !! UDUNITS library:
- !UDUNITS_VERSION : udunits/1.12.9
- !UDUNITS_HOME : ${APPS_HOME}/${UDUNITS_VERSION}/${AMODE}
- !compiler.lib.udunits.fflags : -I${UDUNITS_HOME}/include
- !compiler.lib.udunits.libs : -L${UDUNITS_HOME}/lib -ludunits
- ! GRIB library:
- ! do not use the standard variable:
- ! EMOSLIB = -L/usr/local/lib -lemos.new.R32.D64.I32 -lxlf90 -lxlopt
- ! but rather the real(8) version:
- !EMOS_VERSION :
- !EMOS_HOME : /usr/local
- !compiler.lib.grib.fflags :
- !compiler.lib.grib.libs : -L${EMOS_HOME}/lib -lemos.R64.D64.I32 -lxlf90
- compiler.lib.grib.fflags :
- compiler.lib.grib.libs : ${EMOSLIB}
- !! Lapack library:
- !compiler.lib.lapack.fflags :
- !compiler.lib.lapack.libs : ${LAPACKLIB_OPT}
- !===============================================
- ! settings for LoadLeveler queue
- !===============================================
- !! queue system (bsub,loadleveler)
- !queue : loadleveler
- !
- !! passed directly:
- !queue.ll.submit.options :
- !
- !!------------------------------------------------
- !! ** queue options with same value for all steps:
- !!------------------------------------------------
- !
- !! space separated list:
- !queue.ll.options.default : job_name account_no notification initialdir shell
- !
- !! job name:
- !queue.ll.option.default.job_name : ${job.name}
- !
- !! account name for payment ...
- !queue.ll.option.default.account_no : ${my.queue.account}
- !
- !! when to send emails : always | error | start | never | complete*
- !queue.ll.option.default.notification : never
- !
- !! run directory:
- !queue.ll.option.default.initialdir : ${rundir}
- !
- !! shell of job script:
- !queue.ll.option.default.shell : /bin/sh
- !
- !!------------------------------------------------
- !! ** queue options with different values per step:
- !!------------------------------------------------
- !
- !! list options per step:
- !queue.ll.options.init : step_name output error class job_type queue
- !queue.ll.options.run : step_name dependency output error class job_type environment queue
- !queue.ll.options.done : step_name dependency output error class job_type queue
- !
- !! ~ step init options
- !
- !queue.ll.option.init.step_name : init
- !queue.ll.option.init.output : <auto>
- !queue.ll.option.init.error : <auto>
- !queue.ll.option.init.class : normal
- !queue.ll.option.init.job_type : serial
- !queue.ll.option.init.queue :
- !
- !! ~ step run options
- !queue.loadleveler.outputdir : ${rundir}
- !! Run classes on ECMWF/ecgate
- !!
- !! normal : 3 hr CPU, 1 GB mem
- !! long : 6 hr CPU, 2 GB mem
- !!
- !queue.ll.option.run.step_name : run
- !#if "init" in "${job.steps}" :
- !queue.ll.option.run.dependency : (init == 0)
- !#else :
- !queue.ll.option.run.dependency : <none>
- !#endif
- !queue.ll.option.run.output : <auto>
- !queue.ll.option.run.error : <auto>
- !queue.ll.option.run.class : normal
- !queue.ll.option.run.job_type : serial
- !queue.ll.option.run.environment : UDUNITS_PATH = ${UDUNITS_PATH}
- !queue.ll.option.run.queue :
- !
- !! ~ step done options
- !
- !queue.ll.option.done.step_name : done
- !#if "run" in "${job.steps}" :
- !queue.ll.option.done.dependency : (run == 0)
- !#elif "init" in "${job.steps}" :
- !queue.ll.option.done.dependency : (init == 0)
- !#else :
- !queue.ll.option.done.dependency : <none>
- !#endif
- !queue.ll.option.done.output : <auto>
- !queue.ll.option.done.error : <auto>
- !queue.ll.option.done.class : normal
- !queue.ll.option.done.job_type : serial
- !queue.ll.option.done.queue :
- !
- ! Settings for the SLURM job manager - for details and other options, see "man sbatch" or the web documentation
- !
- queue : slurm
- ! (1) passed directly (i.e. "as is" at the command line)
- !------------------------------------------------------------------
- ! (for example, --share or -s to share node(s) with other jobs)
- ! long options should also be added here
- queue.slurm.submit.options :
- !queue.slurm.submit.options : --share
- ! Note that this is added to the 'submit.options' key value from the pycasso-tm5-expert.rc
- ! (2) Short options set by the end user (and processed by the submit_tm5_tools.py script)
- !----------------------------------------------
- !! first, list of queue options (same for each step):
- !queue.slurm.options.default : J N p t
- !
- !! job name:
- !queue.slurm.option.default.J : ${job.name}
- !
- !! minimum number of nodes
- !queue.slurm.option.default.N : 1
- !! ${slurm.nnode}
- !
- !!queue.slurm.option.run.c : 1
- !
- !! maximum number of tasks (but at the workshop it was said to use it as the number of processors... over-allocate?)
- !!queue.slurm.option.n : <auto>
- !
- !! tasks per node
- !!queue.slurm.option.tpn :
- ! ensure correct environment for init step
- queue.slurm.options.init : J n e o
- queue.slurm.option.init.J : ${job.name}
- queue.slurm.option.init.n : 1
- queue.slurm.option.init.o : <auto>
- queue.slurm.option.init.e : <auto>
- ! ensure correct environment for run step
- queue.slurm.options.run : J N n c e o
- queue.slurm.option.run.J : ${job.name}
- queue.slurm.option.run.N : 1
- queue.slurm.option.run.n : ${par.ntask}
- queue.slurm.option.run.c : ${par.nthread}
- !queue.slurm.option.run.t : ${ll.wall_clock_limit}
- queue.slurm.option.run.o : <auto>
- queue.slurm.option.run.e : <auto>
- ! ensure correct environment for done step
- queue.slurm.options.done : J n e o
- queue.slurm.option.done.J : ${job.name}
- !queue.slurm.option.done.N : 1
- queue.slurm.option.done.n : 1
- !queue.slurm.option.done.p : short
- !queue.slurm.option.done.t : 00:10:00
- queue.slurm.option.done.o : <auto>
- queue.slurm.option.done.e : <auto>
- !! list of node names. Can be simple: neuron224
- !! or complicated: mynode[1-5,7,..]
- !!queue.slurm.option.w : ${slurm.nodename}
- !
- !!
- !!queue.slurm.option.s: F
- !
- !! partition
- !queue.slurm.option.default.p : normal
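- !
- ! Sketch of the job-script header that the submit script is assumed to
- ! generate from the short options of the 'run' step above (the numeric
- ! values are placeholders for ${par.ntask} and ${par.nthread}):
- !
- !   #SBATCH -J myjob
- !   #SBATCH -N 1
- !   #SBATCH -n 4
- !   #SBATCH -c 1
- !   #SBATCH -o <auto-generated output file>
- !   #SBATCH -e <auto-generated error file>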
- !===============================================
- ! maker
- !===============================================
- ! make command;
- ! the setup script will insert the 'build.jobs' specified in the expert.rc
- ! or passed as argument to the setup script:
- !
- maker : gmake -j %{build.jobs}
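- ! For example, with 'build.jobs : 4' the expanded command would be
- ! 'gmake -j 4' (the value 4 is only an illustration).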
- !===============================================
- ! MPI runner
- !===============================================
- ! no parallel support on this machine ...
- !===============================================
- ! debugger
- !===============================================
- ! debugger type: totalview | idb | kdbg
- debugger : totalview
- ! command for debugger:
- debugger.command : totalview -searchPath=${build.sourcedir}
- !===============================================
- ! model data
- !===============================================
- ! the user scratch directory:
- my.scratch : ${SCRATCH}
- ! base path to various data files:
- my.data.dir : ${SCRATCH}/TM
- ! local temporary meteo archive:
- my.meteo.dir : ${my.scratch}/METEO
- ! permanent archives to search for meteo files:
- my.meteo.search : ecfs:/nlh/TM/meteo
- my.meteo.nc.search : ecfs:/nlh/TM/meteo-nc
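- ! (These are ECFS locations; retrieval into the local meteo archive is
- !  handled by the model's meteo tools, presumably via the standard ECFS
- !  client, e.g. something like: ecp ec:/nlh/TM/meteo/<file> ${my.meteo.dir}/ )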
- ! extra install tasks:
- my.install.tasks :