- !===============================================
- ! compiler information (CRAY compiler)
- !===============================================
- ! Cray compiler: do not include a compiler.rc, since CRAY uses ftn
- ! wrapper. Just set flag here. Flags dedicated to hardware are normally
- ! already included in the ftn wrapper.
- compiler.fc : ftn
- compiler.f77 : ${compiler.fc}
- compiler.fc.openmp : ${compiler.fc}
- mpi.compiler.fc : ${compiler.fc}
- mpi.compiler.fc.openmp : ${compiler.fc}
- ! Cray Fortran : Version 8.2.2 Tue Jun 24, 2014 09:47:40
- compiler.getversion_flag : -V
- ! openMP is on by default
- #if "${par.openmp}" in ["T","True"] :
- my.default.fflags :
- #else
- my.default.fflags : -hnoomp
- #endif
- compiler.flags.default.fflags : ${my.default.fflags}
- compiler.flags.default.ldflags :
- compiler.flags.real8.fflags : -s real64
- compiler.flags.real8.ldflags :
- compiler.flags.mpi.fflags :
- compiler.flags.mpi.ldflags :
- compiler.flags.openmp.fflags :
- compiler.flags.openmp.ldflags :
- ! Not recommended by Cray
- compiler.flags.optim-none.fflags : -O0
- compiler.flags.optim-none.ldflags :
- ! Optim-strict:
- ! It uses the -O2 default optimization, which is equivalent to -O3 in most other compilers.
- !
- ! Note that -hflex_mp=intolerant,strict,conservative,default, OR tolerant.
- !
- ! Note that "hflex_mp=intolerant is the option most certain to provide bit reproducibility,
- ! although it also has the highest impact on performance. hflex_mp=conservative has
- ! comparatively little impact on performance, but is not strict enough for some applications"
- !
- ! Conclusion: replace 'conservative' with 'intolerant' for bit reproducibility
- compiler.flags.optim-strict.fflags : -hflex_mp=conservative -hadd_paren -hfp1
- compiler.flags.optim-strict.ldflags :
- compiler.flags.optim-fast.fflags : -O3 -hfp3
- compiler.flags.optim-fast.ldflags : -O3
- compiler.flags.optim-vfast.fflags :
- compiler.flags.optim-vfast.ldflags :
- ! runtime checks: floating point operations and check bounds
- compiler.flags.check-all.fflags : -Ktrap=fp -Rb
- compiler.flags.check-all.ldflags :
- ! debug: "-eD" is same as "-g -m2 -rl -R bcdsp". Add -m0 for exhaustive messaging.
- compiler.flags.debug.fflags : -eD
- compiler.flags.debug.ldflags :
- !===============================================
- ! libraries
- !===============================================
- ! ftn is a wrapper, which already knows all libraries locations. Then we just need to specify the flags here.
- ! Z library (used for compression in HDF)
- compiler.lib.z.fflags :
- compiler.lib.z.libs : -lz
- ! JPEG library (used for compression in HDF)
- compiler.lib.jpeg.fflags :
- compiler.lib.jpeg.libs : -ljpeg
- ! SZ library (used for compression in HDF)
- SZIP_VERSION :
- SZIP_HOME :
- compiler.lib.sz.fflags :
- compiler.lib.sz.libs :
- ! HDF4 library:
- HDF_VERSION :
- HDF_HOME :
- compiler.lib.hdf4.fflags :
- compiler.lib.hdf4.libs :
- ! HDF5 library:
- HDF5_VERSION :
- HDF5_HOME :
- compiler.lib.hdf5.fflags :
- compiler.lib.hdf5.libs : -lhdf5_hl -lhdf5
- ! HDF5 library with parallel features enabled:
- HDF5_PAR_VERSION :
- HDF5_PAR_HOME :
- compiler.lib.hdf5_par.fflags :
- compiler.lib.hdf5_par.libs : -lhdf5_hl -lhdf5
- ! NetCDF library:
- NETCDF_VERSION :
- NETCDF_HOME :
- compiler.lib.netcdf.fflags :
- compiler.lib.netcdf.libs : -lnetcdff -lnetcdf
- ! NetCDF4 library:
- NETCDF4_VERSION :
- NETCDF4_HOME :
- compiler.lib.netcdf4.fflags :
- compiler.lib.netcdf4.libs : -lnetcdff -lnetcdf
- ! NetCDF4 library with parallel features enabled:
- NETCDF4_PAR_VERSION :
- NETCDF4_PAR_HOME :
- compiler.lib.netcdf4_par.fflags :
- compiler.lib.netcdf4_par.libs : -lnetcdff -lnetcdf
- ! UDUNITS v1.x library (not available)
- UDUNITS_HOME :
- compiler.lib.udunits1.fflags : -I${UDUNITS_HOME}/include
- compiler.lib.udunits1.libs : -L${UDUNITS_HOME}/lib -ludunits
- ! UDUNITS v2.x library (available through module, nothing to specify here)
- compiler.lib.udunits2.fflags :
- compiler.lib.udunits2.libs :
- ! OASIS3 library ( ${ECEARTH} is defined in your environment )
- ! Bother with this only if we are within EC-Earth
- #if "oasis3" in "${my.tm5.define}":
- OASIS3_ARCH : aix
- OASIS3_MPIV : MPI1
- OASIS3_HOME : ${ECEARTH}/oasis3/prism_2-5/prism/${OASIS3_ARCH}
- compiler.lib.oasis3.fflags : -I${OASIS3_HOME}/build/lib/psmile.${OASIS3_MPIV}
- compiler.lib.oasis3.libs : -L${OASIS3_HOME}/lib -lpsmile.${OASIS3_MPIV} -lmpp_io
- #endif
- ! MPI library
- compiler.lib.mpi.fflags :
- compiler.lib.mpi.libs :
- ! GRIBEX library
- compiler.lib.gribex.fflags :
- compiler.lib.gribex.libs :
- ! GRIB-API library
- compiler.lib.grib_api.fflags : ${GRIB_API_INCLUDE}
- compiler.lib.grib_api.libs : ${GRIB_API_LIB}
- ! LAPACK library:
- compiler.lib.lapack.fflags :
- compiler.lib.lapack.libs :
- !===============================================================
- ! SETTINGS FOR BATCH SCHEDULER (qsub,bsub,loadleveler,slurm,pbs)
- !===============================================================
- queue : pbs
- ! passed directly to qsub command
- queue.pbs.submit.options :
- ! If set to 2, we have access to 72 (logical) cores per node, if set to 1,
- ! only to the 36 physical cores. In both cases, 120 GB per node are available.
- hyperthread : 2
- ! number of tasks (trap non-MPI cases)
- #if "${par.mpi}" in ["T","True"] :
- my.queue.ntask : ${par.ntask}
- #else
- my.queue.ntask : 1
- #endif
- ! number of threads (trap non-OpenMP cases)
- #if "${par.openmp}" in ["T","True"] :
- my.queue.nthread : ${par.nthread}
- #else
- my.queue.nthread : 1
- #endif
- !------------------------------------------------------------------
- ! PBS queue options: if set to <none>, it is skipped, to <auto> it is set in
- ! the python script. If empty it is used as 'option without
- ! argument'.
- !
- ! "l" option has special treatment: it is seen as a space-
- ! separated list of options itself, and is skipped if empty.
- !
- ! See bin/submit_tm5_tools.py/QueueOptions_PBS for details.
- !-----------------------------------------------
- ! ** queue options with same value for ALL steps
- !-----------------------------------------------
- ! space separated list:
- queue.pbs.options.all : j m l
- ! shell (DO NOT USE IT! KEPT HERE TO REMEMBER)
- queue.pbs.option.all.S : /opt/pbs/default/bin/pbs_python
- ! join output/error log: oe (joined in out) | eo (joined in err) | n (not joined)
- queue.pbs.option.all.j : oe
- ! when to send emails : a (abort) | b (begin) | e (end) | n (never)
- queue.pbs.option.all.m : n
- ! pass environment (ECMWF doc says to be careful)
- queue.pbs.option.all.V :
- ! account (to overwrite default one attached to $USER)
- #if ("${my.queue.account}" == ""):
- queue.pbs.option.all.l :
- #else
- queue.pbs.option.all.l : EC_billing_account=${my.queue.account}
- #endif
- !------------------------------------------------
- ! ** queue options with different values per step
- !
- ! To set a walltime request (which can lower the waiting in the queue), add walltime=<hh:mm:ss> to the list *run.l:
- !
- !------------------------------------------------
- ! list options per step:
- queue.pbs.options.init : N q o l
- queue.pbs.options.run : N q o l
- queue.pbs.options.done : N q o
- ! ~~~~~ step init options
- queue.pbs.option.init.q : ns
- queue.pbs.option.init.N : ${job.name}_init
- queue.pbs.option.init.o : <auto>
- queue.pbs.option.init.l : EC_ecfs=1
- ! ~~~~~ step done options
- queue.pbs.option.done.q : ns
- queue.pbs.option.done.N : ${job.name}_done
- queue.pbs.option.done.o : <auto>
- queue.pbs.option.done.W : block=true
- ! ~~~~~ step run options
- queue.pbs.option.run.N : ${job.name}_run
- queue.pbs.option.run.o : <auto>
- ! check whether the run step will use fractional (nf) or full (np) node occupancy,
- ! which depends on: hyperthreading (1 or 2), ntasks and nthreads
- #if (${my.queue.ntask}*${my.queue.nthread} == 1 ) :
- queue.pbs.option.run.q : ns
- module.cmd:
- #elif (${my.queue.ntask}*${my.queue.nthread} > ${hyperthread}*18 ) :
- queue.pbs.option.run.q : np
- module.cmd:
- #else
- queue.pbs.option.run.q : nf
- ! NOTE: the module command, even with the qualified path, is not found in the
- ! python script. It is here just to put mpiexec on the $PATH - so we revert to
- ! using the fully qualified path for mpiexec hereafter.
- module.cmd:
- !module.cmd: /opt/modules/3.2.6.7/bin/modulecmd bash load cray-snplauncher
- #endif
- #if (${my.queue.nthread} > 1):
- queue.pbs.option.run.l : EC_total_tasks=${my.queue.ntask} EC_threads_per_task=${my.queue.nthread} EC_hyperthreads=${hyperthread}
- #else
- queue.pbs.option.run.l : EC_total_tasks=${my.queue.ntask} EC_hyperthreads=${hyperthread}
- #endif
- queue.pbs.option.run.W : block=true
- ! TODO if needed: Resources (thru -l EC_memory_per_task)
- !queue.pbs.option.run.resources : ${my.resource.memory}
- !===============================================
- ! make
- !===============================================
- ! the setup script will insert the 'build.jobs' specified in the expert.rc
- ! or passed as argument to the setup script, and add Makefile and target to
- ! the command:
- maker : gmake -j %{build.jobs}
- ! Submit compilation with a job script?
- ! - if F (default), pycasso calls the ${maker}
- ! - if T, then compilation is submitted according to "submit.to" key (default
- ! of which is set in expert.rc, and can be overwritten at the CLI): either "queue", or foreground.
- ! THEN you SHOULD call "setup_tm5 -m..." to wait that compilation is done.
- !
- ! "my.queue.make" is set in the main rc file (so user can switch it on/off from there).
- !
- build.make.submit : ${my.queue.make}
- ! sub options: assume that %{build.jobs} is less than 24 (default is set in expert, probably to 8)
- queue.pbs.options.build : N q o l
- queue.pbs.option.build.q : nf
- queue.pbs.option.build.N : build-tm
- queue.pbs.option.build.o : <auto>
- queue.pbs.option.build.l : EC_total_tasks=${build.jobs} EC_hyperthreads=1
- !==========================================================================
- ! MPI runner (LUCKILY, THE ENVIRONMENT VARIABLES ARE **NOT** EXPANDED UNTIL
- ! SCRIPTS ARE SUBMITTED). Depends on:
- ! (1) pure openMP, pure MPI or hybrid runs
- ! (2) node occupancy: fractional or full (see above)
- !==========================================================================
- #if "${queue.pbs.option.run.q}" == "np":
- mpirun.command : aprun
- #if (${my.queue.nthread} > 1):
- ! Hybrid (TODO: does it cover pure openMP?)
- mpirun.args : -N $EC_tasks_per_node -n $EC_total_tasks -d $OMP_NUM_THREADS -j $EC_hyperthreads
- #else
- !pure MPI
- mpirun.args : -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads
- #endif
- #else
- mpirun.command : /opt/cray/snplauncher/6.3.1/bin/mpiexec
- ! pure MPI only. TODO: hybrid, and pure openMP
- mpirun.args : -n $EC_total_tasks
- #endif
- ! name of command and host files (leave empty to not write):
- mpirun.cmdfile :
- mpirun.hostfile :
- !===============================================
- ! debugger (FIXME)
- !===============================================
- ! debugger type: totalview | idb | kdbg
- debugger : totalview
- ! command for debugger:
- debugger.command : totalview -searchPath=${build.sourcedir}
- !debugger.command : totalview
- !===============================================
- ! model data
- !===============================================
- ! the user scratch directory:
- my.scratch : ${SCRATCH}
- ! base path to various data files (static):
- my.data.dir : /perm/ms/nl/nm6/TM5_INPUT
- ! permanent archives to search for meteo files:
- my.meteo.search : ecfs:/nlh/TM/meteo-nc
- ! extra install tasks:
- my.install.tasks :