!===============================================
! compiler information
!===============================================
!
! All the keys in this section should be filled in here or put in a compiler
! rc file. See the other machine rc files for examples.
!
! So, either include a file:
#include rc/compiler-<name>-<version>.rc
! *OR* set all these keys:
! Compilers for Fortran 90, F77 (fixed form), OpenMP, and the MPI wrappers.
! The same compiler may be used for all of them.
compiler.fc :
compiler.f77 :
compiler.fc.openmp :
mpi.compiler.fc :
mpi.compiler.fc.openmp :
! flag used to retrieve the compiler version
compiler.getversion_flag :
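! Example (a sketch only, assuming gfortran with the OpenMPI 'mpif90' wrapper;
! adjust the commands to your own system):
!compiler.fc : gfortran
!compiler.f77 : gfortran
!compiler.fc.openmp : gfortran
!mpi.compiler.fc : mpif90
!mpi.compiler.fc.openmp : mpif90
!compiler.getversion_flag : --version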
! *** For all the remaining flags, you must specify one set for compilation
! *** and one for linking.
! default flags are always used, regardless of optimization and runtime checks
compiler.flags.default.fflags :
compiler.flags.default.ldflags :
! to force all reals to double precision
compiler.flags.real8.fflags :
compiler.flags.real8.ldflags :
! to use MPI
compiler.flags.mpi.fflags :
compiler.flags.mpi.ldflags :
! to use OpenMP
compiler.flags.openmp.fflags :
compiler.flags.openmp.ldflags :
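! Example (illustrative gfortran values; check your compiler documentation
! before adopting them):
!compiler.flags.default.fflags : -ffree-line-length-none
!compiler.flags.real8.fflags : -fdefault-real-8 -fdefault-double-8
!compiler.flags.openmp.fflags : -fopenmp
!compiler.flags.openmp.ldflags : -fopenmp
! (the mpi fflags/ldflags can often stay empty when the MPI wrapper is used)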
! ** the pycasso script expects the four levels of optimization defined here.
! ** You have to choose one of them in your main rc file.
compiler.flags.optim-none.fflags :
compiler.flags.optim-none.ldflags :
compiler.flags.optim-strict.fflags :
compiler.flags.optim-strict.ldflags :
compiler.flags.optim-fast.fflags :
compiler.flags.optim-fast.ldflags :
compiler.flags.optim-vfast.fflags :
compiler.flags.optim-vfast.ldflags :
! ** you can also define two sets of runtime-check flags, which you can add
! ** in your main rc file when developing
compiler.flags.check-all.fflags :
compiler.flags.check-all.ldflags :
compiler.flags.debug.fflags :
compiler.flags.debug.ldflags :
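! Example (again a gfortran-based sketch; the mapping of optimization levels
! to flags is a choice, not a requirement):
!compiler.flags.optim-none.fflags : -O0
!compiler.flags.optim-strict.fflags : -O2 -ffloat-store
!compiler.flags.optim-fast.fflags : -O3
!compiler.flags.optim-vfast.fflags : -Ofast
!compiler.flags.check-all.fflags : -fcheck=all
!compiler.flags.debug.fflags : -g -fbacktrace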
!===============================================
! libraries for HDF4 support (mandatory)
!===============================================
! Z library (used for compression in HDF)
compiler.lib.z.fflags :
compiler.lib.z.libs :
! JPEG library (used for compression in HDF)
JPEG_HOME :
compiler.lib.jpeg.fflags :
compiler.lib.jpeg.libs :
! SZ library (used for compression in HDF)
SZIP_HOME :
compiler.lib.sz.fflags :
compiler.lib.sz.libs :
! HDF4 library (without netcdf interface)
HDF4_HOME :
compiler.lib.hdf4.fflags :
compiler.lib.hdf4.libs :
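! Example (typical link lines, assuming the libraries live under the *_HOME
! directories; library names may differ on your system):
!compiler.lib.z.libs : -lz
!compiler.lib.jpeg.libs : -L${JPEG_HOME}/lib -ljpeg
!compiler.lib.sz.libs : -L${SZIP_HOME}/lib -lsz
!compiler.lib.hdf4.fflags : -I${HDF4_HOME}/include
!compiler.lib.hdf4.libs : -L${HDF4_HOME}/lib -lmfhdf -ldf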
!===============================================
! libraries for NetCDF support (mandatory)
!===============================================
!
! Use one netCDF library, optionally with HDF5 if needed. To do so, add the
! corresponding 'with_*' to the 'my.df.define' key in your main rc file.
! NetCDF3 library (with_netcdf)
NETCDF_HOME :
compiler.lib.netcdf.fflags :
compiler.lib.netcdf.libs :
! NetCDF4 library (with_netcdf4)
NETCDF4_HOME :
compiler.lib.netcdf4.fflags :
compiler.lib.netcdf4.libs :
! NetCDF4 library with parallel IO features (with_netcdf4_par)
NETCDF4_MPI_HOME :
compiler.lib.netcdf4_par.fflags :
compiler.lib.netcdf4_par.libs :
! * since NetCDF4 is built on top of HDF5, you may have to link/include the
! * HDF5 library when building the code:
! HDF5 library (with_hdf5)
HDF5_HOME :
compiler.lib.hdf5.fflags :
compiler.lib.hdf5.libs :
! HDF5 library with parallel features (with_hdf5_par)
HDF5_MPI_HOME :
compiler.lib.hdf5_par.fflags :
compiler.lib.hdf5_par.libs :
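! Example (sketch for a serial netCDF4 + HDF5 build; '-lnetcdff -lnetcdf' and
! '-lhdf5_hl -lhdf5' are the usual library names, but check your installation):
!compiler.lib.netcdf4.fflags : -I${NETCDF4_HOME}/include
!compiler.lib.netcdf4.libs : -L${NETCDF4_HOME}/lib -lnetcdff -lnetcdf
!compiler.lib.hdf5.fflags : -I${HDF5_HOME}/include
!compiler.lib.hdf5.libs : -L${HDF5_HOME}/lib -lhdf5_hl -lhdf5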
!===============================================
! Other libraries
!===============================================
! MPI library
MPI_HOME :
compiler.lib.mpi.fflags :
compiler.lib.mpi.libs :
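! Note: when the MPI compiler wrappers (e.g. mpif90) are used, these keys can
! usually be left empty, since the wrapper already adds the include and link
! flags for the MPI library.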
! GRIB-API library (optional). It is needed only if you read meteo files from
! the ECMWF MARS system, where they are stored in GRIB format.
compiler.lib.grib_api.fflags :
compiler.lib.grib_api.libs :
! The UDUNITS library is optional. It can be used when reading meteo files in
! netCDF format to check that unit conversions are correct. It is known to
! slow down the code significantly. The two main versions are supported.
! UDUNITS v1.x library (add with_udunits1 to my.tmm.define in your main rc file)
UDUNITS_HOME :
compiler.lib.udunits1.fflags :
compiler.lib.udunits1.libs :
! UDUNITS v2.x library (add with_udunits2 to my.tmm.define in your main rc file)
compiler.lib.udunits2.fflags :
compiler.lib.udunits2.libs :
! LAPACK library (optional). If available, add with_lapack to my.tm5.define in
! your main rc file. See the User Manual for further details.
LAPACK_HOME :
compiler.lib.lapack.fflags :
compiler.lib.lapack.libs :
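! Example (illustrative, assuming the reference LAPACK/BLAS libraries; vendor
! libraries such as MKL need different link flags):
!compiler.lib.lapack.libs : -L${LAPACK_HOME}/lib -llapack -lblas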
!===============================================================
! SETTINGS FOR BATCH SCHEDULER (qsub,bsub,loadleveler,slurm,pbs)
!===============================================================
!
! Settings in this section can be put in a dedicated rc file, or just left
! here. So, either include such a file:
#include rc/queue-<name>-<institute>.rc
! *OR* fill in the following keys:
queue : <name-of-your-job-manager>
! arguments passed directly to the submit command
queue.pbs.submit.options :
! number of MPI tasks (do not change)
my.queue.ntask : ${par.ntask}
! number of threads (always 1, OpenMP is not supported in TM5-MP)
my.queue.nthread : 1
! Queue options are set below and managed by the 'bin/submit_tm5_tools.py'
! script. There can be as many as needed.
!
! Some special values can be used:
!   if set to <none>, the option is skipped
!   if set to <auto>, it is filled in automatically by the python script
!   if left empty, the option is passed without an argument
!
! Check that the options are implemented for your job manager: see
! bin/submit_tm5_tools.py/QueueOptions_<scheduler-name> for details.
!
! If needed, the user account is available from the main rc file as ${my.queue.account}
!-----------------------------------------------
! ** queue options with same value for ALL steps
!-----------------------------------------------
! space-separated list
queue.<scheduler-name>.options.all : <option1> <option2> <option3>...
! and their values
queue.<scheduler-name>.option.all.<option1> :
queue.<scheduler-name>.option.all.<option2> :
queue.<scheduler-name>.option.all.<option3> :
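! Example (hypothetical SLURM settings; whether these exact option names are
! accepted depends on QueueOptions_<scheduler-name> in bin/submit_tm5_tools.py):
!queue.slurm.options.all : partition account
!queue.slurm.option.all.partition : normal
!queue.slurm.option.all.account : ${my.queue.account}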
!------------------------------------------------
! ** queue options with different values per step
!------------------------------------------------
! list of options per step:
queue.<scheduler-name>.options.init : <ini_option1> <ini_option2> <ini_option3>...
queue.<scheduler-name>.options.run : <run_option1> <run_option2>...
queue.<scheduler-name>.options.done : <don_option1> <don_option2>...
! ~~~~~ step init option values
queue.<scheduler-name>.option.init.<ini_option1> :
queue.<scheduler-name>.option.init.<ini_option2> :
queue.<scheduler-name>.option.init.<ini_option3> :
! ~~~~~ step run option values
queue.<scheduler-name>.option.run.<run_option1> :
queue.<scheduler-name>.option.run.<run_option2> :
! ~~~~~ step done option values
queue.<scheduler-name>.option.done.<don_option1> :
queue.<scheduler-name>.option.done.<don_option2> :
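! Example (hypothetical SLURM settings per step; typically 'init' and 'done'
! are light serial steps and 'run' is the parallel model run, but the option
! names and values below are placeholders to adapt to your site):
!queue.slurm.options.init : job-name time ntasks
!queue.slurm.option.init.job-name : tm5-init
!queue.slurm.option.init.time : 00:10:00
!queue.slurm.option.init.ntasks : 1
!queue.slurm.options.run : job-name time ntasks
!queue.slurm.option.run.job-name : tm5-run
!queue.slurm.option.run.time : 12:00:00
!queue.slurm.option.run.ntasks : ${my.queue.ntask}
!queue.slurm.options.done : job-name time ntasks
!queue.slurm.option.done.job-name : tm5-done
!queue.slurm.option.done.time : 00:10:00
!queue.slurm.option.done.ntasks : 1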
!===============================================
! make
!===============================================
!
! Specify the make command. The setup script inserts the 'build.jobs' value
! specified in the expert rc file (probably 8); it can be overwritten on the
! command line (argument to the 'setup_tm5' script).
!
maker : gmake -j %{build.jobs}
!JOB-COMPILE ! If you want to submit the compilation to a compute node, uncomment and set
!JOB-COMPILE ! the necessary keys. See the user manual for further details.
!JOB-COMPILE !
!JOB-COMPILE ! - if F (default), pycasso calls the ${maker} command in the foreground.
!JOB-COMPILE ! - if T, compilation is submitted according to the "submit.to" key (whose
!JOB-COMPILE !   default is set in the expert rc file and can be overwritten on the
!JOB-COMPILE !   command line): either "queue" or foreground.
!JOB-COMPILE !
!JOB-COMPILE ! "my.queue.make" should be set in the main rc file, so the user can switch it on/off from there.
!JOB-COMPILE !
!JOB-COMPILE build.make.submit : ${my.queue.make}
!JOB-COMPILE
!JOB-COMPILE ! list of options for the job manager, and their values
!JOB-COMPILE queue.<scheduler-name>.options.build : option1 option2 option3...
!JOB-COMPILE queue.<scheduler-name>.option.build.<option1> :
!JOB-COMPILE queue.<scheduler-name>.option.build.<option2> :
!JOB-COMPILE queue.<scheduler-name>.option.build.<option3> :
!===============================================
! MPI runner
!===============================================
!
! command for running MPI parallel programs
mpirun.command :
mpirun.args :
! name of the command file; if empty, the executable and its arguments are
! added to the command line
mpirun.cmdfile :
! name of the host file
mpirun.hostfile :
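! Example (a common OpenMPI-style invocation; some systems use 'srun' or
! 'mpiexec' instead, with different arguments):
!mpirun.command : mpirun
!mpirun.args : -np ${par.ntask}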
!===============================================
! debugger
!===============================================
!
! type: totalview | idb | kdbg
debugger : kdbg
! command for debugger
! o KDE debugger around gdb (Linux systems using gfortran)
debugger.command : kdbg
! o Intel debugger (for systems with Intel compiler)
!debugger.command : idb -gui
! o TotalView (IBM)
!debugger.command : totalview -searchPath=${build.sourcedir}
!===============================================
! model data
!===============================================
! the user scratch directory
my.scratch : ${SCRATCH}
! *Permanent* archives to search for meteo files
! (Note that the location of the meteo files at runtime is set in the main rc)
my.meteo.search :
! base path to other input data files distributed with the model
my.data.dir :
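! Example (purely hypothetical paths; point these to your own archives and
! data directories):
!my.meteo.search : /archive/yourname/tm5-meteo
!my.data.dir : /data/yourname/tm5-input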
! extra install tasks
my.install.tasks :