# Platform dependent configuration functions for the 'zenobe' machine
# (zenobe.hpc.cenaero.be)
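#
# NOTE: Strings of the form [[[PLT:ACTIVE:*]]] are template placeholders.
#       They are assumed to be substituted with site-specific values by the
#       EC-Earth configuration tooling before this script is used.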
function configure()
{
    # This function should configure all settings/modules needed to
    # later prepare the EC-Earth run directory and set variables used
    # in the run script.

    # Configure paths for building/running EC-Earth
    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]
    archive_dir=/SCRATCH/acad/ecearth/${USER}/archive/${exp_name}

    # File for standard output.
    # NOTE: This will be modified for restart jobs!
    stdout_file=${PBS_O_WORKDIR-$PWD}/${PBS_JOBNAME-"local"}_${PBS_JOBID-"id"}.log

    # Resubmit this job for automatic restarts? [true/false]
    # Also, add options for the resubmit command here.
    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"

    # Configure GRIB API paths
    export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]/util/grib_table_126:[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
    export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
    export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]

    # Configure number of processors per node
    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]

    # Configure and load modules
    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"
    if [ -n "${pre_load_modules_cmd}" ]
    then
        ${pre_load_modules_cmd}
    fi
    if [ -n "${module_list}" ]
    then
        module load ${module_list}
    fi

    # Add directories to the shared library search path
    if [ -n "[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]" ]
    then
        export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
    fi
}

function launch()
{
    # Report the node/proc distribution
    info "======================="
    info "Node/proc distribution:"
    info "-----------------------"
    info "IFS: ${ifs_numproc}"
    info "NEMO: ${nem_numproc}"
    info "XIOS: ${xio_numproc}"
    info "======================="
- cmd="mpirun"
- while (( "$#" ))
- do
- nranks=$1
- executable=./$(basename $2)
- shift
- shift
- cmd+=" -n $nranks $executable"
- while (( "$#" )) && [ "$1" != "--" ]
- do
- cmd+=" $1"
- shift
- done
- shift || true
- (( "$#" )) && cmd+=" :"
- done
    #export OMP_NUM_THREADS=1
    #export I_MPI_ADJUST_BCAST=3
    #export PSM2_MTU=8196
    #export PSM2_MEMORY=large
    #export PSM2_MQ_RNDV_HFI_THRESH=1
    #export I_MPI_DEBUG=5
    #export I_MPI_FABRIC=tmi

    pwd
    echo $cmd
    #exit
    $cmd
}

function finalise()
{
    # This function should execute any post-run functionality, e.g.
    # platform dependent cleaning or a resubmit.
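
    # Resubmit only while the leg that just finished ends before the overall
    # run end date (dates are compared as seconds since the epoch). Both
    # leg_end_date and run_end_date are assumed to be set by the calling
    # run script.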
    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
    then
        info "Resubmitting job for leg $((leg_number+1))"
        # Need to go to start_dir to find the run script
        cd ${PBS_O_WORKDIR} # same as $start_dir
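
        # Edit the job script so that the resubmitted leg continues the
        # existing run instead of starting from scratch; a temporary copy
        # (suffixed with the current PID) is used for the edits.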
        cp ./${PBS_JOBNAME} ./${PBS_JOBNAME}.$$

        sed "s:force_run_from_scratch=true:force_run_from_scratch=false:" \
            <./${PBS_JOBNAME}.$$ \
            >./${PBS_JOBNAME}
        cp -f ./${PBS_JOBNAME} ./${PBS_JOBNAME}.$$
        sed "s:special_restart=true:special_restart=false:" \
            <./${PBS_JOBNAME}.$$ \
            >./${PBS_JOBNAME}
        \rm -f ./${PBS_JOBNAME}.$$

        # Submit command
        set -x
        qsub ./${PBS_JOBNAME}
        set +x
    else
        info "Not resubmitting."
    fi
}