@@ -0,0 +1,144 @@
+# Platform dependent configuration functions for the 'zenobe' machine
+# (zenobe.hpc.cenaero.be)
+
+function configure()
+{
+    # This function should configure all settings/modules needed to
+    # later prepare the EC-Earth run directory and set variables used
+    # in the run script
+
+
+    # Configure paths for building/running EC-Earth
+    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
+    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
+    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]
+
+    # File for standard output.
+    # NOTE: This will be modified for restart jobs!
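+    # (outside a SLURM job, SLURM_SUBMIT_DIR and SLURM_JOB_ID are unset, so the
+    #  ${VAR-default} expansions below fall back to $PWD and the literal "id")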
+    stdout_file=${SLURM_SUBMIT_DIR-$PWD}/$(basename ${SLURM_JOB_NAME})_${SLURM_JOB_ID-"id"}.log
+
+    # Resubmit this job for automatic restarts? [true/false]
+    # Also, add options for the resubmit command here.
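+    # (resubmit_opt is passed on to the submit command in finalise(), e.g. for
+    #  account or queue options)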
+    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
+    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"
+
+    # Configure GRIBEX paths
+    export LOCAL_DEFINITION_TEMPLATES=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]/gribtemplates
+    export ECMWF_LOCAL_TABLE_PATH=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]/gribtables
+
+    # Configure grib api paths
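+    # (GRIB_DEFINITION_PATH and GRIB_SAMPLES_PATH are read by grib_api itself;
+    #  GRIB_BIN_PATH is meant to point the scripts to the grib_api tools)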
+    export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
+    export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
+    export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]
+
+    # Configure number of processors per node
+    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]
+
+    # Configure and load modules
+    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
+    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"
+
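+    # Run the pre-load command first, if one is configured (e.g. to initialise
+    # or purge the module environment), then load the module list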
+    if [ -n "${pre_load_modules_cmd}" ]
+    then
+        ${pre_load_modules_cmd}
+    fi
+    module load ${module_list}
+
+    # Add directories to the shared library search path
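+    # (the ${LD_LIBRARY_PATH:+...} expansion keeps any existing path and adds the
+    #  separating colon only when LD_LIBRARY_PATH is already set and non-empty)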
+    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
+
+}
+
+function launch()
+{
+    # Compute and check the node distribution
+    info "======================="
+    info "Node/proc distribution:"
+    info "-----------------------"
+    info "IFS: ${ifs_numproc}"
+    info "NEMO: ${nem_numproc}"
+    info "XIOS: ${xio_numproc}"
+    info "======================="
+
+    cmd="mpirun"
+
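+    # The arguments are expected as groups of "<nranks> <executable> [args ...]",
+    # separated by "--", one group per model component. Each group becomes one
+    # part of a single MPMD mpirun command line, for example (executable names
+    # and rank counts illustrative only):
+    #   mpirun -n 896 ./ifsmaster : -n 128 ./nemo.exe : -n 16 ./xios_server.exe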
+    while (( "$#" ))
+    do
+        nranks=$1
+        executable=./$(basename $2)
+        shift
+        shift
+
+        cmd+=" -n $nranks $executable"
+
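+        # Collect any extra command line arguments for this executable
+        # until the next "--" separator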
+        while (( "$#" )) && [ "$1" != "--" ]
+        do
+            cmd+=" $1"
+            shift
+        done
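+        # Drop the "--" separator; the '|| true' avoids a non-zero exit status
+        # when there is nothing left to shift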
+        shift || true
+
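+        # More components follow: add the ":" that separates the parts of an
+        # MPMD mpirun command line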
+        (( "$#" )) && cmd+=" :"
+    done
+
+    #export OMP_NUM_THREADS=1
+    pwd
+    echo $cmd
+    #exit
+    $cmd
+}
+
+function finalise2()
+{
+    # This function should execute any post-run functionality, e.g.
+    # platform dependent cleaning or a resubmit
+
+    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
+    then
+        info "Resubmitting job for leg $((leg_number+1))"
+        # Need to go to start_dir to find the run script
+        cd ${SLURM_SUBMIT_DIR} # same as $start_dir
+
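+        # Edit a temporary copy of the run script: switch off force_run_from_scratch
+        # and special_restart so the next leg continues from the restart files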
+        cp ./${SLURM_JOB_NAME} ./$(basename ${SLURM_JOB_NAME}).$$
+
+        sed "s:force_run_from_scratch=true:force_run_from_scratch=false:" \
+            <./$(basename ${SLURM_JOB_NAME}).$$ \
+            >./$(basename ${SLURM_JOB_NAME})
+
+        cp -f ./${SLURM_JOB_NAME} ./$(basename ${SLURM_JOB_NAME}).$$
+
+        sed "s:special_restart=true:special_restart=false:" \
+            <./$(basename ${SLURM_JOB_NAME}).$$ \
+            >./$(basename ${SLURM_JOB_NAME})
+
+        \rm -f ./$(basename ${SLURM_JOB_NAME}).$$
+
+        # Submit command
+        set -x
+        qsub ./$(basename ${SLURM_JOB_NAME})
+        set +x
+    else
+        info "Not resubmitting."
+    fi
+}
+
+function finalise()
+{
+    # This function should execute any post-run functionality, e.g.
+    # platform dependent cleaning or a resubmit
+
+    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
+    then
+        info "Resubmitting job for leg $((leg_number+1))"
+        # Need to go to start_dir to find the run script
+        cd ${start_dir}
+        # Submit command
+        # Note: This does not work if you specify a job name with sbatch -J jobname!
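+        # The next leg reuses the node count and per-leg stdout/stderr file and is
+        # submitted with a dependency on the current job, so it only starts after
+        # this leg has finished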
+        sbatch -N ${SLURM_JOB_NUM_NODES} \
+               -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
+               -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
+               -d ${SLURM_JOB_ID} \
+               ${resubmit_opt} \
+               ./${SLURM_JOB_NAME}
+    fi
+}