# Platform-dependent configuration functions for the 'zenobe' machine
# (zenobe.hpc.cenaero.be)

function configure() {
    # This function should configure all settings/modules needed to
    # later prepare the EC-Earth run directory and set variables used
    # in the run script

    # Configure paths for building/running EC-Earth
    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
    #run_dir=/CECI/trsf/${USER}/run/ecearth/${exp_name}
    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]

    # File for standard output.
    # NOTE: This will be modified for restart jobs!
    stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log

    # Resubmit this job for automatic restarts? [true/false]
    # Also, add options for the resubmit command here.
    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"

    # Configure GRIBEX paths
    export LOCAL_DEFINITION_TEMPLATES=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]/gribtemplates
    export ECMWF_LOCAL_TABLE_PATH=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]/gribtables

    # Configure number of processors per node
    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]

    # Configure and load modules
    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"
    if [ -n "${pre_load_modules_cmd}" ]
    then
        ${pre_load_modules_cmd}
    fi
    module load ${module_list}

    # Add directories to the shared library search path
    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"

    # Configure GRIB API paths
    export GRIB_DEFINITION_PATH=${EBROOTGRIB_API}/share/grib_api/definitions
    export GRIB_SAMPLES_PATH=${EBROOTGRIB_API}/share/grib_api/ifs_samples/grib1
    export GRIB_BIN_PATH=${EBROOTGRIB_API}/bin
}

function launch() {
    # Compute and check the node distribution
    info "======================="
    info "Node/proc distribution:"
    info "-----------------------"
    info "IFS:  ${ifs_numproc}"
    info "NEMO: ${nem_numproc}"
    info "XIOS: ${xio_numproc}"
    info "======================="

    cmd="mpirun"

    while (( "$#" ))
    do
        # Get number of MPI ranks and executable name
        nranks=$1
        executable=./$(basename $2)
        shift
        shift

        cmd+=" -n $nranks $executable"

        # Add any arguments to the executable
        while (( "$#" )) && [ "$1" != "--" ]
        do
            cmd+=" $1"
            shift
        done
        shift || true

        # Add a colon if more executables follow
        (( "$#" )) && cmd+=" :"
    done

    #export OMP_NUM_THREADS=1

    # Log the working directory and the assembled command, then run it
    pwd
    echo $cmd
    #exit
    $cmd
}

function finalise() {
    # This function should execute any post-run functionality, e.g.
    # platform-dependent cleaning or a resubmit

    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
    then
        info "Resubmitting job for leg $((leg_number+1))"
        # Need to go to start_dir to find the run script
        cd ${start_dir}
        # Submit command
        # Note: This does not work if you specify a job name with sbatch -J jobname!
        sbatch -N ${SLURM_JOB_NUM_NODES} \
               -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
               -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
               -d ${SLURM_JOB_ID} \
               ${resubmit_opt} \
               ./${SLURM_JOB_NAME}
    fi
}
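
# Illustration only (not part of the original file): launch() expects its
# arguments as groups of "<nranks> <executable> [args...]" separated by "--",
# which it assembles into a single MPMD mpirun command. A call from the run
# script might look like the sketch below; the variable names (xio_exe_file,
# nem_exe_file, ifs_exe_file, exp_name) are placeholders, not values defined
# in this file.
#
#   launch \
#       ${xio_numproc} ${xio_exe_file} -- \
#       ${nem_numproc} ${nem_exe_file} -- \
#       ${ifs_numproc} ${ifs_exe_file} -v ecmwf -e ${exp_name}
#
# which would result in a command of the form:
#
#   mpirun -n <xio_numproc> ./xios_server.exe : \
#          -n <nem_numproc> ./nemo.exe : \
#          -n <ifs_numproc> ./ifsmaster -v ecmwf -e <exp_name>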