# Platform dependent configuration functions for the 'lemaitre3' machine
# (lemaitre3.cism.ucl.ac.be)
  3. function configure()
  4. {
  5. # This function should configure all settings/modules needed to
  6. # later prepare the EC-Earth run directory and set variables used
  7. # in the run script
  8. # Configure paths for building/running EC-Earth
  9. ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
  10. run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
  11. ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]
  12. archive_dir=/scratch/ucl/elic/${USER}/ecearth/archive/${exp_name}
  13. # File for standard output.
  14. # NOTE: This will be modified for restart jobs!
  15. stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
  16. # Resubmit this job for automatic restarts? [true/false]
  17. # Also, add options for the resubmit command here.
  18. resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
  19. resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"
  20. # Configure number of processors per node
  21. proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]
  22. # Configure and load modules
  23. pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
  24. module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"
  25. if [ -n "${pre_load_modules_cmd}" ]
  26. then
  27. ${pre_load_modules_cmd}
  28. fi
  29. if [ -n "${module_list}" ]
  30. then
  31. module load ${module_list}
  32. fi
  33. # Configure grib api paths
  34. export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]/util/grib_table_126:[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
  35. export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
  36. export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]
  37. # Add directories to the shared library search path
  38. if [ -n "[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]" ]
  39. then
  40. export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
  41. fi
  42. }
  43. function launch()
  44. {
  45. # Compute and check the node distribution
  46. info "======================="
  47. info "Node/proc distribution:"
  48. info "-----------------------"
  49. info "IFS: ${ifs_numproc}"
  50. info "NEMO: ${nem_numproc}"
  51. info "XIOS: ${xio_numproc}"
  52. info "======================="
  53. cmd="mpirun"
  54. while (( "$#" ))
  55. do
  56. nranks=$1
  57. executable=./$(basename $2)
  58. shift
  59. shift
  60. cmd+=" -n $nranks $executable"
  61. while (( "$#" )) && [ "$1" != "--" ]
  62. do
  63. cmd+=" $1"
  64. shift
  65. done
  66. shift || true
  67. (( "$#" )) && cmd+=" :"
  68. done
  69. #export OMP_NUM_THREADS=1
  70. pwd
  71. echo $cmd
  72. #exit
  73. $cmd
  74. }
  75. function finalise2()
  76. {
  77. # This function should execute of any post run functionality, e.g.
  78. # platform dependent cleaning or a resubmit
  79. if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
  80. then
  81. info "Resubmitting job for leg $((leg_number+1))"
  82. # Need to go to start_dir to find the run script
  83. cd ${SLURM_SUBMIT_DIR} # same as $start_dir
  84. cp ./${SLURM_JOB_NAME} ./$(basename ${SLURM_JOB_NAME}).$$
  85. sed "s:force_run_from_scratch=true:force_run_from_scratch=false:" \
  86. <./$(basename ${SLURM_JOB_NAME}).$$ \
  87. >./$(basename ${SLURM_JOB_NAME})
  88. cp -f ./${SLURM_JOB_NAME} ./$(basename ${SLURM_JOB_NAME}).$$
  89. sed "s:special_restart=true:special_restart=false:" \
  90. <./$(basename ${SLURM_JOB_NAME}).$$ \
  91. >./$(basename ${SLURM_JOB_NAME})
  92. \rm -f ./${PBS_JOBNAME}.$$
  93. # Submit command
  94. set -x
  95. qsub ./$(basename ${SLURM_JOB_NAME})
  96. set +x
  97. else
  98. info "Not resubmitting."
  99. fi
  100. }
  101. function finalise()
  102. {
  103. # This function should execute of any post run functionality, e.g.
  104. # platform dependent cleaning or a resubmit
  105. if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
  106. then
  107. info "Resubmitting job for leg $((leg_number+1))"
  108. # Need to go to start_dir to find the run script
  109. cd ${start_dir}
  110. # Submit command
  111. # Note: This does not work if you specify a job name with sbatch -J jobname!
  112. sbatch -N ${SLURM_JOB_NUM_NODES} \
  113. -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
  114. -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
  115. -d ${SLURM_JOB_ID} \
  116. ${resubmit_opt} \
  117. ./${SLURM_JOB_NAME}
  118. fi
  119. }