# Platform dependent configuration functions for the 'bi' machine
# (bi.nsc.liu.se)

function configure()
{
    # This function should configure all settings/modules needed to
    # later prepare the EC-Earth run directory and set variables used
    # in the run script

    # Configure paths for building/running EC-Earth
    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]

    # File for standard output.
    # NOTE: This will be modified for restart jobs!
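    # For a job submitted as './my-exp.sh' (the name is hypothetical), this
    # resolves to ${start_dir}/out/my-exp.sh.out; basename strips any path
    # component from the job name.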
    stdout_file=${start_dir}/out/$(basename ${SLURM_JOB_NAME}).out

    # Resubmit this job for automatic restarts? [true/false]
    # Also, add options for the resubmit command here.
    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"

    # Configure GRIB API paths
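    # Directories in GRIB_DEFINITION_PATH are searched in order, so the
    # EC-Earth table 126 definitions listed first take precedence over the
    # standard GRIB API definitions.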
    export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]/util/grib_table_126:[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
    export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
    export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]

    # Configure number of processors per node
    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]

    # Configure and load modules
    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"
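    # Note: in a non-interactive batch shell the 'module' command is usually
    # not defined, so modulecmd is called directly; it prints shell code on
    # stdout, which eval then applies to the current environment.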
    if [ -n "${module_list}" ]
    then
        set +u
        if [ -n "${pre_load_modules_cmd}" ]
        then
            ${pre_load_modules_cmd}
        fi
        # Word splitting is intended here: load each module in the list
        for m in ${module_list}
        do
            eval $(/usr/bin/modulecmd bash add $m)
        done
        set -u
    fi

    # Add directories to the shared library search path
    # (the :+ expansion appends a colon only if LD_LIBRARY_PATH is already
    # set, which avoids a spurious leading ':')
    if [ -n "[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]" ]
    then
        export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
    fi

    # Remove the stack size limit; large model components may otherwise
    # fail with stack overflows
    ulimit -s unlimited
}

function launch()
{
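    # Build and execute a single (possibly MPMD) mpiexec command line.
    # Arguments come in groups of
    #     <nranks> <executable> [args ...]
    # separated by '--', one group per model component.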
    cmd="mpiexec.hydra -bootstrap slurm -genvall -prepend-rank -ordered-output"

    while (( "$#" ))
    do
        # First two arguments of a group: rank count and executable
        nranks=$1
        executable=./$(basename $2)
        shift
        shift

        cmd+=" -n $nranks $executable"

        # Everything up to the next '--' belongs to this executable
        while (( "$#" )) && [ "$1" != "--" ]
        do
            cmd+=" $1"
            shift
        done
        # Drop the '--' separator ('|| true' covers the last group, which
        # has no trailing '--')
        shift || true

        # If more groups follow, add the MPMD ':' delimiter
        (( "$#" )) && cmd+=" :"
    done

    # Run pure MPI: one OpenMP thread per rank
    export OMP_NUM_THREADS=1

    $cmd
}
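
# For illustration only (executable names are hypothetical), a call such as
#     launch 128 ifsmaster -- 256 nemo.exe -- 1 xios_server.exe
# would run
#     mpiexec.hydra ... -n 128 ./ifsmaster : -n 256 ./nemo.exe : -n 1 ./xios_server.exe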

function finalise()
{
    # This function should execute any post-run functionality, e.g.
    # platform dependent cleaning or a resubmit

    # Resubmit the run script for the next leg unless the experiment end
    # date has been reached
    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
    then
        info "Resubmitting job for leg $((leg_number+1))"
        # Need to go to start_dir to find the run script
        cd ${start_dir}
        # Submit command; the -d option makes the new job wait until the
        # current one has finished.
        # Note: This does not work if you specify a job name with sbatch -J jobname!
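        # For illustration (job name 'my-exp.sh' is hypothetical): the
        # resubmitted leg 2 would write to ${run_dir}/my-exp.sh.out.002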
        sbatch -N ${SLURM_JOB_NUM_NODES} \
               -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
               -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
               -d ${SLURM_JOB_ID} \
               ${resubmit_opt} \
               ./${SLURM_JOB_NAME}
    fi
}