bsc-marenostrum4.cfg.tmpl

# Platform dependent configuration functions for MareNostrum
# (mnX.bsc.es)

function configure()
{
    # This function should configure all settings/modules needed to
    # later prepare the EC-Earth run directory and set variables used
    # in the run script

    # SCRATCH is not defined in MN3, define it here
    # and also make sure it is defined when compiling
    export SCRATCH=/gpfs/scratch/`id -gn`/${USER}

    # Configure paths for building/running EC-Earth
    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]

    # File for standard output.
    # NOTE: This will be modified for restart jobs!
    stdout_file=${start_dir}/out/$(basename ${SLURM_JOB_NAME}).out

    # Resubmit this job for automatic restarts? [true/false]
    # Also, add options for the resubmit command here.
    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"

    # Configure grib api paths
    export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]/util/grib_table_126:[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
    export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
    export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]
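
    # Note: GRIB_DEFINITION_PATH is a colon-separated search list, so the
    # EC-Earth grib_table_126 definitions listed first take precedence over
    # the standard grib_api definitions.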

    # Configure number of processors per node
    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]

    # Configure and load modules
    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"
    if [ -n "${module_list}" ]
    then
        set +eu
        if [ -n "${pre_load_modules_cmd}" ]
        then
            ${pre_load_modules_cmd}
        fi
        for m in ${module_list}
        do
            module add $m
        done
        set -eu
    fi
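
    # Note: module_list is deliberately left unquoted in the loop above so
    # that it word-splits into individual module names, each loaded with
    # "module add". A purely hypothetical expansion of the placeholder would
    # look like "intel/2017.4 impi/2017.4 netcdf/4.4.1.1".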

    # Add directories to the shared library search path
    if [ -n "[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]" ]
    then
        export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
    fi

    ulimit -s unlimited
}

function launch()
{
    cmd="mpirun"

    while (( "$#" ))
    do
        # Get number of MPI ranks and executable name
        nranks=$1
        executable=./$(basename $2)

        shift
        shift

        cmd+=" -np $nranks $executable"

        # Add any arguments to executable
        while (( "$#" )) && [ "$1" != "--" ]
        do
            cmd+=" $1"
            shift
        done
        shift || true

        # Add colon if more executables follow
        (( "$#" )) && cmd+=" :"
    done
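
    # The exports below are site-specific tuning for MareNostrum4's Omni-Path
    # interconnect (PSM2) and for Intel MPI; the values may need adjusting for
    # other systems or library versions.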
    export OMP_NUM_THREADS=1
    export I_MPI_ADJUST_BCAST=3
    export PSM2_MTU=8196
    export PSM2_MEMORY=large
    export PSM2_MQ_RNDV_HFI_THRESH=1
    export I_MPI_DEBUG=5
    export LD_LIBRARY_PATH="/apps/PSM2/10.3-37/usr/lib64:$LD_LIBRARY_PATH"
    export I_MPI_FABRIC=tmi

    echo SLURM_JOB_ID: $SLURM_JOB_ID

    $cmd
}
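
# Example of how launch() assembles an MPMD mpirun command. The rank counts
# and binary names below are purely illustrative, not taken from any actual
# EC-Earth configuration:
#
#   launch 128 oifs/bin/master.exe -v ecmwf -- 256 nemo/bin/nemo.exe
#
# builds and executes
#
#   mpirun -np 128 ./master.exe -v ecmwf : -np 256 ./nemo.exe
#
# i.e. each "<nranks> <executable> [args]" group is separated by "--".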

function finalise()
{
    # This function should execute any post-run functionality, e.g.
    # platform dependent cleaning or a resubmit
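
    # The leg and run end dates are compared as epoch seconds, so the job
    # resubmits itself as long as at least one more leg remains to be run.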
    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
    then
        info "Resubmitting job for leg $((leg_number+1))"
        # Need to go to start_dir to find the run script
        cd ${start_dir}
        # Submit command
        # Note: This does not work if you explicitly specify a job name!
        # bsub -n ${SLURM_JOB_NUM_NODES} \
        #      -w ${SLURM_JOB_ID} \
        #      -oo ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
        #      ${resubmit_opt} \
        #      ${SLURM_JOB_NAME}
    fi
}