ecmwf-cca-intel.cfg.tmpl

# Platform dependent configuration functions for the CRAY at ECMWF machine

function configure()
{
    # This function should configure all settings/modules needed to
    # later prepare the EC-Earth run directory and set variables used
    # in the run script

    # Configure paths for building/running EC-Earth
    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]

    # File for standard output.
    # NOTE: This will be modified for restart jobs!
    stdout_file=${start_dir}/out/$exp_name.out

    # Resubmit this job for automatic restarts? [true/false]
    # Also, add options for the resubmit command here.
    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"
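    # Illustration (hypothetical values): after ecconf has substituted the
    # [[[PLT:ACTIVE:...]]] placeholders, the assignments above might read, e.g.:
    #
    #     ecearth_src_dir=${HOME}/ecearth3/sources
    #     run_dir=${SCRATCH}/ecearth3/run/${exp_name}
    #     ini_data_dir=/path/to/ecearth3/inidata
    #     resubmit_job=true
    #     resubmit_opt=""
    #
    # The actual values come from the platform entry selected in the ecconf
    # configuration for this machine.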
    # Configure GRIBEX paths
    export LOCAL_DEFINITION_TEMPLATES=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]
    export ECMWF_LOCAL_TABLE_PATH=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]

    # Configure grib api paths
    export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]/util/grib_table_126:[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
    export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
    export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]
    export emos_tool=/usr/local/apps/libemos/000458/GNU/63/bin/emos_tool

    # Configure number of processors per node
    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]
    # Configure and load modules
    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"

    # Load any required modules
    if [[ -n "$module_list" ]]
    then
        if [ -n "${pre_load_modules_cmd}" ]
        then
            eval $(echo "${pre_load_modules_cmd}")
        fi
        for m in ${module_list}
        do
            module load $m
        done
        module list
    fi
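    # Illustration (hypothetical module names): with, for instance,
    #
    #     pre_load_modules_cmd="module unload cray-netcdf"
    #     module_list="PrgEnv-intel cray-hdf5 cray-netcdf grib_api"
    #
    # the block above first evaluates the pre-load command, then runs
    # "module load <name>" for each entry and prints the result with "module list".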
    # Add directories to the shared library search path
    if [ -n "[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]" ]
    then
        export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
    fi

    #hostname
    ulimit -s unlimited
    ulimit -v unlimited
    #ulimit -n 2048
    #ulimit -a
}
function configure_python()
{
    # specific for python+eccodes setup - used for OSM pre/post-processing
    module rm grib_api
    module load eccodes
    module unload python
    module load python/2.7.15-01

    unset GRIB_DEFINITION_PATH
    unset GRIB_SAMPLES_PATH
    unset GRIB_BIN_PATH
}
function launch()
{
    use_forking=$(echo "[[[PLT:ACTIVE:USE_FORKING]]]" | tr '[:upper:]' '[:lower:]')
    if [ "${use_forking}" = "true" ]
    then
        launch_forking $*
    else
        launch_default $*
    fi
}
function launch_default()
{
    # version using aprun

    cmd="aprun"

    # account for possible user tasks_per_node request
    IFS=':' read -ra tasks_per_node <<< "$EC_tasks_per_node"
    read hyperth <<< "$EC_hyperthreads"

    i=0
    while (( "$#" ))
    do
        nranks=$1
        executable=./$(basename $2)
        shift
        shift

        if [ "$executable" == "./master1s.exe" -o "$executable" == "./master1s_cpl.exe" ]
        then
            cmd+=" -d ${OMP_NUM_THREADS:-1} -cc depth $executable"
        else
            cmd+=" -n $nranks -N ${tasks_per_node[$i]} -j ${hyperth} $executable"
        fi

        while (( "$#" )) && [ "$1" != "--" ]
        do
            cmd+=" $1"
            shift
        done
        shift || true

        (( "$#" )) && cmd+=" :"
        ((i+=1))
    done

    echo $cmd
    $cmd
}
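# Illustration of launch_default() (hypothetical executables, task counts and
# environment): a call such as
#
#     launch 256 ifsmaster.exe -- 128 nemo.exe -- 1 xios_server.exe
#
# with EC_tasks_per_node="36:36:4" and EC_hyperthreads=1 would assemble and run
#
#     aprun -n 256 -N 36 -j 1 ./ifsmaster.exe : -n 128 -N 36 -j 1 ./nemo.exe : -n 1 -N 4 -j 1 ./xios_server.exe
#
# i.e. a single colon-separated aprun MPMD command. The master1s.exe and
# master1s_cpl.exe binaries are treated specially: they receive OpenMP
# depth/affinity options (-d ${OMP_NUM_THREADS} -cc depth) instead of the
# explicit -n/-N placement shown above.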
function launch_forking()
{
    # Version using launcher script to allow node sharing.

    LAUNCHER="launch.sh"
    nproc=0

    cat <<-EOF >$LAUNCHER # Indentation with tabs, not spaces!!
#!/bin/ksh
rank=\$EC_FARM_ID
export OMP_NUM_THREADS=1
if false # just to manage all the other test cases uniformly with an elif
then
    :
EOF

    # Create the launcher script
    while [ $# -gt 0 ]
    do
        nranks=$1
        executable=./$(basename $2)
        shift # nranks
        shift # executable

        ((nproc+=nranks))

        while [ $# -gt 0 ] && [ "$1" != "--" ]
        do
            executable="$executable $1"
            shift # argument
        done
        shift || true # separator -- or nothing

        cat <<-EOF >>$LAUNCHER # Indentation with tabs, not spaces!!
elif [ \$rank -lt $nproc ]
then
    exec $executable
EOF
    done
    cat <<-EOF >>$LAUNCHER # Indentation with tabs, not spaces!!
fi
EOF
    # Create the command to execute
    IFS=':' read -ra total_tasks <<< "$EC_total_tasks"
    IFS=':' read -ra tasks_per_node <<< "$EC_tasks_per_node"
    read hyperth <<< "$EC_hyperthreads"

    i=0
    nproc_header=0
    cmd="aprun"
    for n in ${total_tasks[*]}
    do
        cmd+=" -n $n -N ${tasks_per_node[$i]} -j ${hyperth} ./${LAUNCHER}"
        ((i+=1))
        ((nproc_header+=n))
        [ "$nproc_header" -lt "$nproc" ] && cmd+=" :"
    done

    # Check that the total number of tasks in the job header matches the ecconf settings
    if [ "$nproc_header" -ne "$nproc" ]
    then
        echo "The total number of tasks in the job header does not match the ecconf settings"
        exit 2
    fi

    chmod +x $LAUNCHER
    export PMI_NO_FORK=1

    echo $cmd
    $cmd
}
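# Illustration of launch_forking() (hypothetical executables, as in the example
# above): for
#
#     launch 256 ifsmaster.exe -- 128 nemo.exe -- 1 xios_server.exe
#
# the generated launch.sh would look roughly like
#
#     #!/bin/ksh
#     rank=$EC_FARM_ID
#     export OMP_NUM_THREADS=1
#     if false # just to manage all the other test cases uniformly with an elif
#     then
#         :
#     elif [ $rank -lt 256 ]
#     then
#         exec ./ifsmaster.exe
#     elif [ $rank -lt 384 ]
#     then
#         exec ./nemo.exe
#     elif [ $rank -lt 385 ]
#     then
#         exec ./xios_server.exe
#     fi
#
# so every aprun rank selects its executable from its EC_FARM_ID, which allows
# different binaries to share a node.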
function finalise()
{
    # This function should execute any post-run functionality, e.g.
    # platform dependent cleaning or a resubmit

    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
    then
        info "Resubmitting job for leg $((leg_number+1))"

        # go to submit dir
        cd ${PBS_O_WORKDIR} # same as ${start_dir}

        ## The following will NOT work, because the qsub options below are ignored:
        ##
        ## has_config xios && ectasks=1
        ## has_config nemo && ectasks=${ectasks}:${nem_numproc}
        ## has_config rnfmapper && ectasks=${ectasks}:1
        ## has_config ifs && ectasks=${ectasks}:${ifs_numproc}
        ## has_config tm5 && ectasks=${ectasks}:${tm5_numproc}
        ##
        ## ectasks=$(echo ${ectasks} | sed "s/^://")
        ## ecthreads=$(echo ${ectasks} | sed "s/[^:]\+/1/g")
        ##
        ## qsub -N eceDev \
        ##      -q np \
        ##      -l EC_billing_account=spnltune \
        ##      -l EC_total_tasks=${ectasks} \
        ##      -l EC_threads_per_task=${ecthreads} \
        ##      -l EC_memory_per_task=2GB \
        ##      -l EC_hyperthreads=1 \
        ##      -j oe \
        ##      -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
        ##      ./${PBS_JOBNAME}

        # So use sed instead
        log=$(basename ${stdout_file}).$(printf %03d $((leg_number+1)))
        cp ./${PBS_JOBNAME} ./${PBS_JOBNAME}.$$
        sed "s:#PBS -o out/.*:#PBS -o out/${log}:" \
            <./${PBS_JOBNAME}.$$ \
            >./${PBS_JOBNAME}
        \rm -f ./${PBS_JOBNAME}.$$
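        # Example (hypothetical experiment name "ECE3"): when resubmitting for
        # leg 3, the sed command above rewrites the "#PBS -o out/..." line of the
        # job script to
        #
        #     #PBS -o out/ECE3.out.003
        #
        # so that each leg writes its standard output to its own numbered file.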
        # Submit command
        set -x
        qsub ./${PBS_JOBNAME}
        set +x
    else
        info "Not resubmitting."
    fi
}