ecmwf-cca-intel.cfg.tmpl

# Platform dependent configuration functions for the Cray machine (cca) at ECMWF
function configure()
{
    # This function should configure all settings/modules needed to
    # later prepare the EC-Earth run directory and set variables used
    # in the run script

    # Configure paths for building/running EC-Earth
    ecearth_src_dir=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]
    run_dir=[[[PLT:ACTIVE:RUN_DIR]]]
    ini_data_dir=[[[PLT:ACTIVE:INI_DATA_DIR]]]

    # File for standard output.
    # NOTE: This will be modified for restart jobs!
    stdout_file=${start_dir}/out/$exp_name.out

    # Resubmit this job for automatic restarts? [true/false]
    # Also, add options for the resubmit command here.
    resubmit_job=[[[PLT:ACTIVE:RESUBMIT_JOB]]]
    resubmit_opt="[[[PLT:ACTIVE:RESUBMIT_OPT]]]"

    # Configure GRIBEX paths
    export LOCAL_DEFINITION_TEMPLATES=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]
    export ECMWF_LOCAL_TABLE_PATH=[[[PLT:ACTIVE:GRIBEX_DEFINITION_PATH]]]

    # Configure GRIB API paths
    export GRIB_DEFINITION_PATH=[[[PLT:ACTIVE:ECEARTH_SRC_DIR]]]/util/grib_table_126:[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_DEFINITION_SUBDIR]]]
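    # NOTE: the EC-Earth copy of grib_table_126 is listed first so that it is
    #       found before the definitions shipped with the GRIB API installation.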
    export GRIB_SAMPLES_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_SAMPLES_SUBDIR]]]
    export GRIB_BIN_PATH=[[[PLT:ACTIVE:GRIBAPI_BASE_DIR]]]/[[[PLT:ACTIVE:GRIBAPI_BIN_SUBDIR]]]
    export emos_tool=/usr/local/apps/libemos/000458/GNU/63/bin/emos_tool

    # Configure number of processors per node
    proc_per_node=[[[PLT:ACTIVE:PROC_PER_NODE]]]

    # Configure and load modules
    pre_load_modules_cmd="[[[PLT:ACTIVE:PRE_LOAD_MODULES_CMD]]]"
    module_list="[[[PLT:ACTIVE:MODULE_LIST]]]"

    # Load any required modules
    if [[ -n "$module_list" ]]
    then
        if [ -n "${pre_load_modules_cmd}" ]
        then
            eval $(echo "${pre_load_modules_cmd}")
        fi
        for m in ${module_list}
        do
            module load $m
        done
        module list
    fi

    # Add directories to the shared library search path
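    # The ${LD_LIBRARY_PATH:+...} expansion below only inserts the separating ':'
    # when LD_LIBRARY_PATH is already set, so an unset variable does not produce a
    # leading colon (which the dynamic linker would treat as the current directory).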
    if [ -n "[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]" ]
    then
        export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}"[[[PLT:ACTIVE:ADD_TO_LD_LIBRARY_PATH]]]"
    fi

    #hostname
    ulimit -s unlimited
    ulimit -v unlimited
    #ulimit -n 2048
    #ulimit -a
}

function configure_python()
{
    # Specific for the python + eccodes setup - used for OSM pre/post-processing
    module rm grib_api
    module load eccodes
    module load python
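    # The GRIB_* paths set in configure() point at the grib_api installation; they
    # are unset here so that the eccodes module just loaded falls back to its own
    # default definitions and samples.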
    unset GRIB_DEFINITION_PATH
    unset GRIB_SAMPLES_PATH
    unset GRIB_BIN_PATH
}

function launch()
{
    use_forking=$(echo "[[[PLT:ACTIVE:USE_FORKING]]]" | tr '[:upper:]' '[:lower:]')
    if [ "${use_forking}" = "true" ]
    then
        launch_forking "$@"
    else
        launch_default "$@"
    fi
}

function launch_default()
{
    # Version using aprun
    cmd="aprun"

    # account for possible user tasks_per_node request
    IFS=':' read -ra tasks_per_node <<< "$EC_tasks_per_node"
    read hyperth <<< "$EC_hyperthreads"
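
    # EC_tasks_per_node is taken from the job environment (set via the PBS resource
    # directives in the job header) as a colon-separated list with one entry per
    # executable; EC_hyperthreads is passed to aprun via -j (CPUs per compute unit).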

    i=0
    while (( "$#" ))
    do
        nranks=$1
        executable=./$(basename $2)
        shift
        shift
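
        # master1s*.exe is launched as a single OpenMP task: aprun -d reserves
        # OMP_NUM_THREADS CPUs for it and -cc depth binds its threads to that
        # reservation; every other executable gets an explicit rank count.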
        if [ "$executable" = "./master1s.exe" ] || [ "$executable" = "./master1s_cpl.exe" ]
        then
            cmd+=" -d ${OMP_NUM_THREADS:-1} -cc depth $executable"
        else
            cmd+=" -n $nranks -N ${tasks_per_node[$i]} -j ${hyperth} $executable"
        fi

        while (( "$#" )) && [ "$1" != "--" ]
        do
            cmd+=" $1"
            shift
        done
        shift || true

        (( "$#" )) && cmd+=" :"
        ((i+=1))
    done

    echo $cmd
    $cmd
}
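
# A purely illustrative example (hypothetical executables and task counts): for
#   launch 128 xios_server.exe -- 256 nemo.exe
# with EC_tasks_per_node="12:24" and EC_hyperthreads=1, launch_default builds
#   aprun -n 128 -N 12 -j 1 ./xios_server.exe : -n 256 -N 24 -j 1 ./nemo.exe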

function launch_forking()
{
    # Version using a launcher script to allow node sharing.
    LAUNCHER="launch.sh"
    nproc=0
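
    # Every task started by aprun runs the same launch.sh wrapper; the wrapper
    # uses its own task id ($EC_FARM_ID, provided by the job environment) to
    # decide which executable to exec. Since aprun only ever sees one "binary",
    # tasks belonging to different executables can end up on the same node.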
    cat <<-EOF >$LAUNCHER # Indentation with tabs, not spaces!!
#!/bin/ksh
rank=\$EC_FARM_ID
export OMP_NUM_THREADS=1
if false # just to manage all the other test cases uniformly with an elif
then
:
EOF

    # Create the launcher script
    while [ $# -gt 0 ]
    do
        nranks=$1
        executable=./$(basename $2)
        shift # nranks
        shift # executable
        ((nproc+=nranks))

        while [ $# -gt 0 ] && [ "$1" != "--" ]
        do
            executable="$executable $1"
            shift # argument
        done
        shift || true # separator -- or nothing

        cat <<-EOF >>$LAUNCHER # Indentation with tabs, not spaces!!
elif [ \$rank -lt $nproc ]
then
exec $executable
EOF
    done

    cat <<-EOF >>$LAUNCHER # Indentation with tabs, not spaces!!
fi
EOF
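
    # A purely illustrative example (hypothetical executables): for
    #   launch_forking 1 xios_server.exe -- 128 nemo.exe
    # the loop above writes a launch.sh equivalent to
    #   #!/bin/ksh
    #   rank=$EC_FARM_ID
    #   export OMP_NUM_THREADS=1
    #   if false; then :
    #   elif [ $rank -lt 1 ];   then exec ./xios_server.exe
    #   elif [ $rank -lt 129 ]; then exec ./nemo.exe
    #   fi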

    # Create the command to execute
    IFS=':' read -ra total_tasks <<< "$EC_total_tasks"
    IFS=':' read -ra tasks_per_node <<< "$EC_tasks_per_node"
    read hyperth <<< "$EC_hyperthreads"

    i=0
    nproc_header=0
    cmd="aprun"
    for n in ${total_tasks[*]}
    do
        cmd+=" -n $n -N ${tasks_per_node[$i]} -j ${hyperth} ./${LAUNCHER}"
        ((i+=1))
        ((nproc_header+=n))
        [ "$nproc_header" -lt "$nproc" ] && cmd+=" :"
    done

    # Check that the total number of tasks in the job header matches the ecconf settings
    if [ "$nproc_header" -ne "$nproc" ]
    then
        echo "The total number of tasks in the job header does not match the ecconf settings"
        exit 2
    fi

    chmod +x $LAUNCHER
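
    # PMI_NO_FORK (a Cray PMI setting) disables PMI's fork() handling; presumably
    # it is needed here because aprun starts a shell script rather than the MPI
    # binaries directly.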
    export PMI_NO_FORK=1

    echo $cmd
    $cmd
}

function finalise()
{
    # This function should execute any post-run functionality, e.g.
    # platform dependent cleaning or a resubmit
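
    # Resubmit only if automatic restarts were requested and the leg that just
    # finished ends before the end of the whole run, i.e. more legs remain.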
    if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
    then
        info "Resubmitting job for leg $((leg_number+1))"

        # Go to the submit dir
        cd ${PBS_O_WORKDIR} # same as ${start_dir}

        ## The following will NOT work, because the qsub options below are ignored:
        ##
        ## has_config xios      && ectasks=1
        ## has_config nemo      && ectasks=${ectasks}:${nem_numproc}
        ## has_config rnfmapper && ectasks=${ectasks}:1
        ## has_config ifs       && ectasks=${ectasks}:${ifs_numproc}
        ## has_config tm5       && ectasks=${ectasks}:${tm5_numproc}
        ##
        ## ectasks=$(echo ${ectasks} | sed "s/^://")
        ## ecthreads=$(echo ${ectasks} | sed "s/[^:]\+/1/g")
        ##
        ## qsub -N eceDev \
        ##      -q np \
        ##      -l EC_billing_account=spnltune \
        ##      -l EC_total_tasks=${ectasks} \
        ##      -l EC_threads_per_task=${ecthreads} \
        ##      -l EC_memory_per_task=2GB \
        ##      -l EC_hyperthreads=1 \
        ##      -j oe \
        ##      -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
        ##      ./${PBS_JOBNAME}

        # So use sed instead to update the output file name in the job script
        log=$(basename ${stdout_file}).$(printf %03d $((leg_number+1)))
        cp ./${PBS_JOBNAME} ./${PBS_JOBNAME}.$$
        sed "s:#PBS -o out/.*:#PBS -o out/${log}:" \
            <./${PBS_JOBNAME}.$$ \
            >./${PBS_JOBNAME}
        \rm -f ./${PBS_JOBNAME}.$$

        # Submit command
        set -x
        qsub ./${PBS_JOBNAME}
        set +x
    else
        info "Not resubmitting."
    fi
}