NE4_00.sh

#!/bin/bash
#
# Job options
#
#SBATCH --job-name=NE4_00
#SBATCH --time=12:00:00
#SBATCH --account=ecearth
#
#SBATCH --nodes=7
#SBATCH --exclusive
##SBATCH --ntasks=1320
#SBATCH --ntasks-per-node=100
#SBATCH --partition=batch
#
set -ueo pipefail
#
LOCAL_NODES=7
LOCAL_TASKS=700
#
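# Redirect all further stdout to a per-job log file; the ${VAR-default}
# fallbacks keep the script usable outside SLURM (local runs).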
stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
exec > ${stdout_file}
echo "------------------ Job Info --------------------"
echo "jobid      : ${SLURM_JOB_ID-"id"}"
echo "jobname    : ${SLURM_JOB_NAME-"local"}"
echo "nodename   : ${SLURMD_NODENAME-"nlocal"}"
echo "# nodes    : ${SLURM_JOB_NUM_NODES-$LOCAL_NODES}"
echo "# tasks    : ${SLURM_NTASKS-$LOCAL_TASKS}"
echo "submit dir : ${SLURM_SUBMIT_DIR-$PWD}"
set -ue
#
# Cluster variables
#
NB_CORES_PER_NODES=128
MAX_CORES_PER_NODES=100
LIST_CORES_SOCKET=$(seq -s',' 0 $((NB_CORES_PER_NODES-1)))
#
# Experiment options
#
exp_name=NE4_00
run_start_date="1960-01-01"
run_duration="1 year"
rst_freq="1 month"
run_num_legs=12
special_restart=false
special_restart_from=EXP0
special_restart_date="1959-01-01"
# Time step (s): ORCA025=1350 - ORCA1=2700 - ORCA2=5400
# (i.e. time steps per year: 1-23360, 1-11680, 1-5840)
nem_time_step_sec=1350
lim_time_step_sec=1350
nem_restart_offset=0
nem_config_name=ORCA025_ICE
info_file="nemo.info"
start_dir=${SLURM_SUBMIT_DIR-$PWD}
run_dir="/gpfs/scratch/acad/ecearth/$USER/nemo/run/${exp_name}"
archive_dir="/gpfs/scratch/acad/ecearth/$USER/nemo/archive/${exp_name}"
#
# Program configuration
#
# NEMO procs: 192 - 230 - 460 - 1150
#debug nem_numproc=360
nem_numproc=600
xio_numproc=100
#debug xio_numproc=24
# XIOS procs: 4 - 4 - 6 - 14 (max 26)
nemo_src_dir=${HOME}/modeles/nemo_4.2.0
shared_dir=${nemo_src_dir}/cfgs/SHARED
nem_exe=nemo.exe
nem_exe_file=${start_dir}/../BLD/bin/nemo.exe
xio_exe=xios_server.exe
xio_exe_file=${nemo_src_dir}/ext/xios-trunk-2326_gnu/bin/xios_server.exe
all_proc=$(($nem_numproc+$xio_numproc))
if [[ "${SLURM_JOB_NAME-"local"}" != "local" ]] ; then
    if (( $all_proc != ${SLURM_NTASKS-$LOCAL_TASKS} ))
    then
        echo "XIOS procs + NEMO procs do not match the number of tasks requested from SLURM."
        #exit 0
    fi
fi
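# Note: with the settings above, nem_numproc (600) + xio_numproc (100)
# = 700 tasks, matching the requested 7 nodes x 100 tasks per node.
# The check only warns (the 'exit 0' is commented out), so a mismatch
# will not abort the job.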
#
# Data configuration
#
nem_grid=ORCA025L121
#
ini_data_dir=/gpfs/scratch/acad/ecearth/pbarriat/data/nemo
#
ic_subdir=initial
ic_files=(
    "Goutorbe_ghflux.nc"
    "eORCA025_ghflux_v2.0_c3.0_weights_bilin_nohls.nc => weights_ghflux_bilinear.nc"
    "eORCA025_iwm_b0.2_v1.0_nohls.nc => zdfiwm_forcing.nc"
    "eORCA025.L121_domain_cfg_b0.5_c3.0_d1.0_nohls_clean.nc => domain_cfg.nc"
    "eORCA025_runoff_b0.2_v0.0_nohls.nc => runoff.nc"
    "eORCA025_calving_b0.2_v2.3_nohls.nc => calving.nc"
    "eORCA025_ttv_b0.2_v0.0_nohls.nc => boost_tidal_velocity.nc"
    "eORCA025_bfr2d_v0.2_nohls.nc => bfr_coef.nc"
    "eORCA025_shlat2d_v0.2_nohls.nc => shlat2d.nc"
    "eORCA025_distcoast_b0.2_v0.0_nohls.nc => distcoast.nc"
    "eORCA025.L121-empc_nohls.nc => empc.nc"
)
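# Entries are either plain file names or "source => link_name" pairs;
# the link loops below turn each pair into 'ln -s source link_name' so
# inputs appear in the run directory under the names NEMO expects.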
#
# Extract the horizontal resolution from the grid name (e.g. ORCA025L121 -> 025)
nem_res_hor=$(echo ${nem_grid} | sed 's:ORCA\([0-9]\+\)L[0-9]\+:\1:')
#
clim_subdir=climatology
clim_files=(
    "eORCA025.L121_WOA2018_c3.0_d1.0_v19812010.5.2_nohls.nc => woce_monthly_init.nc"
    "chlorophyl_v0.0.nc => chlorophyl.nc"
    "eORCA025_chlorophyl_v0.0_c3.0_weights_bilin_nohls.nc => chlorophyl_weights_bilin.nc"
    "eORCA025_sss_WOA2018_c3.0_v19812010.5.1_nohls.nc => sss_absolute_salinity.nc"
    "eORCA025_seaice_c3.0_v19802004.0_nohls.nc => seaice.nc"
)
#
forcing_subdir=forcing
nem_forcing_set=ERA5
#nem_forcing_set=JRA55
forcing_files=(
    "* => ."
)
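# "* => ." links every file of the selected forcing set (ERA5 here)
# into the run directory.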
#
shared_files=(
    "namelist_ice_ref"
    "namelist_ref"
    "domain_def_nemo.xml"
    "axis_def_nemo.xml"
    "field_def_nemo-ice.xml"
    "field_def_nemo-oce.xml"
    "grid_def_nemo.xml"
)
#
# Script logic
#
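# leap_days DATE1 DATE2 prints the number of 29 Februaries between the
# two dates; the leg lengths below are shortened by this amount
# (apparently because the model is run on a 365-day calendar).
# The leap-year test relies on 'date -ud "YYYY-02-29"' succeeding only
# when YYYY is a leap year.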
function leap_days()
{
    local ld=0
    local frstYYYY=$(date -ud "$1" +%Y)
    local lastYYYY=$(date -ud "$2" +%Y)
    set +e
    # First year: count its Feb 29 if it exists and lies inside the interval
    $(date -ud "${frstYYYY}-02-29" > /dev/null 2>&1) \
        && (( $(date -ud "$1" +%s) < $(date -ud "${frstYYYY}-03-01" +%s) )) \
        && (( $(date -ud "$2" +%s) > $(date -ud "${frstYYYY}-02-28" +%s) )) \
        && (( ld++ ))
    # Full years in between
    for (( y=frstYYYY+1 ; y<=lastYYYY-1 ; y++ ))
    do
        $(date -ud "$y-02-29" > /dev/null 2>&1) && (( ld++ ))
    done
    # Last year, if different from the first
    (( lastYYYY > frstYYYY )) \
        && $(date -ud "${lastYYYY}-02-29" > /dev/null 2>&1) \
        && (( $(date -ud "$1" +%s) < $(date -ud "${lastYYYY}-03-01" +%s) )) \
        && (( $(date -ud "$2" +%s) > $(date -ud "${lastYYYY}-02-28" +%s) )) \
        && (( ld++ ))
    set -e
    echo "$ld"
}
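# Example: leap_days "1960-01-01" "1961-01-01" prints 1, since the
# interval covers 1960-02-29.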
[[ $@ == *verbose* ]] && set -x
#module purge
module load craype-x86-milan
module load PrgEnv-gnu/8.3.3
module load netCDF-Fortran/4.6.0-gompi-2022a
module load Perl/.5.34.1-GCCcore-11.3.0
if [ ! -d ${run_dir:?} ]
then
    mkdir -p ${run_dir}
    #
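    # Optional bootstrap: populate the new run directory from another
    # experiment (special_restart_from), renaming its restart files to
    # this experiment's name.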
    if $special_restart
    then
        rsync -av --delete ${run_dir}/../${special_restart_from}/ \
            --exclude log --exclude output --exclude restart \
            --exclude="${special_restart_from}_*" --exclude="ocean*" \
            --exclude="restart_*" --exclude="debug.*" --exclude="output.*" \
            ${run_dir}
        cp -f ${nem_exe_file} ${run_dir}
        cp -f ${xio_exe_file} ${run_dir}
        special_year=${special_restart_date:0:4}
        # Truncate the info file after the last leg of the special year
        sed -i "/$special_year/q" ${run_dir}/${info_file}
        . ${run_dir}/${info_file}
        special_restart_leg=$(printf %03d $((leg_number+1)))
        cd ${run_dir}/../../archive/${special_restart_from}/restart/${special_restart_leg}
        for f in *.nc; do
            nf=${exp_name}${f:4}
            cp $f ${run_dir}/$nf
        done
        cd -
        cd ${run_dir}
        for f in ${exp_name}_????????_restart_???_????.nc; do
            nf=${f:14}
            ln -s $f $nf
        done
        cd -
    fi
    cd ${start_dir}
    cp context_nemo.xml file_def_nemo-ice.xml file_def_nemo-oce.xml iodef.xml namelist_ice_cfg* build_namelist_cfg* ${run_dir}
    cd ${run_dir}
    cp ${xio_exe_file} ${xio_exe}
    cp ${nem_exe_file} ${nem_exe}
    [[ ! -f EMPave_old.dat ]] && echo " 0 0.0000000000000000E+00 0.0000000000000000E+00" > EMPave_old.dat
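    # Link the input files into the run directory. Each initial/climatology
    # set is tried twice: first from the grid-specific subdirectory
    # (${nem_grid}), then from the generic one; the '[[ ! -e ... ]]' guard
    # keeps whichever match came first.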
    for file in "${ic_files[@]}"; do
        [[ ! -e ${file#*> } ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/${ic_subdir}/${nem_grid}/$file")
    done
    for file in "${ic_files[@]}"; do
        [[ ! -e ${file#*> } ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/${ic_subdir}/$file")
    done
    for file in "${clim_files[@]}"; do
        [[ ! -e ${file#*> } ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/${clim_subdir}/${nem_grid}/$file")
    done
    for file in "${clim_files[@]}"; do
        [[ ! -e ${file#*> } ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/${clim_subdir}/$file")
    done
    for file in "${forcing_files[@]}"; do
        [[ ! -e ${file#*> } || "$file" == \** ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/${forcing_subdir}/${nem_forcing_set}/$file")
    done
    for file in "${shared_files[@]}"; do
        [[ ! -e ${file#*> } ]] && ln -sf $(sed 's/ *=> */ /' <<< "${shared_dir}/$file")
    done
else
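    # The run directory already exists: remove leftover output files from
    # a previous (possibly interrupted) leg so they are not archived twice.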
    cd ${run_dir}
    shopt -s nullglob
    for v in grid_U grid_V grid_W grid_T icemod SBC SBC_scalar diaptr2D diaptr3D
    do
        for f in ${exp_name}_??_????????_????????_${v}_????.nc
        do
            rm -f "$f"
        done
        for f in ${exp_name}_??_????????_????????_${v}.nc
        do
            rm -f "$f"
        done
        for f in ${exp_name}_??_${v}.nc
        do
            rm -f "$f"
        done
    done
    for f in ocean.output time.step ; do rm -f "${f}"; done
    shopt -u nullglob
fi
run_start_date=$(date -uR -d "${run_start_date}")
run_end_date="${run_start_date} + ${run_duration:?}"
run_end_date=$(date -uR -d "${run_end_date}")
run_start_epoch=$(date -u -d"${run_start_date}" +%s)
run_end_epoch=$(date -u -d"${run_end_date}" +%s)
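# Main leg loop: each iteration runs one leg of length rst_freq (one
# month here) and restarts from the previous leg. State is carried
# across iterations and job resubmissions through ${info_file}, which
# is sourced at the top of each pass.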
for (( ; run_num_legs>0 ; run_num_legs-- ))
do
    [[ -r "${info_file:?}" ]] && source "${info_file:?}"
    leg_start_date=${leg_end_date:-$run_start_date}
    leg_number=$((${leg_number:=0}+1))
    leg_start_epoch=$(date -u -d "${leg_start_date}" +%s)
    leg_end_epoch=$(date -u -d "${leg_start_date:?} + ${rst_freq:=$run_duration}" +%s)
    leg_end_date=$(date -uR -d@"${leg_end_epoch}")
    leg_length_sec=$(( leg_end_epoch - leg_start_epoch ))
    leg_start_sec=$(( leg_start_epoch - run_start_epoch ))
    leg_end_sec=$(( leg_end_epoch - run_start_epoch ))
    leg_start_date_yyyymmdd=$(date -u -d "${leg_start_date}" +%Y%m%d)
    # Subtract leap days from the leg bounds (see leap_days above)
    leg_length_sec=$(( leg_length_sec - $(leap_days "${leg_start_date}" "${leg_end_date}")*24*3600 ))
    leg_start_sec=$(( leg_start_sec - $(leap_days "${run_start_date}" "${leg_start_date}")*24*3600 ))
    leg_end_sec=$(( leg_end_sec - $(leap_days "${run_start_date}" "${leg_end_date}")*24*3600 ))
    (( leg_number > 1 )) && leg_is_restart=true || leg_is_restart=false
    (( leg_end_epoch > run_end_epoch )) && leg_end_date=${run_end_date}
    if (( leg_start_epoch >= run_end_epoch ))
    then
        echo "Leg start date equal to or after end of simulation."
        echo "Nothing left to do. Cleaning and exiting."
        for (( n=0 ; n<nem_numproc ; n++ ))
        do
            np=$(printf %04d ${n})
            rm -f "restart_oce_${np}.nc"
            rm -f "restart_ice_${np}.nc"
            rm -f "restart_icb_${np}.nc"
        done
        exit 0
    fi
    source build_namelist_cfg.sh > namelist_cfg
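    # ns is the model time-step counter at the start of this leg; it is
    # the stamp used in the restart file names
    # (${exp_name}_<ns>_restart_oce_<rank>.nc). nem_restart_offset allows
    # restarting from files whose counter does not start at zero.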
    ns=$(printf %08d $(( leg_start_sec / nem_time_step_sec - nem_restart_offset )))
    echo "ns=$ns"
    if (( leg_start_sec > 0 )); then
        for (( n=0 ; n<nem_numproc ; n++ ))
        do
            np=$(printf %04d ${n})
            formatted_leg_number=$(printf %03d $((leg_number)))
            [[ -f "${exp_name:?}_${ns}_restart_oce_${np}.nc" ]] || { cp $archive_dir/restart/${formatted_leg_number}/*oce* . ; }
            [[ -f "${exp_name:?}_${ns}_restart_oce_${np}.nc" ]] || { echo "Error: ocean restart file not found." ; exit 2 ; }
            ln -fs "${exp_name:?}_${ns}_restart_oce_${np}.nc" "restart_oce_${np}.nc"
            [[ -f "${exp_name:?}_${ns}_restart_ice_${np}.nc" ]] || { cp $archive_dir/restart/${formatted_leg_number}/*ice* . ; }
            [[ -f "${exp_name:?}_${ns}_restart_ice_${np}.nc" ]] || { echo "Error: ice restart file not found." ; exit 2 ; }
            ln -fs "${exp_name:?}_${ns}_restart_ice_${np}.nc" "restart_ice_${np}.nc"
            [[ -f "${exp_name:?}_${ns}_restart_icb_${np}.nc" ]] || { cp $archive_dir/restart/${formatted_leg_number}/*icb* . ; }
            [[ -f "${exp_name:?}_${ns}_restart_icb_${np}.nc" ]] || { echo "Error: iceberg restart file not found." ; exit 2 ; }
            ln -fs "${exp_name:?}_${ns}_restart_icb_${np}.nc" "restart_icb_${np}.nc"
        done
    fi
    [[ $@ == *preponly* ]] && exit 0
    time_begin=$(date +%s)
    ulimit -s unlimited
    if [[ "${SLURM_JOB_NAME-"local"}" == "local" ]] ; then
        echo "!!! Local RUN !!!"
        #xio_numproc=2
        #nem_numproc=24
    fi
    #
    echo "run dir    : $run_dir"
    echo "leg_number : $leg_number"
    #echo "ulimit -s unlimited"
    #echo "Lemaitre3-2018: I_MPI_FABRICS=tcp mpirun -np ${xio_numproc} ./${xio_exe} : -np ${nem_numproc} ./${nem_exe}"
    #echo "Lemaitre3>2019: I_MPI_FABRICS=ofi FI_PROVIDER=tcp mpirun -np ${xio_numproc} ./${xio_exe} : -np ${nem_numproc} ./${nem_exe}"
    #echo "Nic5: I_MPI_HYDRA_TOPOLIB=ipl I_MPI_FABRICS=ofi mpirun -np ${xio_numproc} ./${xio_exe} : -np ${nem_numproc} ./${nem_exe}"
    #echo "Zenobe: mpirun -np ${xio_numproc} ./${xio_exe} : -np ${nem_numproc} ./${nem_exe}"
    #echo "Cyclone: I_MPI_FABRICS=tcp mpirun -np "${xio_numproc:?}" "./${xio_exe:?}" : -np "${nem_numproc:?}" "./${nem_exe:?}"
    #echo "LUMI: srun --multi-prog prog.conf (SLURM_JOB_NUM_NODES:${SLURM_JOB_NUM_NODES-$LOCAL_NODES} SLURM_CPUS_ON_NODE:${SLURM_CPUS_ON_NODE-$NB_CORES_PER_NODES})"
    echo "LUCIA: srun srun_wrapper.sh (SLURM_JOB_NUM_NODES:${SLURM_JOB_NUM_NODES-$LOCAL_NODES} SLURM_CPUS_ON_NODE:${SLURM_CPUS_ON_NODE-$NB_CORES_PER_NODES})"
    export OMP_NUM_THREADS=1
    #export MKL_NUM_THREADS=1
    #export PMI_NO_PREINITIALIZE=y
    export TIME="launch timing : %e elapsed %U user %S system"
    # Split XIOS nodes (alternative: interleave NEMO and XIOS ranks per node)
    #cat /dev/null > prog.conf
    #nem_numproc_slice=$(($nem_numproc/${SLURM_JOB_NUM_NODES-$LOCAL_NODES}))
    #nem_numproc_slice_0=$(($nem_numproc_slice-1))
    #xio_numproc_slice_0=$(($xio_numproc/${SLURM_JOB_NUM_NODES-$LOCAL_NODES}-1))
    #xio_numproc_slice=$(($nem_numproc_slice+$xio_numproc_slice_0))
    #proc_id=0
    #for i in $(eval echo "{1..${SLURM_JOB_NUM_NODES-$LOCAL_NODES}}")
    #do
    #    for j in $(eval echo "{0..$nem_numproc_slice_0}")
    #    do
    #        echo "$proc_id ./${nem_exe}" >> prog.conf
    #        proc_id=$(($proc_id+1))
    #    done
    #    for j in $(eval echo "{$nem_numproc_slice..$xio_numproc_slice}")
    #    do
    #        echo "$proc_id ./${xio_exe}" >> prog.conf
    #        proc_id=$(($proc_id+1))
    #    done
    #done
    # Group XIOS nodes: all NEMO ranks first, then all XIOS ranks
    cat /dev/null > prog.conf
    proc_id=0
    for (( i=0 ; i<nem_numproc ; i++ ))
    do
        echo "$proc_id ./${nem_exe}" >> prog.conf
        proc_id=$(($proc_id+1))
    done
    for (( i=0 ; i<xio_numproc ; i++ ))
    do
        echo "$proc_id ./${xio_exe}" >> prog.conf
        proc_id=$(($proc_id+1))
    done
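    # prog.conf now maps MPI ranks 0..nem_numproc-1 to NEMO and the
    # remaining xio_numproc ranks to the XIOS server, e.g. with the
    # settings above:
    #   0 ./nemo.exe
    #   ...
    #   599 ./nemo.exe
    #   600 ./xios_server.exe
    #   ...
    #   699 ./xios_server.exe
    # srun --multi-prog launches this heterogeneous job in one step.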
    #echo "LUMI: srun --kill-on-bad-exit=1 --multi-prog prog.conf"
    #cat /dev/null > ./ztask_file.conf
    #echo "0-$(($xio_numproc-1)) ./${xio_exe}" >> ./ztask_file.conf
    #echo "$xio_numproc-$(($xio_numproc+$nem_numproc-1)) ./${nem_exe}" >> ./ztask_file.conf
    #BINDING=map_cpu:$LIST_CORES_SOCKET
    #echo "LUMI: srun --kill-on-bad-exit=1 --mpi=pmi2 -m cyclic --cpu_bind=$BINDING --multi-prog ./ztask_file.conf"
    #exit
    echo $time_begin
    #mpirun -np ${xio_numproc} ./${xio_exe} : -np ${nem_numproc} ./${nem_exe}
    srun --kill-on-bad-exit=1 --multi-prog prog.conf
    #srun --kill-on-bad-exit=1 --mpi=pmi2 -m cyclic --cpu_bind=$BINDING --multi-prog ./ztask_file.conf
    #srun --kill-on-bad-exit=1 ./${xio_exe} : ./${nem_exe}
    time_end=$(date +%s)
    echo $time_end
    # Archive output, restart and log files for this leg
    formatted_leg_number=$(printf %03d $((leg_number)))
    outdir="${archive_dir:?}/output/${formatted_leg_number}"
    mkdir -p "${outdir}"
    shopt -s nullglob
    for v in grid_U grid_V grid_W grid_T icemod SBC SBC_scalar diaptr2D diaptr3D
    do
        for f in ${exp_name}_??_????????_????????_${v}_????.nc
        do
            mv "$f" "$outdir/"
        done
        for f in ${exp_name}_??_????????_????????_${v}.nc
        do
            mv "$f" "$outdir/"
        done
        for f in ${exp_name}_??_${v}.nc
        do
            mv "$f" "$outdir/"
        done
    done
    outdir="$archive_dir/restart/${formatted_leg_number}"
    mkdir -p "${outdir}"
    for f in ${exp_name}_${ns}_restart_???_????.nc
    do
        [ -f "$f" ] && mv "$f" "${outdir}"
    done
    outdir="$archive_dir/log/${formatted_leg_number}"
    mkdir -p "${outdir}"
    for f in ocean.output time.step ; do mv "${f}" "${outdir}"; done
    cp -f namelist_ice_ref namelist_ice_cfg namelist_ref namelist_cfg ${archive_dir}
    [[ -f ${start_dir}/${SLURM_JOB_NAME-"run"}.sh ]] && cp -f ${start_dir}/${SLURM_JOB_NAME-"run"}.sh ${archive_dir}
    shopt -u nullglob
    # Elapsed wall time for this leg (hh:mm:ss)
    tr=$(date -d "0 -$time_begin sec + $time_end sec" +%T)
    current_date=$(date +'%F %T')
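    # Record this leg's state; the next iteration (or the next submitted
    # job) sources ${info_file} to pick up leg_number and leg_end_date.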
    {
        echo "#"
        echo "# Finished leg at ${current_date} after ${tr} (hh:mm:ss)"
        echo "leg_number=${leg_number}"
        echo "leg_start_date=\"${leg_start_date}\""
        echo "leg_end_date=\"${leg_end_date}\""
    } | tee -a "${info_file}"
    special_restart=false
done
cd - >/dev/null
[[ $@ == *noresubmit* ]] && exit 0
if (( leg_end_epoch < run_end_epoch )) ; then
    echo "Leg ended earlier than the end of the simulation."
    echo "Submitting another job."
    if [[ "$@" == *"run"* ]] ; then
        exec "$0" "$@"
    elif hash sbatch 2>/dev/null; then
        # Need to go to start_dir to find the run script
        cd ${start_dir}
        echo "sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) -d ${SLURM_JOB_ID-"id"} ./${SLURM_JOB_NAME-"run"}.sh"
        # Submit command
        # Note: this does not work if you specify a job name with sbatch -J jobname!
        sbatch -N ${SLURM_JOB_NUM_NODES-"1"} \
               -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
               -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
               -d ${SLURM_JOB_ID-"id"} \
               ./${SLURM_JOB_NAME-"run"}.sh
        #
    else
        cd ${start_dir}
        echo "qsub ${PBS_JOBNAME}.sh"
        qsub ./${PBS_JOBNAME}.sh
    fi
else
    echo "Nothing left to do. Cleaning and exiting." # FIXME Factorize this (we have two exit points)
    for (( n=0 ; n<nem_numproc ; n++ ))
    do
        np=$(printf %04d ${n})
        rm -f "restart_oce_${np}.nc"
        rm -f "restart_ice_${np}.nc"
        rm -f "restart_icb_${np}.nc"
    done
fi
exit 0