#!/bin/bash
#
# EC00.sh - EC-Earth 3 coupled run script (SLURM batch job)
#
# Job options
#
#SBATCH --job-name=EC00
#SBATCH --time=01:59:00
#SBATCH --account=ecearth
#
#SBATCH --nodes=4
#SBATCH --exclusive
#SBATCH --ntasks-per-node=125
##SBATCH --partition=medium
##SBATCH --exclude=cnm016
#SBATCH --partition=debug
##SBATCH --switches=1@47:50:00
#
set -ueo pipefail
#
# Fallback values used when running interactively (outside of SLURM).
LOCAL_NODES=4
LOCAL_TASKS=500
#
# All stdout of this job goes to a log file named after job name and id.
stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
exec > "${stdout_file}"
echo "------------------ Job Info --------------------"
echo "jobid : ${SLURM_JOB_ID-"id"}"
echo "jobname : ${SLURM_JOB_NAME-"local"}"
echo "nodename : ${SLURMD_NODENAME-"local"}"
echo "# nodes : ${SLURM_JOB_NUM_NODES-$LOCAL_NODES}"
echo "# tasks : ${SLURM_NTASKS-$LOCAL_TASKS}"
echo "submit dir : ${SLURM_SUBMIT_DIR-$PWD}"
set -ue
  32. #
  33. # Cluster variables
  34. #
  35. NB_CORES_PER_NODES=128
  36. MAX_CORES_PER_NODES=123
  37. LIST_CORES_SOCKET=`seq -s',' 0 $((NB_CORES_PER_NODES-1))`
  38. #
  39. # Directories
  40. start_dir=${SLURM_SUBMIT_DIR-$PWD}
  41. # librunscript defines some helper functions
  42. . ${start_dir}/librunscript.sh
  43. # =============================================================================
  44. # *** BEGIN User configuration
  45. # =============================================================================
  46. # -----------------------------------------------------------------------------
  47. # *** General configuration
  48. # -----------------------------------------------------------------------------
  49. # Component configuration
  50. # (for syntax of the $config variable, see librunscript.sh)
  51. config="ifs nemo lim3 rnfmapper xios:detached oasis"
  52. # Experiment name (exactly 4 letters!)
  53. export exp_name=EC00
  54. # Simulation start and end date. Use any (reasonable) syntax you want.
  55. run_start_date="1970-01-01"
  56. run_end_date="${run_start_date} + 52 years"
  57. # Set $force_run_from_scratch to 'true' if you want to force this run to start
  58. # from scratch, possibly ignoring any restart files present in the run
  59. # directory. Leave set to 'false' otherwise.
  60. # NOTE: If set to 'true' the run directory $run_dir is cleaned!
  61. force_run_from_scratch=true
  62. special_restart=false
  63. special_restart_from=ECE3
  64. special_restart_date="1995-01-01"
  65. # Resolution (TM5 resolution is set at compilation)
  66. ifs_grid=T255L91
  67. nem_grid=ORCA1L75
  68. # Restart frequency. Use any (reasonable) number and time unit you want.
  69. # For runs without restart, leave this variable empty
  70. rst_freq="6 months"
  71. # Number of restart legs to be run in one go
  72. run_num_legs=1
  73. # Directories
  74. ctrl_file_dir=${start_dir}/ctrl
  75. output_control_files_dir=${start_dir}/ctrl
  76. # Architecture
  77. build_arch=ecconf
  78. # This file is used to store information about restarts
  79. ece_info_file="ece.info"
  80. # -----------------------------------------------------------------------------
  81. # *** Read platform dependent configuration
  82. # -----------------------------------------------------------------------------
  83. #. ${start_dir}/ecconf.cfg
  84. # This function should configure all settings/modules needed to
  85. # later prepare the EC-Earth run directory and set variables used
  86. # in the run script
  87. # SCRATCH is not defined in MN3, define it here
  88. # and also make sure it is defined when compiling
  89. export SCRATCH=/gpfs/scratch/acad/ecearth/${USER}
  90. # Configure paths for building/running EC-Earth
  91. ecearth_src_dir=${HOME}/models/ecearth_3.3.1/sources
  92. run_dir=/gpfs/scratch/acad/ecearth/${USER}/ecearth/run/${exp_name}
  93. ini_data_dir=/gpfs/scratch/acad/ecearth/data/bsc32/v3.3.1/inidata
  94. archive_dir=/gpfs/scratch/acad/ecearth/${USER}/ecearth/archive/${exp_name}
  95. # File for standard output.
  96. # NOTE: This will be modified for restart jobs!
  97. stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
  98. # Resubmit this job for automatic restarts? [true/false]
  99. # Also, add options for the resubmit command here.
  100. resubmit_job=true
  101. resubmit_opt=""
  102. module purge
  103. module load EasyBuild/2023a
  104. MODULEPATH=$MODULEPATH:/gpfs/projects/acad/ecearth/softs/easybuild/modules/all
  105. module load netCDF-Fortran/4.6.1-iompi-2023a
  106. module load imkl/2023.1.0
  107. module load grib_api/1.24.0-iompi-2023a
  108. module load CDO/1.9.10-iompi-2023a
  109. # Configure grib api paths
  110. export GRIB_DEFINITION_PATH=${HOME}/models/ecearth_3.3.1/sources/util/grib_table_126:${EBROOTGRIB_API}/share/grib_api/definitions
  111. export GRIB_SAMPLES_PATH=${EBROOTGRIB_API}/share/grib_api/ifs_samples/grib1
  112. export GRIB_BIN_PATH=${EBROOTGRIB_API}/bin
  113. # Configure number of processors per node
  114. proc_per_node=128
  115. ulimit -s unlimited
  116. # -----------------------------------------------------------------------------
  117. # *** Time step settings
  118. # -----------------------------------------------------------------------------
  119. case "${ifs_grid}--${nem_grid}" in
  120. T159L*--ORCA1L*)
  121. ifs_time_step_sec=3600; nem_time_step_sec=2700; lim_time_step_sec=2700; cpl_freq_atm_oce_sec=10800
  122. ;;
  123. T255L*--ORCA1L*)
  124. ifs_time_step_sec=2700; nem_time_step_sec=2700; lim_time_step_sec=2700; cpl_freq_atm_oce_sec=2700
  125. ;;
  126. T511L*--ORCA025L*)
  127. ifs_time_step_sec=900 ; nem_time_step_sec=900 ; lim_time_step_sec=900 ; cpl_freq_atm_oce_sec=2700
  128. ;;
  129. *) error "Can't set time steps for unknown combination of horizontal grids: ${ifs_grid}-${nem_grid}"
  130. ;;
  131. esac
  132. # -----------------------------------------------------------------------------
  133. # *** IFS configuration
  134. # -----------------------------------------------------------------------------
  135. ifs_version=36r4
  136. ifs_di_freq=$(( 24 * 3600 / ifs_time_step_sec ))
  137. ifs_ddh_freq=$(( 120 * 3600 / ifs_time_step_sec ))
  138. export ifs_res_hor=$(echo ${ifs_grid} | sed 's:T\([0-9]\+\)L\([0-9]\+\):\1:')
  139. ifs_res_ver=$(echo ${ifs_grid} | sed 's:T\([0-9]\+\)L\([0-9]\+\):\2:')
  140. ifs_numproc=400
  141. ifs_exe_file=${ecearth_src_dir}/ifs-${ifs_version}/bin/ifsmaster-${build_arch}
  142. ifs_lastout=false
  143. # USE FORCING FROM CMIP5 (SOLAR, GHG, AEROSOL, O3)
  144. ifs_cmip5=TRUE
  145. # SWITCH FOR RCP AND HISTORICAL RUNS FOR CMIP5 (0=HISTO 1=RCP 3-PD, 2=RCP 4.5, 3=RCP 6.0, 4=RCP 8.5)
  146. ifs_cmip5_rcp=0
  147. # 1PCTCO2 and A4XCO2 should operate together with ifs_cmip_fixyear years, ie., the baseline GHGs level will use that for year ifs_cmip_fixyear
  148. export ifs_cmip_fixyear=1850
  149. # USE FORCING FROM CMIP6 (HAS PRIORITY OVER LCMIP5)
  150. ifs_cmip6=TRUE
  151. # MAC-SP anthropogenic simple plume model (parameterization of anthropogenic aerosol optical properties)
  152. ifs_mac2sp=TRUE
  153. # Use CMIP6 prescribed preindustrial aerosol
  154. ifs_cmip6piaer=TRUE
  155. # !! scenario 'historical' max 2014
  156. # 1850 (o3_pi)
  157. # 1850 --> 2014 (o3_histo)
  158. # !! other scenarios (> 2014), default: SSP3-7.0
  159. # SSP1-1.9, SSP1-2.6, SSP1-2.6-Ext, SSP2-4.5, SSP3-7.0, SSP3-LowNTCF, SSP4-3.4, SSP5-3.4-OS, SSP4-6.0, SSP5-3.4-OS-Ext, SSP5-8.5, SSP5-8.5-Ext
  160. ifs_cmip6_scenario=historical
  161. # Read CMIP6 stratospheric aerosol data file, vertically integrated version
  162. lcmip6_strataer_simp=FALSE
  163. lcmip6_strataer_full=TRUE
  164. lcmip6_strataer_bckgd=FALSE
  165. # for an abrupt increase of CO2 to 4x the starting year (ifs_cmip_fixyear)
  166. export ifs_A4xCO2=FALSE
  167. # for an 1% per year increase of CO2 until reaching 4x the starting year (ifs_cmip_fixyear)
  168. export ifs_1PCTCO2=FALSE
  169. # Time-varying orbital forcing (Qiong Zhang, SU-2013-09)
  170. # https://dev.ec-earth.org/projects/ecearth3/wiki/Orbital_forcing_in_EC-Earth_3
  171. #
  172. # ifs_orb_switch=false, no orbital calculations applied
  173. # ifs_orb_switch=true, use orbital calculations according to ifs_orb_mode
  174. # ifs_orb_mode="fixed_year", or "variable_year", or "fixed_parameters"
  175. # fixed_year: calculate the orbital parameters at ifs_orb_iyear, e.g.,1850
  176. # variable_year: calculate orbital parameters annually start from ifs_orb_iyear
  177. # fixed_parameters: prescribe orbital parameters for given year
  178. ifs_orb_switch=FALSE
  179. ifs_orb_mode="variable_year"
  180. ifs_orb_iyear=$(date -u -d "${run_start_date}" +%Y)
  181. # IFS tuning parameters
  182. ifs_tuning_parameter_file=${ctrl_file_dir}/ifs-tuning-parameters-${ifs_grid}.sh
  183. if [ -f ${ifs_tuning_parameter_file} ]
  184. then
  185. source ${ifs_tuning_parameter_file}
  186. else
  187. error "Sorry, ${ifs_tuning_parameter_file} not found, exiting."
  188. fi
  189. # Select source of vegetation data:
  190. # ifs climatology from IFS
  191. # era20c vegetation from an off-line LPJ-Guess run forced with ERA20C
  192. # (currently available only for T255 and T159)
  193. #
  194. ifs_veg_source="era20c"
  195. case ${ifs_veg_source} in
  196. "ifs" )
  197. # Use Lambert-Beer to compute effective vegetation cover
  198. n_compute_eff_veg_fraction=2
  199. ;;
  200. "era20c" )
  201. # LPJG vegetation is provided as effective cover
  202. # Don't use Lambert-Beer
  203. n_compute_eff_veg_fraction=0
  204. ;;
  205. * )
  206. error "Vegetation from ${ifs_veg_source} not implemented"
  207. ;;
  208. esac
  209. # use DMI land ice physics and varying snow albedo
  210. ifs_landice=false
  211. # -----------------------------------------------------------------------------
  212. # *** NEMO/LIM configuration
  213. # -----------------------------------------------------------------------------
  214. # This is only needed if the experiment is started from an existing set of NEMO
  215. # restart files
  216. nem_restart_file_path=${start_dir}/nemo-rst
  217. nem_restart_offset=0
  218. nem_res_hor=$(echo ${nem_grid} | sed 's:ORCA\([0-9]\+\)L[0-9]\+:\1:')
  219. nem_exe_file=${ecearth_src_dir}/nemo-3.6/CONFIG/${nem_grid}_LIM3/BLD/bin/nemo.exe
  220. nem_numproc=98
  221. # -----------------------------------------------------------------------------
  222. # *** Runoff mapper configuration
  223. # -----------------------------------------------------------------------------
  224. rnf_exe_file=${ecearth_src_dir}/runoff-mapper/bin/runoff-mapper.exe
  225. rnf_numproc=1
  226. # -----------------------------------------------------------------------------
  227. # *** OASIS configuration
  228. # -----------------------------------------------------------------------------
  229. # Restart files for the coupling fields (note 8 character limit in OASIS)
  230. # rstas.nc : atmosphere single-category fields
  231. # rstam.nc : atmosphere multi-category fields
  232. # rstos.nc : ocean single-category fields
  233. # rstom.nc : ocean multi-category fields
  234. oas_rst_files="rstas.nc rstos.nc"
  235. # Decide whether the OASIS weight files for interpolation should be linked from
  236. # the setup directory (true) or not (false). In the latter case, the weights
  237. # are re-computed at the start of the run.
  238. oas_link_weights=true
  239. # Flux correction for runoff (not calving) sent from Oasis to ocean.
  240. # 1.07945 is computed to compensate for a P-E=-0.016 mm/day (valid for std res)
  241. oas_mb_fluxcorr=1.07945
  242. # -----------------------------------------------------------------------------
  243. # *** XIOS configuration
  244. # -----------------------------------------------------------------------------
  245. xio_exe_file=${ecearth_src_dir}/xios-2.5/bin/xios_server.exe
  246. xio_numproc=1
  247. # -----------------------------------------------------------------------------
  248. # *** Extra initial conditions saved during the run
  249. # -----------------------------------------------------------------------------
  250. if has_config save_ic
  251. then
  252. source ./libsave_ic.sh
  253. declare -a save_ic_date save_ic_date1 save_ic_sec save_ic_day save_ic_ppt_file save_ic_nemo_ts
  254. fi
  255. # =============================================================================
  256. # *** END of User configuration
  257. # =============================================================================
  258. # =============================================================================
  259. # *** This is where the code begins ...
  260. # =============================================================================
  261. # -----------------------------------------------------------------------------
  262. # *** Create the run dir if necessary and go there
  263. # Everything is done from here.
  264. # -----------------------------------------------------------------------------
  265. if [ ! -d ${run_dir} ]
  266. then
  267. mkdir -p ${run_dir}
  268. if $special_restart
  269. then
  270. force_run_from_scratch=false
  271. echo 'rsync -av --delete ${run_dir}/../${special_restart_from}/ --exclude log --exclude output --exclude restart --exclude="${special_restart_from}_*" --exclude="srf*" --exclude="restart_*" --exclude="debug.*" --ex clude="output.*" ${run_dir}'
  272. rsync -av --delete ${run_dir}/../${special_restart_from}/ --exclude log --exclude output --exclude restart --exclude="${special_restart_from}_*" --exclude="srf*" --exclude="restart_*" --exclude="debug.*" --exclude="output.*" ${run_dir}
  273. cp -f ${nem_exe_file} ${run_dir}
  274. cp -f ${ifs_exe_file} ${run_dir}
  275. cp -f ${rnf_exe_file} ${run_dir}
  276. cp -f ${xio_exe_file} ${run_dir}
  277. special_year=${special_restart_date:0:4}
  278. sed -i "/$special_year/q" ${run_dir}/ece.info
  279. . ${run_dir}/ece.info
  280. special_restart_leg=$(printf %03d $((leg_number+1)))
  281. special_restart_leg_oasis=$(printf %03d $((leg_number+2)))
  282. # PUT HERE THE INSTRUCTIONS TO COPY THE restart files
  283. rsync -av ${run_dir}/../../archive/${special_restart_from}/restart/ifs/${special_restart_leg}/ ${run_dir}
  284. rsync -av ${run_dir}/../../archive/${special_restart_from}/restart/oasis/${special_restart_leg_oasis}/ ${run_dir}
  285. cd ${run_dir}/../../archive/${special_restart_from}/restart/nemo/${special_restart_leg}
  286. for f in *.nc; do
  287. nf=${exp_name}${f:4}
  288. cp $f ${run_dir}/$nf
  289. done
  290. cd -
  291. cd ${run_dir}
  292. for f in ${exp_name}_????????_restart_???_????.nc; do
  293. nf=${f:14}
  294. ln -s $f $nf
  295. done
  296. cd -
  297. rm -f ${run_dir}/ICMCL${special_restart_from}INIT
  298. mv ${run_dir}/ICMGG${special_restart_from}INIUA ${run_dir}/ICMGG${exp_name}INIUA
  299. mv ${run_dir}/ICMGG${special_restart_from}INIT ${run_dir}/ICMGG${exp_name}INIT
  300. mv ${run_dir}/ICMSH${special_restart_from}INIT ${run_dir}/ICMSH${exp_name}INIT
  301. fi
  302. else
  303. force_run_from_scratch=false
  304. special_restart=false
  305. fi
  306. cd ${run_dir}
  307. # -----------------------------------------------------------------------------
  308. # *** Determine the time span of this run and whether it's a restart leg
  309. # -----------------------------------------------------------------------------
  310. # Regularise the format of the start and end date of the simulation
  311. run_start_date=$(date -uR -d "${run_start_date}")
  312. run_end_date=$(date -uR -d "${run_end_date}")
  313. # -----------------------------------------------------------------------------
  314. # *** Set path to grib_set
  315. # -----------------------------------------------------------------------------
  316. grib_set=${GRIB_BIN_PATH}${GRIB_BIN_PATH:+/}grib_set
  317. # Loop over the number of legs
  318. for (( ; run_num_legs>0 ; run_num_legs-- ))
  319. do
  320. # Check for restart information file and set the current leg start date
  321. # Ignore restart information file if force_run_from_scratch is true
  322. if ${force_run_from_scratch} || ! [ -r ${ece_info_file} ]
  323. then
  324. leg_is_restart=false
  325. leg_start_date=${run_start_date}
  326. leg_number=1
  327. else
  328. leg_is_restart=true
  329. . ./${ece_info_file}
  330. leg_start_date=${leg_end_date}
  331. leg_number=$((leg_number+1))
  332. fi
  333. # Compute the end date of the current leg
  334. if [ -n "${rst_freq}" ]
  335. then
  336. leg_end_date=$(date -uR -d "${leg_start_date} + ${rst_freq}")
  337. else
  338. leg_end_date=${run_end_date}
  339. fi
  340. if [ $(date -u -d "${leg_end_date}" +%s) -ge $(date -u -d "${run_end_date}" +%s) ]
  341. then
  342. leg_end_date=${run_end_date}
  343. ifs_lastout=true
  344. fi
  345. # Some time variables needed later
  346. leg_length_sec=$(( $(date -u -d "${leg_end_date}" +%s) - $(date -u -d "${leg_start_date}" +%s) ))
  347. leg_start_sec=$(( $(date -u -d "${leg_start_date}" +%s) - $(date -u -d "${run_start_date}" +%s) ))
  348. leg_end_sec=$(( $(date -u -d "${leg_end_date}" +%s) - $(date -u -d "${run_start_date}" +%s) ))
  349. leg_start_date_yyyymmdd=$(date -u -d "${leg_start_date}" +%Y%m%d)
  350. leg_start_date_yyyymm=$(date -u -d "${leg_start_date}" +%Y%m)
  351. leg_start_date_yyyy=$(date -u -d "${leg_start_date}" +%Y)
  352. leg_end_date_yyyy=$(date -u -d "${leg_end_date}" +%Y)
  353. # Check whether there's actually time left to simulate - exit otherwise
  354. if [ ${leg_length_sec} -le 0 ]
  355. then
  356. info "Leg start date equal to or after end of simulation."
  357. info "Nothing left to do. Exiting."
  358. exit 0
  359. fi
  360. # Initial conditions saved during the run
  361. do_save_ic=false
  362. has_config save_ic && save_ic_get_config
  363. # if you do not use an option with save_ic, you must define 'do_save_ic' and
  364. # 'save_ic_date_offset' here or in ../libsave_ic.sh/save_ic_get_config()
  365. ${do_save_ic} && save_ic_define_vars
  366. # -------------------------------------------------------------------------
  367. # *** Prepare the run directory for a run from scratch
  368. # -------------------------------------------------------------------------
  369. if ! $leg_is_restart
  370. then
  371. # ---------------------------------------------------------------------
  372. # *** Check if run dir is empty. If not, and if we are allowed to do so
  373. # by ${force_run_from_scratch}, remove everything
  374. # ---------------------------------------------------------------------
  375. if $(ls * >& /dev/null)
  376. then
  377. if ${force_run_from_scratch}
  378. then
  379. rm -fr ${run_dir}/*
  380. else
  381. error "Run directory ${run_dir} not empty and \$force_run_from_scratch not set."
  382. fi
  383. fi
  384. # ---------------------------------------------------------------------
  385. # *** Copy executables of model components
  386. # *** Additionally, create symlinks to the original place for reference
  387. # ---------------------------------------------------------------------
  388. cp ${ifs_exe_file} .
  389. ln -s ${ifs_exe_file} $(basename ${ifs_exe_file}).lnk
  390. cp ${nem_exe_file} .
  391. ln -s ${nem_exe_file} $(basename ${nem_exe_file}).lnk
  392. cp ${rnf_exe_file} .
  393. ln -s ${rnf_exe_file} $(basename ${rnf_exe_file}).lnk
  394. cp ${xio_exe_file} .
  395. ln -s ${xio_exe_file} $(basename ${xio_exe_file}).lnk
  396. # ---------------------------------------------------------------------
  397. # *** Files needed for IFS (linked)
  398. # ---------------------------------------------------------------------
  399. # Initial data
  400. ln -s \
  401. ${ini_data_dir}/ifs/${ifs_grid}/${leg_start_date_yyyymmdd}/ICMGGECE3INIUA \
  402. ICMGG${exp_name}INIUA
  403. ln -s \
  404. ${ini_data_dir}/ifs/${ifs_grid}/${leg_start_date_yyyymmdd}/ICMSHECE3INIT \
  405. ICMSH${exp_name}INIT
  406. rm -f ICMGG${exp_name}INIT
  407. cp ${ini_data_dir}/ifs/${ifs_grid}/${leg_start_date_yyyymmdd}/ICMGGECE3INIT \
  408. ICMGG${exp_name}INIT
  409. # add bare_soil_albedo to ICMGG*INIT
  410. tempfile=tmp.$$
  411. ${grib_set} -s dataDate=$(date -u -d "$run_start_date" +%Y%m%d) \
  412. ${ini_data_dir}/ifs/${ifs_grid}/climate/bare_soil_albedos.grb \
  413. ${tempfile}
  414. cat ${tempfile} >> ICMGG${exp_name}INIT
  415. rm -f ${tempfile}
  416. # add land ice mask if needed
  417. if ${ifs_landice}
  418. then
  419. tempfile=tmp.$$
  420. cdo divc,10 -setcode,82 -selcode,141 ICMGG${exp_name}INIT ${tempfile}
  421. ${grib_set} -s gridType=reduced_gg ${tempfile} ${tempfile}
  422. cat ${tempfile} >> ICMGG${exp_name}INIT
  423. rm -f ${tempfile}
  424. fi
  425. # Other stuff
  426. ln -s ${ini_data_dir}/ifs/rtables/* .
  427. # Output control (ppt files)
  428. if [ ! -f ${output_control_files_dir}/pptdddddd0600 ] && [ ! -f ${output_control_files_dir}/pptdddddd0300 ];then
  429. echo "Error from ece-esm.sh: Neither the file pptdddddd0600 or pptdddddd0300 exists in the directory:"
  430. echo " " ${output_control_files_dir}
  431. exit -1
  432. fi
  433. mkdir postins
  434. cp ${output_control_files_dir}/ppt* postins/
  435. if [ -f postins/pptdddddd0600 ];then
  436. ln -s pptdddddd0600 postins/pptdddddd0000
  437. ln -s pptdddddd0600 postins/pptdddddd1200
  438. ln -s pptdddddd0600 postins/pptdddddd1800
  439. fi
  440. if [ -f postins/pptdddddd0300 ];then
  441. ln -s pptdddddd0300 postins/pptdddddd0900
  442. ln -s pptdddddd0300 postins/pptdddddd1500
  443. ln -s pptdddddd0300 postins/pptdddddd2100
  444. if [ ! -f postins/pptdddddd0600 ];then
  445. ln -s pptdddddd0300 postins/pptdddddd0000
  446. ln -s pptdddddd0300 postins/pptdddddd0600
  447. ln -s pptdddddd0300 postins/pptdddddd1200
  448. ln -s pptdddddd0300 postins/pptdddddd1800
  449. fi
  450. fi
  451. /bin/ls -1 postins/* > dirlist
  452. # ---------------------------------------------------------------------
  453. # *** Files needed for NEMO (linked)
  454. # ---------------------------------------------------------------------
  455. # Link initialisation files for matching ORCA grid
  456. for f in \
  457. bathy_meter.nc coordinates.nc \
  458. ahmcoef.nc \
  459. K1rowdrg.nc M2rowdrg.nc mask_itf.nc \
  460. decay_scale_bot.nc decay_scale_cri.nc \
  461. mixing_power_bot.nc mixing_power_cri.nc mixing_power_pyc.nc \
  462. runoff_depth.nc subbasins.nc
  463. do
  464. [ -f ${ini_data_dir}/nemo/initial/${nem_grid}/$f ] && ln -s ${ini_data_dir}/nemo/initial/${nem_grid}/$f
  465. done
  466. # Link geothermal heating file (independent of grid) and matching weight file
  467. ln -s ${ini_data_dir}/nemo/initial/Goutorbe_ghflux.nc
  468. ln -s ${ini_data_dir}/nemo/initial/weights_Goutorbe1_2_orca${nem_res_hor}_bilinear.nc
  469. # Link the salinity climatology file (needed for diagnostics)
  470. ln -s ${ini_data_dir}/nemo/climatology/${nem_grid}/sali_ref_clim_monthly.nc
  471. # Link either restart files or climatology files for the initial state
  472. if $(has_config nemo:start_from_restart)
  473. then
  474. # When linking restart files, we accept three options:
  475. # (1) Merged files for ocean and ice, i.e.
  476. # restart_oce.nc and restart_ice.nc
  477. # (2) One-file-per-MPI-rank, i.e.
  478. # restart_oce_????.nc and restart_ice_????.nc
  479. # No check is done whether the number of restart files agrees
  480. # with the number of MPI ranks for NEMO!
  481. # (3) One-file-per-MPI-rank with a prefix, i.e.
  482. # <exp_name>_<time_step>_restart_oce_????.nc (similar for the ice)
  483. # The prefix is ignored.
  484. # The code assumes that one of the options can be applied! If more
  485. # options are applicable, the first is chosen. If none of the
  486. # options apply, NEMO will crash with missing restart file.
  487. if ls -U ${nem_restart_file_path}/restart_[oi]ce.nc > /dev/null 2>&1
  488. then
  489. ln -s ${nem_restart_file_path}/restart_[oi]ce.nc ./
  490. elif ls -U ${nem_restart_file_path}/restart_[oi]ce_????.nc > /dev/null 2>&1
  491. then
  492. ln -s ${nem_restart_file_path}/restart_[oi]ce_????.nc ./
  493. else
  494. for f in ${nem_restart_file_path}/????_????????_restart_[oi]ce_????.nc
  495. do
  496. ln -s $f $(echo $f | sed 's/.*_\(restart_[oi]ce_....\.nc\)/\1/')
  497. done
  498. fi
  499. else
  500. # Temperature and salinity files for initialisation
  501. ln -s ${ini_data_dir}/nemo/climatology/absolute_salinity_WOA13_decav_Reg1L75_clim.nc
  502. ln -s ${ini_data_dir}/nemo/climatology/conservative_temperature_WOA13_decav_Reg1L75_clim.nc
  503. ln -s ${ini_data_dir}/nemo/climatology/weights_WOA13d1_2_orca${nem_res_hor}_bilinear.nc
  504. # Grid dependent runoff files
  505. case ${nem_grid} in
  506. ORCA1*) ln -s ${ini_data_dir}/nemo/climatology/runoff-icb_DaiTrenberth_Depoorter_ORCA1_JD.nc ;;
  507. ORCA025*) ln -s ${ini_data_dir}/nemo/climatology/ORCA_R025_runoff_v1.1.nc ;;
  508. esac
  509. fi
  510. # for ocean_nudging
  511. if $(has_config nemo:ocenudg) ; then
  512. ln -fs ${ini_data_dir}/nemo/oce_nudg/resto.nc ./
  513. fi
  514. # XIOS files
  515. . ${ctrl_file_dir}/iodef.xml.sh > iodef.xml
  516. ln -s ${ctrl_file_dir}/context_nemo.xml
  517. ln -s ${ctrl_file_dir}/domain_def_nemo.xml
  518. ln -s ${ctrl_file_dir}/axis_def_nemo.xml
  519. ln -s ${ctrl_file_dir}/grids_def_nemo.xml
  520. ln -s ${ctrl_file_dir}/field_def_nemo-lim.xml
  521. ln -s ${ctrl_file_dir}/field_def_nemo-opa.xml
  522. ln -s ${ctrl_file_dir}/field_def_nemo-pisces.xml
  523. ln -s ${ctrl_file_dir}/field_def_nemo-inerttrc.xml
  524. ln -s ${output_control_files_dir}/file_def_nemo-lim3.xml file_def_nemo-lim.xml
  525. ln -s ${output_control_files_dir}/file_def_nemo-opa.xml
  526. ln -s ${output_control_files_dir}/file_def_nemo-pisces.xml
  527. if [ -f ${ini_data_dir}/xios/ORCA${nem_res_hor}/coordinates_xios.nc ]
  528. then
  529. cp ${ini_data_dir}/xios/ORCA${nem_res_hor}/coordinates_xios.nc ./
  530. else
  531. info "File 'coordinates_xios.nc' not found. NEMO can not be run with land domain removal!"
  532. fi
  533. # ---------------------------------------------------------------------
  534. # *** Files needed for the Runoff mapper (linked)
  535. # ---------------------------------------------------------------------
  536. ln -s ${ini_data_dir}/runoff-mapper/runoff_maps.nc
  537. # ---------------------------------------------------------------------
  538. # *** Files needed for OASIS (linked)
  539. # ---------------------------------------------------------------------
  540. oas_grid_dir=${ini_data_dir}/oasis/T${ifs_res_hor}-ORCA${nem_res_hor}
  541. # Name table file
  542. ln -s ${ini_data_dir}/oasis/cf_name_table.txt
  543. # Grid definition files
  544. ln -s ${oas_grid_dir}/areas.nc
  545. ln -s ${oas_grid_dir}/grids.nc
  546. ln -s ${oas_grid_dir}/masks.nc
  547. # Weight files
  548. case ${ifs_res_hor} in
  549. 159) oas_agrd=080
  550. ;;
  551. 255) oas_agrd=128
  552. ;;
  553. 511) oas_agrd=256
  554. ;;
  555. 799) oas_agrd=400
  556. ;;
  557. *) error "Unsupported horizontal resolution (IFS): ${ifs_res_hor}"
  558. ;;
  559. esac
  560. case ${nem_res_hor} in
  561. 1) oas_ogrd=O1t0
  562. ;;
  563. 025) oas_ogrd=Ot25
  564. ;;
  565. *) error "Unsupported horizontal resolution (NEMO): ${nem_res_hor}"
  566. ;;
  567. esac
  568. if ${oas_link_weights}
  569. then
  570. for f in ${oas_grid_dir}/rmp_????_to_????_GAUSWGT.nc
  571. do
  572. ln -s $f
  573. done
  574. fi
  575. for f in ${oas_rst_files}
  576. do
  577. cp ${oas_grid_dir}/rst/$f .
  578. done
  579. else # i.e. $leg_is_restart == true
  580. # ---------------------------------------------------------------------
  581. # *** Remove all leftover output files from previous legs
  582. # ---------------------------------------------------------------------
  583. # IFS files
  584. rm -f ICM{SH,GG}${exp_name}+??????
  585. # NEMO files
  586. rm -f ${exp_name}_??_????????_????????_{grid_U,grid_V,grid_W,grid_T,icemod,SBC,scalar,SBC_scalar,diad_T}.nc
  587. fi # ! $leg_is_restart
#--------------------------------------------------------------------------
# *** Surface restoring and ocean nudging options
#--------------------------------------------------------------------------
# NOTE(review): 'if $(has_config ...)' relies on has_config printing
# nothing and only returning an exit status (an empty command substitution
# leaves the substitution's status as the 'if' condition). Plain
# 'if has_config ...' would be the usual spelling -- confirm has_config's
# contract before changing.
#for ocean_nudging
if $(has_config nemo:ocenudg) ; then
ln -fs ${ini_data_dir}/nemo/oce_nudg/temp_sal*.nc ./
fi
#for surface restoring
# -f forces relinking on restart legs where the links already exist
if $(has_config nemo:surfresto) ; then
ln -fs ${ini_data_dir}/nemo/surface_restoring/sss_restore_data*.nc ./
ln -fs ${ini_data_dir}/nemo/surface_restoring/sst_restore_data*.nc ./
ln -fs ${ini_data_dir}/nemo/surface_restoring/mask_restore*.nc ./
fi
  601. # -------------------------------------------------------------------------
  602. # *** Remove land grid-points
  603. # -------------------------------------------------------------------------
  604. if $(has_config nemo:elpin)
  605. then
  606. if [ ! -f coordinates_xios.nc ]
  607. then
  608. error "ELpIN requested, but file 'coordinates_xios.nc' was not found"
  609. fi
  610. jpns=($(${ecearth_src_dir}/util/ELPiN/ELPiNv2.cmd ${nem_numproc}))
  611. info "nemo domain decompostion from ELpIN: ${jpns[@]}"
  612. nem_numproc=${jpns[0]}
  613. nem_jpni=${jpns[1]}
  614. nem_jpnj=${jpns[2]}
  615. else
  616. info "nemo original domain decomposition (not using ELPiN)"
  617. fi
  618. # -------------------------------------------------------------------------
  619. # *** Initial conditions saved during the run
  620. # -------------------------------------------------------------------------
  621. ${do_save_ic} && save_ic_prepare_output
  622. # -------------------------------------------------------------------------
  623. # *** Create some control files
  624. # -------------------------------------------------------------------------
  625. # IFS frequency output for namelist
  626. if [ -f postins/pptdddddd0300 ]
  627. then
  628. ifs_output_freq=$(( 3 * 3600 / ifs_time_step_sec ))
  629. elif [ -f postins/pptdddddd0600 ]
  630. then
  631. ifs_output_freq=$(( 6 * 3600 / ifs_time_step_sec ))
  632. else
  633. error "IFS output frequency undefined."
  634. fi
  635. # IFS, NEMO, LIM namelist and OASIS namcouple files
  636. . ${ctrl_file_dir}/namelist.ifs.sh > fort.4
  637. . ${ctrl_file_dir}/namelist.nemo.ref.sh > namelist_ref
  638. . ${ctrl_file_dir}/namelist.nemo-${nem_grid}-coupled.cfg.sh > namelist_cfg
  639. . ${ctrl_file_dir}/namelist.lim3.ref.sh > namelist_ice_ref
  640. . ${ctrl_file_dir}/namelist.lim3-${nem_grid}.cfg.sh > namelist_ice_cfg
  641. . ${ctrl_file_dir}/namelist.runoffmapper.sh > namelist.runoffmapper
  642. . ${ctrl_file_dir}/namcouple.sh > namcouple
  643. # -------------------------------------------------------------------------
  644. # *** Create ICMCL file with vegetation fields
  645. # not needed if LPJG is used with feedback
  646. # -------------------------------------------------------------------------
  647. tempfile=tmp.$$
  648. case ${ifs_veg_source} in
  649. "ifs" )
  650. # Vegetation from IFS (climatology)
  651. icmclfile=${ini_data_dir}/ifs/${ifs_grid}/climate/ICMCL_ONLY_VEG_PD
  652. # Create data for december, the year before the leg starts
  653. ${grib_set} \
  654. -s dataDate=$(printf "%04d" $((leg_start_date_yyyy-1)))1215 \
  655. ${icmclfile}-12 ICMCL${exp_name}INIT
  656. # Create data for all month in the years of the leg
  657. for (( y=${leg_start_date_yyyy} ; y<=${leg_end_date_yyyy} ; y++ ))
  658. do
  659. yy=$(printf "%04d" $y)
  660. for m in {1..12}
  661. do
  662. mm=$(printf "%02d" $m)
  663. ${grib_set} -s dataDate=${yy}${mm}15 ${icmclfile}-${mm} ${tempfile}
  664. cat ${tempfile} >> ICMCL${exp_name}INIT
  665. done
  666. done
  667. # Create data for january, the year after the leg ends
  668. ${grib_set} \
  669. -s dataDate=$(printf "%04d" $((leg_end_date_yyyy+1)))0115 \
  670. ${icmclfile}-01 ${tempfile}
  671. cat ${tempfile} >> ICMCL${exp_name}INIT
  672. ;;
  673. "era20c" )
  674. # Vegetation from an off-line LPJG run forced with ERA20C (v16)
  675. rm -f ICMCL${exp_name}INIT
  676. # Create data for all years of the leg, including one year
  677. # before and one year after
  678. for (( yr=leg_start_date_yyyy-1 ; yr<=leg_end_date_yyyy+1 ; yr+=1 ))
  679. do
  680. if [ $ifs_cmip_fixyear -le 0 ]
  681. then
  682. cat ${ini_data_dir}/ifs/${ifs_grid}/icmcl_v16/icmcl_$yr.grb >> ICMCL${exp_name}INIT
  683. else
  684. # Fixed year forcing, requires cdo! (only when not using ifs_veg_source=custom_exp*)
  685. # If cdo is not available at runtime you need to fix proper
  686. # icmcl files beforehand and use them here
  687. cdo setyear,$yr ${ini_data_dir}/ifs/${ifs_grid}/icmcl_v16/icmcl_${ifs_cmip_fixyear}.grb ${tempfile}
  688. cat ${tempfile} >> ICMCL${exp_name}INIT
  689. fi
  690. done
  691. ;;
  692. * )
  693. error "Vegetation from ${ifs_veg_source} not implemented"
  694. ;;
  695. esac
  696. # Clean up
  697. rm -f ${tempfile}
  698. # -------------------------------------------------------------------------
  699. # *** Link the appropriate NEMO restart files of the previous leg
  700. # -------------------------------------------------------------------------
  701. if $leg_is_restart && ! $special_restart
  702. then
  703. ns=$(printf %08d $(( leg_start_sec / nem_time_step_sec - nem_restart_offset )))
  704. for (( n=0 ; n<nem_numproc ; n++ ))
  705. do
  706. np=$(printf %04d ${n})
  707. ln -fs ${exp_name}_${ns}_restart_oce_${np}.nc restart_oce_${np}.nc
  708. ln -fs ${exp_name}_${ns}_restart_ice_${np}.nc restart_ice_${np}.nc
  709. done
  710. # Make sure there are no global restart files
  711. # If links are found, they will be removed. We are cautious and do
  712. # _not_ remove real files! However, if real global restart files are
  713. # present, NEMO/LIM will stop because time stamps will not match.
  714. [ -h restart_oce.nc ] && rm restart_oce.nc
  715. [ -h restart_ice.nc ] && rm restart_ice.nc
  716. fi
  717. # -------------------------------------------------------------------------
  718. # *** Remove some OASIS files of the previous leg
  719. # -------------------------------------------------------------------------
  720. if $leg_is_restart
  721. then
  722. rm -f anaisout_*
  723. fi
  724. # -------------------------------------------------------------------------
  725. # *** Start the run
  726. # -------------------------------------------------------------------------
  727. export DR_HOOK_IGNORE_SIGNALS='-1'
  728. export CPLNG='active'
  729. # Use the launch function from the platform configuration file
  730. t1=$(date +%s)
  731. # Compute and check the node distribution
  732. info "======================="
  733. info "Node/proc distribution:"
  734. info "-----------------------"
  735. info "IFS: ${ifs_numproc}"
  736. info "NEMO: ${nem_numproc}"
  737. info "XIOS: ${xio_numproc}"
  738. info "RUNOFF: ${rnf_numproc}"
  739. info "======================="
  740. pwd
  741. info "======================="
  742. info "mpirun \
  743. -n ${xio_numproc} ./$(basename ${xio_exe_file}) : \
  744. -n ${nem_numproc} ./$(basename ${nem_exe_file}) : \
  745. -n ${ifs_numproc} ./$(basename ${ifs_exe_file}) -v ecmwf -e ${exp_name} : \
  746. -n ${rnf_numproc} ./$(basename ${rnf_exe_file})"
  747. info "======================="
  748. #exit
  749. mpirun \
  750. -n ${xio_numproc} ./$(basename ${xio_exe_file}) : \
  751. -n ${nem_numproc} ./$(basename ${nem_exe_file}) : \
  752. -n ${ifs_numproc} ./$(basename ${ifs_exe_file}) -v ecmwf -e ${exp_name} : \
  753. -n ${rnf_numproc} ./$(basename ${rnf_exe_file})
  754. t2=$(date +%s)
  755. tr=$(date -u -d "0 -$t1 sec + $t2 sec" +%T)
  756. # -------------------------------------------------------------------------
  757. # *** Check for signs of success
  758. # Note the tests provide no guarantee that things went fine! They are
  759. # just based on the IFS and NEMO log files. More tests (e.g. checking
  760. # restart files) could be implemented.
  761. # -------------------------------------------------------------------------
  762. # Checking for IFS success
  763. if [ -f ifs.stat ]
  764. then
  765. if [ "$(awk 'END{print $3}' ifs.stat)" == "CNT0" ]
  766. then
  767. info "Leg successfully completed according to IFS log file 'ifs.stat'."
  768. else
  769. error "Leg not completed according to IFS log file 'ifs.stat'."
  770. fi
  771. else
  772. error "IFS log file 'ifs.stat' not found after run."
  773. fi
  774. # Check for NEMO success
  775. if [ -f ocean.output ]
  776. then
  777. if [ "$(sed -n '/New day/h; ${g;s:.*\([0-9/]\{10\}\).*:\1:;p;}' ocean.output)" == "$(date -u -d "${leg_end_date} - 1 day" +%Y/%m/%d)" ]
  778. then
  779. info "Leg successfully completed according to NEMO log file 'ocean.output'."
  780. else
  781. error "Leg not completed according to NEMO log file 'ocean.output'."
  782. fi
  783. else
  784. error "NEMO log file 'ocean.output' not found after run."
  785. fi
  786. # -------------------------------------------------------------------------
  787. # *** Post-process initial conditions saved during the run if requested
  788. # -------------------------------------------------------------------------
  789. ${do_save_ic} && save_ic_postproc
  790. # -------------------------------------------------------------------------
  791. # *** Move IFS output files to archive directory
  792. # -------------------------------------------------------------------------
  793. outdir="${archive_dir}/output/ifs/$(printf %03d $((leg_number)))"
  794. mkdir -p ${outdir}
  795. prv_leg=$(printf %03d $((leg_number-1)))
  796. # This takes care of a special IFS feature: The output for the last time
  797. # step of each leg is written at the first time step of the new leg. The
  798. # following code makes sure that the output is appended to the appropriate
  799. # file. Since GRIB files are just streams, its done with a simple cat
  800. # command.
  801. for f in ICMSH${exp_name}+?????? ICMGG${exp_name}+??????
  802. do
  803. if [ -f output/ifs/${prv_leg}/${f} ]
  804. then
  805. cat ${f} >> output/ifs/${prv_leg}/${f}
  806. rm -f ${f}
  807. else
  808. mv ${f} ${outdir}
  809. fi
  810. done
  811. # -------------------------------------------------------------------------
  812. # *** Move NEMO output files to archive directory
  813. # -------------------------------------------------------------------------
  814. outdir="${archive_dir}/output/nemo/$(printf %03d $((leg_number)))"
  815. mkdir -p ${outdir}
  816. for v in grid_U grid_V grid_W grid_T icemod SBC scalar SBC_scalar diad_T \
  817. grid_T_2D grid_U_2D grid_V_2D grid_W_2D grid_T_3D grid_U_3D grid_V_3D grid_W_3D \
  818. grid_1point grid_T_3D_ncatice vert_sum \
  819. grid_ptr_W_3basin_3D grid_ptr_T_3basin_2D grid_ptr_T_2D \
  820. zoom_700_sum zoom_300_sum zoom_2000_sum
  821. do
  822. for f in ${exp_name}_*_????????_????????_*${v}.nc
  823. do
  824. test -f $f && mv $f $outdir/
  825. done
  826. done
  827. # -------------------------------------------------------------------------
  828. # *** Move IFS restart files to archive directory
  829. # -------------------------------------------------------------------------
  830. if $leg_is_restart
  831. then
  832. outdir="${archive_dir}/restart/ifs/$(printf %03d $((leg_number)))"
  833. mkdir -p ${outdir}
  834. # Figure out the time part of the restart files (cf. CTIME on rcf files)
  835. # NOTE: Assuming that restarts are at full days (time=0000) only!
  836. nd="$(printf %06d $((leg_start_sec/(24*3600))))0000"
  837. mv srf${nd}.???? ${outdir}
  838. fi
  839. # -------------------------------------------------------------------------
  840. # *** Move NEMO restart files to archive directory
  841. # -------------------------------------------------------------------------
  842. if $leg_is_restart && $(has_config nemo)
  843. then
  844. outdir="${archive_dir}/restart/nemo/$(printf %03d $((leg_number)))"
  845. mkdir -p ${outdir}
  846. ns=$(printf %08d $(( leg_start_sec / nem_time_step_sec - nem_restart_offset )))
  847. for f in oce ice
  848. do
  849. mv ${exp_name}_${ns}_restart_${f}_????.nc ${outdir}
  850. done
  851. fi
  852. # -------------------------------------------------------------------------
  853. # *** Copy OASIS restart files to archive directory
  854. # NOTE: These files are copied and not moved as they are used in the
  855. # next leg!
  856. # Note also that the OASIS restart files present at the end of
  857. # the leg correspond to the start of the next leg!
  858. # -------------------------------------------------------------------------
  859. outdir="${archive_dir}/restart/oasis/$(printf %03d $((leg_number+1)))"
  860. mkdir -p ${outdir}
  861. for f in ${oas_rst_files}
  862. do
  863. test -f ${f} && cp ${f} ${outdir}
  864. done
  865. # -------------------------------------------------------------------------
  866. # *** Copy rcf files to the archive directory (of the next leg!)
  867. # -------------------------------------------------------------------------
  868. outdir="${archive_dir}/restart/ifs/$(printf %03d $((leg_number+1)))"
  869. mkdir -p ${outdir}
  870. for f in rcf
  871. do
  872. test -f ${f} && cp ${f} ${outdir}
  873. done
  874. # -------------------------------------------------------------------------
  875. # *** Move log files to archive directory
  876. # -------------------------------------------------------------------------
  877. outdir="${archive_dir}/log/$(printf %03d $((leg_number)))"
  878. mkdir -p ${outdir}
  879. for f in \
  880. ifs.log ifs.stat fort.4 ocean.output \
  881. time.step solver.stat \
  882. nout.000000 debug.??.?????? A*_??.nc O*_??.nc
  883. do
  884. test -f ${f} && mv ${f} ${outdir}
  885. done
  886. # -------------------------------------------------------------------------
  887. # *** Write the restart control file
  888. # -------------------------------------------------------------------------
  889. # Compute CPMIP performance
  890. sypd="$(cpmip_sypd $leg_length_sec $(($t2 - $t1)))"
  891. chpsy="$(cpmip_chpsy $leg_length_sec $(($t2 - $t1)) $(($ifs_numproc + $nem_numproc + $rnf_numproc + $xio_numproc)))"
  892. echo "#" | tee -a ${ece_info_file}
  893. echo "# Finished leg at `date '+%F %T'` after ${tr} (hh:mm:ss)" \
  894. | tee -a ${ece_info_file}
  895. echo "# CPMIP performance: $sypd SYPD $chpsy CHPSY"| tee -a ${ece_info_file}
  896. echo "leg_number=${leg_number}" | tee -a ${ece_info_file}
  897. echo "leg_start_date=\"${leg_start_date}\"" | tee -a ${ece_info_file}
  898. echo "leg_end_date=\"${leg_end_date}\"" | tee -a ${ece_info_file}
  899. # Need to reset force_run_from_scratch in order to avoid destroying the next leg
  900. force_run_from_scratch=false
  901. special_restart=false
  902. done # loop over legs
  903. # -----------------------------------------------------------------------------
  904. # *** Platform dependent finalising of the run
  905. # -----------------------------------------------------------------------------
  906. #finalise
  907. if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
  908. then
  909. info "Resubmitting job for leg $((leg_number+1))"
  910. # Need to go to start_dir to find the run script
  911. cd ${start_dir}
  912. # Submit command
  913. echo "sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
  914. -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) -d ${SLURM_JOB_ID-"id"} \
  915. ./${SLURM_JOB_NAME-"run"}.sh"
  916. # Note: This does not work if you specify a job name with sbatch -J jobname!
  917. sbatch -N ${SLURM_JOB_NUM_NODES-"1"} \
  918. -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
  919. -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
  920. -d ${SLURM_JOB_ID-"id"} \
  921. ./${SLURM_JOB_NAME-"run"}.sh
  922. fi
  923. exit 0