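This patch adapts the EC-Earth 3.3.3.2 run script for a Slurm cluster: a
portable shebang, 125 tasks per node on 4 exclusive nodes (500 MPI tasks in
total, 400 of them for IFS), mail notifications and commented-out alternative
partitions, yearly restarts, symlinks for NEMO wind stress anomaly forcing
files, and the platform-specific setup, module loads and resubmission logic
replaced by the configure and finalise functions presumably provided through
the sourced ecconf.cfg.

Minimal usage sketch, not part of the patch (file names here are hypothetical;
substitute your actual run script and patch file):

    patch my-run-script.sh < this-change.patch
    sbatch my-run-script.sh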
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 #
 # Job options
@@ -9,13 +9,18 @@
 #
 #SBATCH --nodes=4
 #SBATCH --exclusive
-#SBATCH --ntasks-per-node=123
+#SBATCH --ntasks-per-node=125
+##SBATCH --partition=medium
+##SBATCH --exclude=cnm016
 #SBATCH --partition=debug
+##SBATCH --partition=batch
+#SBATCH --mail-user=name@uclouvain.be
+#SBATCH --mail-type=ALL
 #
 set -ueo pipefail
 #
 LOCAL_NODES=4
-LOCAL_TASKS=492
+LOCAL_TASKS=500
 #
 stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
 exec > ${stdout_file}
@@ -91,7 +96,7 @@ nem_grid=ORCA1L75
 
 # Restart frequency. Use any (reasonable) number and time unit you want.
 # For runs without restart, leave this variable empty
-rst_freq="6 months"
+rst_freq="1 year"
 
 # Number of restart legs to be run in one go
 run_num_legs=1
@@ -116,54 +121,16 @@ use_machinefile=%USE_MACHINEFILE%
 # This file is used to store information about restarts
 ece_info_file="ece.info"
 
+forcing_files=(
+"* => ."
+)
+
 # -----------------------------------------------------------------------------
 # *** Read platform dependent configuration
 # -----------------------------------------------------------------------------
 . ${start_dir}/ecconf.cfg
 
-# This function should configure all settings/modules needed to
-# later prepare the EC-Earth run directory and set variables used
-# in the run script
-
-# SCRATCH is not defined in MN3, define it here
-# and also make sure it is defined when compiling
-export SCRATCH=/gpfs/scratch/acad/ecearth/${USER}
-
-# Configure paths for building/running EC-Earth
-ecearth_src_dir=${HOME}/models/ecearth_3.3.3.2/sources
-run_dir=/gpfs/scratch/acad/ecearth/${USER}/ecearth/run/${exp_name}
-ini_data_dir=/gpfs/scratch/acad/ecearth/data/bsc32/v3.3.3.2/inidata
-archive_dir=/gpfs/scratch/acad/ecearth/${USER}/ecearth/archive/${exp_name}
-
-# File for standard output.
-# NOTE: This will be modified for restart jobs!
-stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
-
-# Resubmit this job for automatic restarts? [true/false]
-# Also, add options for the resubmit command here.
-resubmit_job=true
-resubmit_opt=""
-
-module purge
-module load EasyBuild/2023a
-MODULEPATH=$MODULEPATH:/gpfs/projects/acad/ecearth/softs/easybuild/modules/all
-module load netCDF-Fortran/4.6.1-iompi-2023a
-module load imkl/2023.1.0
-module load grib_api/1.24.0-iompi-2023a
-module load CDO/1.9.10-iompi-2023a
-
-# Configure grib api paths
-export GRIB_DEFINITION_PATH=${HOME}/models/ecearth_3.3.3.2/sources/util/grib_table_126:${EBROOTGRIB_API}/share/grib_api/definitions
-export GRIB_SAMPLES_PATH=${EBROOTGRIB_API}/share/grib_api/ifs_samples/grib1
-export GRIB_BIN_PATH=${EBROOTGRIB_API}/bin
-
-# Configure number of processors per node
-proc_per_node=128
-
-# Use machinefiles or not
-[[ `echo "$use_machinefile" | tr '[:upper:]' '[:lower:]'` == true ]] && use_machinefile=true || use_machinefile=false
-
-ulimit -s unlimited
+configure
 
 # -----------------------------------------------------------------------------
 # *** Time step settings
@@ -224,7 +191,7 @@ ifs_ddh_freq=$(( 120 * 3600 / ifs_time_step_sec ))
 export ifs_res_hor=$(echo ${ifs_grid} | sed 's:T\([0-9]\+\)L\([0-9]\+\):\1:')
 ifs_res_ver=$(echo ${ifs_grid} | sed 's:T\([0-9]\+\)L\([0-9]\+\):\2:')
 
-ifs_numproc=392
+ifs_numproc=400
 
 ifs_exe_file=${ecearth_src_dir}/ifs-${ifs_version}/bin/ifsmaster-${build_arch}
 
@@ -256,7 +223,7 @@ ifs_cmip6piaer=TRUE
 # 1850 --> 2014 (o3_histo)
 # !! other scenarios (> 2014), default: SSP3-7.0
 # SSP1-1.9, SSP1-2.6, SSP1-2.6-Ext, SSP2-4.5, SSP3-7.0, SSP3-LowNTCF, SSP4-3.4, SSP5-3.4-OS, SSP4-6.0, SSP5-3.4-OS-Ext, SSP5-8.5, SSP5-8.5-Ext
-ifs_cmip6_scenario=historical
+export ifs_cmip6_scenario=historical
 
 # Enable optional COVID-19 scenarios, will enforce ifs_cmip6_scenario=SSP2-4.5
 ifs_covid19=FALSE
@@ -960,6 +927,11 @@ do
 ln -s ${ini_data_dir}/nemo/oce_nudg/resto.nc
 fi
 
+ # for ocean wind stress anomalies
+ for file in "${forcing_files[@]}"; do
+ [[ ! -e ${file#*> } || "$file" == \** ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/nemo/forcing/wind_stress_anom/$file")
+ done
+
 # XIOS files
 . ${ctrl_file_dir}/iodef.xml.sh > iodef.xml
 ln -s ${ctrl_file_dir}/context_nemo.xml
@@ -1995,23 +1967,6 @@ done # loop over legs
 # -----------------------------------------------------------------------------
 # *** Platform dependent finalising of the run
 # -----------------------------------------------------------------------------
-#finalise
-
-if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
-then
- info "Resubmitting job for leg $((leg_number+1))"
- # Need to go to start_dir to find the run script
- cd ${start_dir}
- # Submit command
- echo "sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
- -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) -d ${SLURM_JOB_ID-"id"} \
- ./${SLURM_JOB_NAME-"run"}.sh"
- # Note: This does not work if you specify a job name with sbatch -J jobname!
- sbatch -N ${SLURM_JOB_NUM_NODES-"1"} \
- -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
- -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
- -d ${SLURM_JOB_ID-"id"} \
- ./${SLURM_JOB_NAME-"run"}.sh
-fi
+finalise
 
 exit 0
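Illustration, not part of the patch: a rough, more readable equivalent of the
forcing_files loop added in the NEMO hunk above. Each entry is a
"source pattern => destination" pair, and the single entry "* => ." links every
file from the wind stress anomaly directory into the run directory. The path
below is a placeholder.

    ini_data_dir=/path/to/inidata            # placeholder for the real data tree
    forcing_files=( "* => ." )
    for file in "${forcing_files[@]}"; do
        src=${file%% =>*}                    # text before " =>" (may be a glob)
        dst=${file#*=> }                     # text after "=> " ("." = run dir)
        # Link when the destination does not exist yet or the source is a glob
        # starting with "*", mirroring the test in the patched script.
        if [[ ! -e ${dst} || ${src} == \** ]]; then
            ln -sf ${ini_data_dir}/nemo/forcing/wind_stress_anom/${src} "${dst}"
        fi
    done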
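A further sketch under an assumption, also not part of the patch: the module
loads, paths and in-line resubmission block deleted above are expected to come
from configure() and finalise() shell functions defined in the sourced
ecconf.cfg. Based purely on the deleted lines, they could look roughly like
this; module versions and sbatch options are copied from the removed code and
may differ on your platform.

    configure() {
        module purge
        module load netCDF-Fortran/4.6.1-iompi-2023a imkl/2023.1.0
        module load grib_api/1.24.0-iompi-2023a CDO/1.9.10-iompi-2023a
        proc_per_node=128
        ulimit -s unlimited
    }

    finalise() {
        # Resubmit the next leg from the start directory, as the removed
        # in-line sbatch block used to do.
        cd ${start_dir}
        sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -d ${SLURM_JOB_ID-"id"} \
               ./${SLURM_JOB_NAME-"run"}.sh
    }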