
runtime: Adding wind stress anomalies feature

Pierre-Yves Barriat, 9 months ago (commit ffa6d38fab)

+ 22 - 67
runtime/classic/EC00.sh

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 #
 # Job options 
@@ -9,13 +9,18 @@
 #
 #SBATCH --nodes=4
 #SBATCH --exclusive
-#SBATCH --ntasks-per-node=123
+#SBATCH --ntasks-per-node=125
+##SBATCH --partition=medium
+##SBATCH --exclude=cnm016
 #SBATCH --partition=debug
+##SBATCH --partition=batch
+#SBATCH --mail-user=name@uclouvain.be
+#SBATCH --mail-type=ALL
 #
 set -ueo pipefail
 #
 LOCAL_NODES=4
-LOCAL_TASKS=492
+LOCAL_TASKS=500
 #
 stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
 exec > ${stdout_file}
@@ -91,7 +96,7 @@ nem_grid=ORCA1L75
 
 # Restart frequency. Use any (reasonable) number and time unit you want.
 # For runs without restart, leave this variable empty
-rst_freq="6 months"
+rst_freq="1 year"
 
 # Number of restart legs to be run in one go
 run_num_legs=1
@@ -116,54 +121,16 @@ use_machinefile=%USE_MACHINEFILE%
 # This file is used to store information about restarts
 ece_info_file="ece.info"
 
+forcing_files=(
+"* => ."
+)
+
 # -----------------------------------------------------------------------------
 # *** Read platform dependent configuration
 # -----------------------------------------------------------------------------
 . ${start_dir}/ecconf.cfg
 
-# This function should configure all settings/modules needed to
-# later prepare the EC-Earth run directory and set variables used
-# in the run script
-
-# SCRATCH is not defined in MN3, define it here
-# and also make sure it is defined when compiling
-export SCRATCH=/gpfs/scratch/acad/ecearth/${USER}
-
-# Configure paths for building/running EC-Earth
-ecearth_src_dir=${HOME}/models/ecearth_3.3.3.2/sources
-run_dir=/gpfs/scratch/acad/ecearth/${USER}/ecearth/run/${exp_name}
-ini_data_dir=/gpfs/scratch/acad/ecearth/data/bsc32/v3.3.3.2/inidata
-archive_dir=/gpfs/scratch/acad/ecearth/${USER}/ecearth/archive/${exp_name}
-
-# File for standard output.
-# NOTE: This will be modified for restart jobs!
-stdout_file=${SLURM_SUBMIT_DIR-$PWD}/${SLURM_JOB_NAME-"local"}_${SLURM_JOB_ID-"id"}.log
-
-# Resubmit this job for automatic restarts? [true/false]
-# Also, add options for the resubmit command here.
-resubmit_job=true
-resubmit_opt=""
-
-module purge
-module load EasyBuild/2023a
-MODULEPATH=$MODULEPATH:/gpfs/projects/acad/ecearth/softs/easybuild/modules/all
-module load netCDF-Fortran/4.6.1-iompi-2023a
-module load imkl/2023.1.0 
-module load grib_api/1.24.0-iompi-2023a
-module load CDO/1.9.10-iompi-2023a
-
-# Configure grib api paths
-export GRIB_DEFINITION_PATH=${HOME}/models/ecearth_3.3.3.2/sources/util/grib_table_126:${EBROOTGRIB_API}/share/grib_api/definitions
-export GRIB_SAMPLES_PATH=${EBROOTGRIB_API}/share/grib_api/ifs_samples/grib1
-export GRIB_BIN_PATH=${EBROOTGRIB_API}/bin
-
-# Configure number of processors per node
-proc_per_node=128
-
-# Use machinefiles or not
-[[ `echo "$use_machinefile" | tr '[:upper:]' '[:lower:]'` == true ]] && use_machinefile=true || use_machinefile=false
-
-ulimit -s unlimited
+configure
 
 # -----------------------------------------------------------------------------
 # *** Time step settings
@@ -224,7 +191,7 @@ ifs_ddh_freq=$(( 120 * 3600 / ifs_time_step_sec ))
 export ifs_res_hor=$(echo ${ifs_grid} | sed 's:T\([0-9]\+\)L\([0-9]\+\):\1:')
 ifs_res_ver=$(echo ${ifs_grid} | sed 's:T\([0-9]\+\)L\([0-9]\+\):\2:')
 
-ifs_numproc=392
+ifs_numproc=400
 
 ifs_exe_file=${ecearth_src_dir}/ifs-${ifs_version}/bin/ifsmaster-${build_arch}
 
@@ -256,7 +223,7 @@ ifs_cmip6piaer=TRUE
 # 1850 --> 2014 (o3_histo)
 # !! other scenarios (> 2014), default: SSP3-7.0
 # SSP1-1.9, SSP1-2.6, SSP1-2.6-Ext, SSP2-4.5, SSP3-7.0, SSP3-LowNTCF, SSP4-3.4, SSP5-3.4-OS, SSP4-6.0, SSP5-3.4-OS-Ext, SSP5-8.5, SSP5-8.5-Ext
-ifs_cmip6_scenario=historical
+export ifs_cmip6_scenario=historical
 
 # Enable optional COVID-19 scenarios, will enforce ifs_cmip6_scenario=SSP2-4.5
 ifs_covid19=FALSE
@@ -960,6 +927,11 @@ do
                 ln -s ${ini_data_dir}/nemo/oce_nudg/resto.nc
             fi
 
+	    # for ocean wind stress anomalies
+	    for file in "${forcing_files[@]}"; do
+		    [[ ! -e ${file#*> } ||  "$file" == \** ]] && ln -sf $(sed 's/ *=> */ /' <<< "${ini_data_dir}/nemo/forcing/wind_stress_anom/$file")
+	    done
+
             # XIOS files
             . ${ctrl_file_dir}/iodef.xml.sh > iodef.xml
             ln -s ${ctrl_file_dir}/context_nemo.xml
@@ -1995,23 +1967,6 @@ done # loop over legs
 # -----------------------------------------------------------------------------
 # *** Platform dependent finalising of the run
 # -----------------------------------------------------------------------------
-#finalise
-
-if ${resubmit_job} && [ $(date -d "${leg_end_date}" +%s) -lt $(date -d "${run_end_date}" +%s) ]
-then
-    info "Resubmitting job for leg $((leg_number+1))"
-    # Need to go to start_dir to find the run script
-    cd ${start_dir}
-    # Submit command
-    echo "sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
-              -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) -d ${SLURM_JOB_ID-"id"} \
-              ./${SLURM_JOB_NAME-"run"}.sh"
-        # Note: This does not work if you specify a job name with sbatch -J jobname!
-        sbatch -N ${SLURM_JOB_NUM_NODES-"1"}                                             \
-               -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
-               -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
-               -d ${SLURM_JOB_ID-"id"}                                                   \
-               ./${SLURM_JOB_NAME-"run"}.sh
-fi
+finalise
 
 exit 0
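
Note on EC00.sh: the inlined platform setup (SCRATCH, paths, modules, GRIB variables) and the resubmission block are replaced by the configure and finalise hooks provided by ecconf.cfg, and a forcing_files array of "pattern => destination" entries is introduced; the loop added in the NEMO section links matching files from ${ini_data_dir}/nemo/forcing/wind_stress_anom into the run directory. Below is a minimal sketch of how one entry is resolved, using the inidata path previously hard-coded above and parameter expansions in place of the committed sed call (sketch only, not part of the commit):

# Sketch only: resolve "pattern => destination" entries as the new loop does.
ini_data_dir=/gpfs/scratch/acad/ecearth/data/bsc32/v3.3.3.2/inidata   # value formerly inlined in EC00.sh
forcing_files=( "* => ." )

for file in "${forcing_files[@]}"; do
    src=${file%% =>*}    # part before " => ", e.g. "*"
    dst=${file#*=> }     # part after "=> ",  e.g. "."
    # Link when the destination is missing or the source is a glob pattern;
    # the unquoted $src lets the shell expand the glob inside the anomaly directory.
    if [[ ! -e $dst || $src == \** ]]; then
        ln -sf ${ini_data_dir}/nemo/forcing/wind_stress_anom/$src "$dst"
    fi
done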

+ 8 - 0
runtime/classic/ctrl/namelist.nemo-ORCA1L75-coupled.cfg.sh

@@ -166,6 +166,14 @@ cat << EOF
    nn_cplmodel   =     1     !  Maximum number of models to/from which NEMO is potentialy sending/receiving data
    ln_usecplmask = .false.   !  use a coupling mask file to merge data received from several models
                              !   -> file cplmask.nc with the float variable called cplmask (jpi,jpj,nn_cplmodel)
+!
+   ln_force_windstress = .false.
+   cn_dir        = './'  !  root directory for the tau uo/vo anomalies (wind stress along i and j-axis)
+   !_____________!__________________________!___________________!__________________!_____________!_________!___________!__________!__________!_______________!
+   !             !  file name               ! frequency (hours) ! variable         ! time interp.!  clim   ! 'yearly'/ ! weights  ! rotation ! land/sea mask !
+   !             !                          !  (if <0  months)  !   name           !   (logical) !  (T/F)  ! 'monthly' ! filename ! pairing  !    filename   !
+   sn_tau_anom_u = 'nemo5_main_tauuo'       ,  -1               , 'tauuo'          , .false.     , .false. , 'yearly'  , ''       , ''       , ''
+   sn_tau_anom_v = 'nemo5_main_tauvo'       ,  -1               , 'tauvo'          , .false.     , .false. , 'yearly'  , ''       , ''       , ''
 /
 !-----------------------------------------------------------------------
 &namtra_qsr    !   penetrative solar radiation

+ 8 - 0
runtime/classic/ctrl/namelist.nemo.ref.sh

@@ -425,6 +425,14 @@ cat << EOF
    nn_cplmodel   =     1     !  Maximum number of models to/from which NEMO is potentialy sending/receiving data
    ln_usecplmask = .false.   !  use a coupling mask file to merge data received from several models
                              !   -> file cplmask.nc with the float variable called cplmask (jpi,jpj,nn_cplmodel)
+!
+   ln_force_windstress = .false.
+   cn_dir        = './'  !  root directory for the tau uo/vo anomalies (wind stress along i and j-axis)
+   !_____________!________________________________!___________________!__________________!_____________!_________!___________!__________!__________!_______________!
+   !             !  file name                     ! frequency (hours) ! variable         ! time interp.!  clim   ! 'yearly'/ ! weights  ! rotation ! land/sea mask !
+   !             !                                !  (if <0  months)  !   name           !   (logical) !  (T/F)  ! 'monthly' ! filename ! pairing  !    filename   !
+   sn_tau_anom_u = 'nemo5_main_tauuo'             ,  -1               , 'tauuo'          , .false.     , .false. , 'yearly'  , ''       , ''       , ''
+   sn_tau_anom_v = 'nemo5_main_tauvo'             ,  -1               , 'tauvo'          , .false.     , .false. , 'yearly'  , ''       , ''       , ''
 /
 !-----------------------------------------------------------------------
 &namsbc_sas    !   analytical surface boundary condition
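
Note on the namelist templates: both files receive the same &namsbc_cpl additions. ln_force_windstress (off by default) switches the wind stress anomaly forcing on, cn_dir points to the directory holding the anomaly files, and sn_tau_anom_u / sn_tau_anom_v follow the usual NEMO fldread descriptor layout spelled out by the comment banner (file name stem, frequency in hours with negative values meaning months, variable name, time interpolation, climatology flag, 'yearly'/'monthly' splitting, weights, rotation pairing, land/sea mask). A hedged pre-run check, assuming NEMO's standard _yYYYY suffix for yearly, non-climatological files:

# Sketch only: warn if the yearly anomaly files expected by the descriptors
# above are missing from cn_dir ('./', i.e. the run directory).
# The _y${year} suffix is an assumption based on NEMO's fldread naming.
year=1850                               # hypothetical first year of the leg
for stem in nemo5_main_tauuo nemo5_main_tauvo; do
    f="./${stem}_y${year}.nc"
    [[ -e $f ]] || echo "WARNING: wind stress anomaly file $f not found"
done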

+ 17 - 13
runtime/classic/ecconf.cfg

@@ -28,16 +28,18 @@ function configure()
     resubmit_job=true
     resubmit_opt=""
 
-    module load craype-x86-milan
-    module load PrgEnv-intel/8.3.3
-    MODULEPATH=$MODULEPATH:/gpfs/projects/acad/ecearth/softs/easybuild/modules/all
-    module load netCDF-Fortran/4.6.0-iompi-2022.05
-    module load imkl/2022.1.0 OpenJPEG/2.5.0-GCCcore-11.3.0 grib_api/1.24.0-iompi-2022.05
-
+    module purge
+    module load EasyBuild/2023a
+    export MODULEPATH=$MODULEPATH:/gpfs/projects/acad/ecearth/softs/easybuild/2023a/modules/all
+    module load netCDF-Fortran/4.6.1-iompi-2023a
+    module load CDO/2.2.2-iompi-2023a
+    module load NCO/5.1.3-iomkl-2023a
+    module load ecCodes/2.31.0-iompi-2023a
+    
     # Configure grib api paths
-    export GRIB_DEFINITION_PATH=${HOME}/models/ecearth_3.3.3.2/sources/util/grib_table_126:${EBROOTGRIB_API}/share/grib_api/definitions
-    export GRIB_SAMPLES_PATH=${EBROOTGRIB_API}/share/grib_api/ifs_samples/grib1
-    export GRIB_BIN_PATH=${EBROOTGRIB_API}/bin
+    export GRIB_DEFINITION_PATH=${HOME}/models/ecearth_3.3.3.2/sources/util/grib_table_126:${EBROOTECCODES}/share/eccodes/definitions
+    export GRIB_SAMPLES_PATH=${EBROOTECCODES}/share/eccodes/ifs_samples/grib1
+    export GRIB_BIN_PATH=${EBROOTECCODES}/bin
 
     # Configure number of processors per node
     proc_per_node=128
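
Note on configure(): the module stack moves from the PrgEnv-intel/grib_api toolchain to the EasyBuild 2023a stack, where ecCodes supplies the GRIB tooling, so the GRIB_* variables now resolve under ${EBROOTECCODES} rather than ${EBROOTGRIB_API}. A quick sanity check that can be run after sourcing ecconf.cfg and calling configure (sketch only; variable names as in the commit):

# Sketch only: verify that every path listed in the GRIB_* variables exists
# and that the ecCodes command line tools are reachable.
for var in GRIB_DEFINITION_PATH GRIB_SAMPLES_PATH GRIB_BIN_PATH; do
    IFS=':' read -ra parts <<< "${!var}"
    for p in "${parts[@]}"; do
        [[ -d $p ]] || echo "WARNING: $var entry not found: $p"
    done
done
command -v grib_ls >/dev/null 2>&1 || echo "WARNING: grib_ls not in PATH"
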
@@ -288,12 +290,14 @@ function finalise()
         # Need to go to start_dir to find the run script
         cd ${start_dir}
         # Submit command
+        echo "sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
+              -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) -d ${SLURM_JOB_ID-"id"} \
+              ./${SLURM_JOB_NAME-"run"}.sh"
         # Note: This does not work if you specify a job name with sbatch -J jobname!
-        sbatch -N ${SLURM_JOB_NUM_NODES}                                                 \
+        sbatch -N ${SLURM_JOB_NUM_NODES-"1"}                                             \
                -o ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
                -e ${run_dir}/$(basename ${stdout_file}).$(printf %03d $((leg_number+1))) \
-               -d ${SLURM_JOB_ID}                                                        \
-               ${resubmit_opt}                                                           \
-               ./${SLURM_JOB_NAME}.sh
+               -d ${SLURM_JOB_ID-"id"}                                                   \
+               ./${SLURM_JOB_NAME-"run"}.sh
     fi
 }
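
Note on finalise(): the resubmit call now mirrors the block removed from EC00.sh, echoing the sbatch command before submitting and using ${VAR-default} fallbacks for the SLURM variables (the ${resubmit_opt} argument is dropped along the way). A tiny illustration of the fallback behaviour under set -u (sketch only):

# Sketch only: with 'set -u' an unset SLURM variable would abort the script,
# while the ${VAR-default} form falls back to a placeholder.
set -u
unset SLURM_JOB_NUM_NODES SLURM_JOB_ID SLURM_JOB_NAME
echo "sbatch -N ${SLURM_JOB_NUM_NODES-"1"} -d ${SLURM_JOB_ID-"id"} ./${SLURM_JOB_NAME-"run"}.sh"
# prints: sbatch -N 1 -d id ./run.sh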