#!/bin/bash
#PBS -N cca.job
#PBS -q np
#PBS -l EC_billing_account=nlchekli
#PBS -l EC_total_tasks=1:24:72:1
#PBS -l EC_threads_per_task=1:1:1:1
#PBS -l EC_hyperthreads=1
#PBS -j oe
#PBS -o run-ifs+nemo.001
####PBS -l walltime=03:00:00
#################################################################################
# HOW-TO:
#
# - Copy this template into your rundir/classic dir:
#
#       cp cca.job.tmpl ../cca.job
#
# - Set #PBS -l EC_total_tasks to the values used in config-run.xml, in the
#   same order as the executables in the "launch" command of the script you
#   submit. The example above, 1:24:72:1, is for the 4 models of the GCM:
#
#       1 XIOS core, 24 NEMO cores, 72 IFS cores, 1 RUNOFF core
#
# - If you want to use the "forking" feature, enable it in the platform file
#   ecmwf-cca-intel.xml (USE_FORKING=true) and group consecutive programs so
#   that they share the computing nodes requested for each group. In the
#   previous example, to run XIOS, NEMO and RUNOFF in one group you would use
#   EC_total_tasks=26:72.
#
# - Set #PBS -l EC_threads_per_task to as many 1s as there are coupled models,
#   or groups, separated by ":" (see the grouped example below).
#
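#   For illustration only (a sketch derived from the two rules above, not
#   directives copied from a working run): with XIOS, NEMO and RUNOFF grouped
#   as in the forking item, the two directives would become:
#
#       #PBS -l EC_total_tasks=26:72
#       #PBS -l EC_threads_per_task=1:1
#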
# - Set #PBS -o (the name of the log file) to the path/name you want.
#
# - Set #PBS -l EC_billing_account to a valid account of yours.
#
# - Replace "./script.sh" below with the script to run. For the GCM it is:
#
#       ./ece-ifs+nemo.sh
#
# - Submit this script:
#
#       qsub cca.job
#
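#   Once submitted, the job can be followed with the usual PBS commands (a
#   generic example, not specific to this template):
#
#       qstat -u $USER      # list your queued and running jobs
#       qdel <job-id>       # kill the job if something went wrong
#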
#
# For bash functions that automatically parse config-run.xml and do all of
# these steps for you, see /home/ms/nl/nm6/ECEARTH/ecearth_functions.sh
#
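# As a minimal sketch of the idea (this is NOT the actual ecearth_functions.sh;
# the array values are simply the example configuration above), the
# colon-separated task list can be assembled like this:
#
#       tasks=(1 24 72 1)   # XIOS NEMO IFS RUNOFF core counts (assumed known)
#       (IFS=:; echo "EC_total_tasks=${tasks[*]}")  # -> EC_total_tasks=1:24:72:1
#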
# Note that the entire memory on one np node (~120 GB) is available to the
# (up to) 36 (resp. 72) tasks you run on the node, i.e. ~3.3 GB (resp.
# ~1.67 GB) per task, if you run with hyperthreads=1 (resp. hyperthreads=2).
# This is sufficient for standard EC-Earth, but if you need more memory per
# task, you can either:
#
# (1) increase it for each and every task by adding this directive:
#
#       #PBS -l EC_memory_per_task=4GB
#
#     which is probably overdoing it, or
#
# (2) increase it for the tasks of only one model by reducing the number of
#     tasks per node for that model (default is 36*hyperthreading), with
#     something like:
#
#       #PBS -l EC_tasks_per_node=1:36:1:20
#
#     This is currently needed with LPJ-Guess, where 5 tasks/node for a
#     total of 10 tasks is needed to accommodate its large memory footprint.
#
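# A quick sanity check of that arithmetic (using the ~120 GB figure above):
# at the default 36 tasks/node each task gets ~120/36 = ~3.3 GB, while the
# 5 tasks/node used for LPJ-Guess leaves ~120/5 = 24 GB per task.
#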
###################### IMPORTANT ################################################
# If you change the name of this script, you need to set the #PBS -N directive
# to the name of this script for automatic resubmission to work. Remember that
# the name cannot be longer than 15 characters.
#################################################################################
cd $PBS_O_WORKDIR

# Optional: Darshan I/O profiling (uncomment the following lines to enable)
#module load darshan
#module unload atp
#export DARSHAN_LOG_DIR=${SCRATCH}/ecearth/darshan-logs
#mkdir -p $DARSHAN_LOG_DIR

./script.sh
exit