ecmwf-cca.job.tmpl

#!/bin/bash
#PBS -N cca.job
#PBS -q np
#PBS -l EC_billing_account=nlchekli
#PBS -l EC_total_tasks=1:24:72:1
#PBS -l EC_threads_per_task=1:1:1:1
#PBS -l EC_hyperthreads=1
#PBS -j oe
#PBS -o run-ifs+nemo.001
####PBS -l walltime=03:00:00
#################################################################################
# HOW-TO:
#
# - copy this template into your rundir/classic dir:
#
#       cp cca.job.tmpl ../cca.job
#
# - set #PBS -l EC_total_tasks to the values used in the config-run.xml, in
#   the same order as the executables in the "launch" command of the
#   script to submit. The example 1:24:72:1 above is for the 4 models of the
#   GCM:
#
#       1 XIOS core, 24 NEMO cores, 72 IFS cores, 1 RUNOFF core
#
# - if you want to use the "forking" feature, enable it in the platform file
#   ecmwf-cca-intel.xml (USE_FORKING=true), and group consecutive programs
#   to share the computing nodes requested for each group.
#   In the previous example, if you want to run XIOS, NEMO and RUNOFF
#   in one group, you should use EC_total_tasks=26:72 (see the sketch below).
#
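#   As a sketch only (assuming the forking layout above, i.e. two groups with
#   one thread per group), the matching directives would read:
#
#       #PBS -l EC_total_tasks=26:72
#       #PBS -l EC_threads_per_task=1:1
#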
# - set #PBS -l EC_threads_per_task to as many 1s as there are coupled models,
#   or groups, separated by ":".
#
# - set #PBS -o (name of the log file) to the path/name you want
#
# - set #PBS -l EC_billing_account to a valid account of yours
#
# - replace "./script.sh" below with the script to run. For the GCM it is:
#
#       ./ece-ifs+nemo.sh
#
# - submit this script:
#
#       qsub cca.job
#
# For bash functions that automatically parse the config-run.xml and do all
# these steps, see /home/ms/nl/nm6/ECEARTH/ecearth_functions.sh
#
# Note that the entire memory of one np node (~120 GB) is available to the (up
# to) 36 (resp. 72) tasks you run on the node, i.e. ~3.3 GB (resp. ~1.67 GB) per
# task, if you run with hyperthreads=1 (resp. hyperthreads=2).
# This is sufficient for standard EC-Earth, but if you need more memory per
# task, you can either:
#
# (1) increase it for each and every task by adding this directive:
#
#       #PBS -l EC_memory_per_task=4GB
#
#     which is probably overdoing it, or
#
# (2) increase it for the tasks of only one model by reducing the number of
#     tasks per node for that model (default is 36*hyperthreading), with
#     something like:
#
#       #PBS -l EC_tasks_per_node=1:36:1:20
#
#     This is currently needed with LPJ-Guess, where 5 tasks/node (for
#     a total of 10 tasks) are needed to accommodate its large memory
#     footprint (see the worked numbers below).
#
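# As a rough sketch of the arithmetic (assuming ~120 GB usable per np node and
# tasks spread evenly over the nodes):
#
#       36 tasks/node  ->  ~3.3 GB per task   (hyperthreads=1)
#       72 tasks/node  ->  ~1.7 GB per task   (hyperthreads=2)
#       20 tasks/node  ->   ~6  GB per task   (the 1:36:1:20 example above)
#        5 tasks/node  ->  ~24  GB per task   (the LPJ-Guess case above)
#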
###################### IMPORTANT ################################################
# If you change the name of this script, you need to set the #PBS -N directive
# to the name of this script for automatic resubmission to work. Remember that
# the name cannot be longer than 15 characters.
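# For example (with a hypothetical script name of at most 15 characters):
#
#       mv cca.job gcm-t255.job
#
# and then set "#PBS -N gcm-t255.job" in the directives above.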
#################################################################################
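# Run from the directory the job was submitted from (PBS starts jobs in the
# home directory by default)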
cd $PBS_O_WORKDIR
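# Optional I/O profiling with Darshan: uncomment the four lines below (assuming
# a darshan module is available on the system) to collect I/O logs under $SCRATCH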
#module load darshan
#module unload atp
#export DARSHAN_LOG_DIR=${SCRATCH}/ecearth/darshan-logs
#mkdir -p $DARSHAN_LOG_DIR
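# Hand over to the run script (replace with the actual script to run, e.g.
# ./ece-ifs+nemo.sh for the GCM, as described in the HOW-TO above)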
./script.sh
exit