! queue-slurm-knmi.rc
!
! Settings for SLURM job manager - for details and other options: "man sbatch" or "web documentation"
!
queue : slurm
! (1) passed directly (ie "as is" at the command line)
!------------------------------------------------------------------
! (--share or -s for example to share node(s) with other jobs)
! add here also the long options
queue.slurm.submit.options : --share
! Note that this is added to the 'submit.options' key value from the pycasso-tm5-expert.rc
! (2) Short options set by end user (and processed by the submit_tm5_tools.py script)
!----------------------------------------------
! Some of the SLURM options:
! --------------------------
! J = job name
! N = minimum[-maximum] nb of nodes
! n = maximum nb of tasks
! c = cpus per task (basically nb of openMP threads)
! w = node list. Can be simple (neuron224) or complicated (neuron[224,225])
! p = partition. Eg: 24gbmem, 48gbmem, or 96gbmem
! o = output log
! e = error log
! ensure correct environment for init step
queue.slurm.options.init : J N n o e
queue.slurm.option.init.J : ${job.name}
queue.slurm.option.init.N : 1
queue.slurm.option.init.n : 1
queue.slurm.option.init.o : <auto>
queue.slurm.option.init.e : <auto>
! ensure correct environment for done step
queue.slurm.options.done : J N n o e
queue.slurm.option.done.J : ${job.name}
queue.slurm.option.done.n : 1
queue.slurm.option.done.N : 1
!queue.slurm.option.done.p : short
!queue.slurm.option.done.t : 00:10:00
queue.slurm.option.done.o : <auto>
queue.slurm.option.done.e : <auto>
! ensure correct environment for run step
queue.slurm.options.run : J N n p w o e
queue.slurm.option.run.J : ${job.name}
queue.slurm.option.run.N : ${slurm.nnode}
queue.slurm.option.run.n : ${par.ntask}
!queue.slurm.option.run.c : ${par.nthread}
queue.slurm.option.run.p : ${slurm.partition}
queue.slurm.option.run.w : ${slurm.nodename}
!queue.slurm.option.run.t : ${ll.wall_clock_limit}
queue.slurm.option.run.o : <auto>
queue.slurm.option.run.e : <auto>
! tasks per node
!queue.slurm.option.tpn :
! maximum number of tasks (but at a workshop, was told to use it as number of procs = ntask*nthreads...
! ... which seems a bit curious: over-allocate?)
!queue.slurm.option.n : <auto>
! would need to use --export
!queue.slurm.option.run.environment : OMP_NUM_THREADS = ${par.nthread}
!queue.slurm.option.s: F
! Examples of previously used submit commands:
! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 48gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 96gbmem -J TM5 -w neuron BULL_run.slurm
! sbatch -p 96gbmem -N 2 -n 24 -J TM5 --ntasks-per-node=12 -w neuron[224,225] BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 8 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 12 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 4 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron[224,225] BULL_run.slurm
! sbatch -p 96gbmem -w neuron[224,225] BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! MPIANALYSER_PROFILECOMM=1 sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
! man sbatch