! pycasso-queue-slurm-sara.rc
  1. !
  2. ! Settings for SLURM job manager - for details and other options: "man sbatch" or "web documentation"
  3. !
  4. queue : slurm
  5. ! (1) passed directly (i.e. "as is" at the command line)
  6. !------------------------------------------------------------------
  7. ! (--share or -s for example to share node(s) with other jobs)
  8. ! add here also the long options
  9. queue.slurm.submit.options :
  10. !queue.slurm.submit.options : --share
  11. ! Note that this is added to the 'submit.options' key value from the pycasso-tm5-expert.rc
  12. ! (2) Short options set by end user (and processed by the submit_tm5_tools.py script)
  13. !----------------------------------------------
  14. ! ensure correct environment for init step
  15. queue.slurm.options.init : J n p t e o
  16. queue.slurm.option.init.n : 1
  17. queue.slurm.option.init.J : ${job.name}
  18. queue.slurm.option.init.o : <auto>
  19. queue.slurm.option.init.e : <auto>
  20. queue.slurm.option.init.p : staging
  21. queue.slurm.option.init.t : 03:00:00
  22. ! ensure correct environment for done step
  23. queue.slurm.options.done : J n p t e o
  24. queue.slurm.option.done.n : 1
  25. queue.slurm.option.done.J : ${job.name}
  26. queue.slurm.option.done.p : short
  27. queue.slurm.option.done.t : 00:10:00
  28. queue.slurm.option.done.o : <auto>
  29. queue.slurm.option.done.e : <auto>
  30. ! ensure correct environment for run step
  31. queue.slurm.options.run : J n p t c e o
  32. queue.slurm.option.run.J : ${job.name}
  33. queue.slurm.option.run.n : ${par.ntask}
  34. queue.slurm.option.run.c : ${par.nthread}
  35. queue.slurm.option.run.t : ${wall_clock_limit}
  36. queue.slurm.option.run.o : <auto>
  37. queue.slurm.option.run.e : <auto>
  38. queue.slurm.option.run.p : ${queue_option}
  39. ! list of node names. Can be simple: neuron224
  40. ! or complicated: mynode[1-5,7,..]
  41. !queue.slurm.option.w : ${slurm.nodename}
  42. !
  43. !queue.slurm.option.s: F
  44. ! partition
  45. queue.slurm.option.default.p : normal
  46. ! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
  47. ! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
  48. ! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
  49. ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  50. ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  51. ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  52. ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  53. ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  54. ! sbatch -p 96gbmem --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  55. ! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
  56. ! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
  57. ! sbatch -p 48gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
  58. ! sbatch -p 96gbmem -J TM5 -w neuron BULL_run.slurm
  59. ! sbatch -p 96gbmem -N 2 -n 24 -J TM5 --ntasks-per-node=12 -w neuron[224,225] BULL_run.slurm
  60. ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
  61. ! sbatch -p 96gbmem -N 1 -n 8 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
  62. ! sbatch -p 96gbmem -N 1 -n 12 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
  63. ! sbatch -p 96gbmem -N 1 -n 4 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
  64. ! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
  65. ! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
  66. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  67. ! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
  68. ! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
  69. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  70. ! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
  71. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  72. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  73. ! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
  74. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  75. ! sbatch -p 96gbmem -w neuron[224,225] BULL_run.slurm
  76. ! sbatch -p 96gbmem -w neuron[224,225] BULL_run.slurm
  77. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  78. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  79. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  80. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  81. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  82. ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  83. ! MPIMPIANALYSER_PROFILECOMM=1 sbatch -p 96gbmem -w neuron224 BULL_run.slurm
  84. ! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
  85. ! man sbatch