- !
- ! Settings for the SLURM job manager - for details and other options, see "man sbatch" or the SLURM web documentation
- !
- queue : slurm
- ! (1) Options passed directly (i.e. "as is") on the command line
- !------------------------------------------------------------------
- ! (e.g. --share or -s, to share node(s) with other jobs)
- ! Long options should also be added here.
- queue.slurm.submit.options :
- !queue.slurm.submit.options : --share
- ! Note that this is added to the 'submit.options' key value from the pycasso-tm5-expert.rc
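- ! Illustration (a sketch of the expected effect, not taken from the scripts): with
- ! "queue.slurm.submit.options : --share", the generated submit command would look
- ! roughly like:   sbatch --share <other short options> <job script>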
- ! (2) Short options set by the end user (and processed by the submit_tm5_tools.py script)
- !----------------------------------------------
- ! First, the default list of queue options (the same for each step):
- !queue.slurm.options : J N n c p t
- queue.slurm.options.default : J N p t
- ! job name:
- queue.slurm.option.default.J : ${job.name}
- ! minimum number of nodes
- queue.slurm.option.default.N : 1
- ! ${slurm.nnode}
- !queue.slurm.option.run.c : 1
- ! maximum number of tasks (though at the workshop we were told to use it as the number of processors - over-allocate?)
- !queue.slurm.option.n : <auto>
- ! tasks per node
- !queue.slurm.option.tpn :
- ! ensure correct environment for init step
- queue.slurm.options.init : J n p t e o
- queue.slurm.option.init.n : 1
- queue.slurm.option.init.J : ${job.name}
- queue.slurm.option.init.o : <auto>
- queue.slurm.option.init.e : <auto>
- ! ensure correct environment for done step
- queue.slurm.options.done : J N n p t e o
- queue.slurm.option.done.n : 1
- queue.slurm.option.done.J : ${job.name}
- queue.slurm.option.done.p : short
- queue.slurm.option.done.N : 1
- queue.slurm.option.done.t : 00:10:00
- queue.slurm.option.done.o : <auto>
- queue.slurm.option.done.e : <auto>
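- ! For illustration only (assuming each single-letter key maps to the matching sbatch
- ! short flag), the settings above would give a "done" job header along the lines of:
- !   #SBATCH -J <job.name>
- !   #SBATCH -N 1
- !   #SBATCH -n 1
- !   #SBATCH -p short
- !   #SBATCH -t 00:10:00
- !   #SBATCH -o <log file chosen by the scripts>
- !   #SBATCH -e <error file chosen by the scripts>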
- ! ensure correct environment for run step
- queue.slurm.options.run : J N n p t c e o
- queue.slurm.option.run.J : ${job.name}
- queue.slurm.option.run.n : ${par.ntask}
- queue.slurm.option.run.c : ${par.nthread}
- queue.slurm.option.run.N : 1
- queue.slurm.option.run.t : ${ll.wall_clock_limit}
- queue.slurm.option.run.o : <auto>
- queue.slurm.option.run.e : <auto>
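- ! Likewise for the run step (a sketch with hypothetical values par.ntask=8 and
- ! par.nthread=1; -c is assumed to stand for --cpus-per-task):
- !   #SBATCH -J <job.name>
- !   #SBATCH -N 1
- !   #SBATCH -n 8
- !   #SBATCH -c 1
- !   #SBATCH -t <value of ll.wall_clock_limit>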
- ! List of node names. Can be a single name, e.g. neuron224,
- ! or a range expression, e.g. mynode[1-5,7,...]
- !queue.slurm.option.w : ${slurm.nodename}
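- ! e.g. a hypothetical per-step variant (following the option.<step>.<flag> pattern
- ! above; the flag would presumably also need to appear in queue.slurm.options.run):
- !queue.slurm.option.run.w : neuron[224,225]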
- !
- !queue.slurm.option.s : F
- ! partition
- queue.slurm.option.default.p : normal
-
- ! Example sbatch invocations kept for reference:
- ! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
- ! sbatch -p 96gbmem --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
- ! sbatch -p 48gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
- ! sbatch -p 96gbmem -J TM5 -w neuron BULL_run.slurm
- ! sbatch -p 96gbmem -N 2 -n 24 -J TM5 --ntasks-per-node=12 -w neuron[224,225] BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 8 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 12 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
- ! sbatch -p 96gbmem -N 1 -n 4 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
- ! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
- ! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
- ! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
- ! sbatch -p 96gbmem -w neuron[224,225] BULL_run.slurm
- ! MPIANALYSER_PROFILECOMM=1 sbatch -p 96gbmem -w neuron224 BULL_run.slurm
- ! man sbatch