!
! Settings for the SLURM job manager - for details and other options: "man sbatch" or the web documentation
!
queue : slurm

! (1) Options passed directly (i.e. "as is") on the sbatch command line
!------------------------------------------------------------------
! (for example --share, or -s, to share the node(s) with other jobs)
! Long options should also be added here.
queue.slurm.submit.options : --share
! Note that this is appended to the value of the 'submit.options' key from pycasso-tm5-expert.rc.
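!
! Illustration only: since these options are passed "as is", the submit tools simply
! append them to the sbatch call, e.g.
!
!   sbatch --share <other options> BULL_run.slurm
!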
! (2) Short options set by the end user (and processed by the submit_tm5_tools.py script)
!----------------------------------------------
! Some of the SLURM options:
! --------------------------
! J = job name
! N = minimum[-maximum] number of nodes
! n = maximum number of tasks
! c = cpus per task (basically the number of OpenMP threads)
! w = node list. Can be simple (neuron224) or more complex (neuron[224,225])
! p = partition, e.g. 24gbmem, 48gbmem, or 96gbmem
! o = output log
! e = error log
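!
! Illustration only, using node and partition names from the example commands further
! below (the log file names here are made up):
!
!   sbatch -J TM5 -N 1 -n 12 -c 1 -p 96gbmem -w neuron224 -o tm5.out -e tm5.err BULL_run.slurm
!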
! ensure correct environment for init step
queue.slurm.options.init : J N n o e
queue.slurm.option.init.J : ${job.name}
queue.slurm.option.init.N : 1
queue.slurm.option.init.n : 1
queue.slurm.option.init.o : <auto>
queue.slurm.option.init.e : <auto>
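!
! Illustration only (not the literal call made by submit_tm5_tools.py): with the keys
! above, the init step is a serial job, roughly equivalent to
!
!   sbatch -J ${job.name} -N 1 -n 1 -o <auto> -e <auto> <init script>
!
! with <auto> presumably resolved by the submit tools to generated log file names.
! The done step below follows the same pattern.
!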
! ensure correct environment for done step
queue.slurm.options.done : J N n o e
queue.slurm.option.done.J : ${job.name}
queue.slurm.option.done.n : 1
queue.slurm.option.done.N : 1
!queue.slurm.option.done.p : short
!queue.slurm.option.done.t : 00:10:00
queue.slurm.option.done.o : <auto>
queue.slurm.option.done.e : <auto>
! ensure correct environment for run step
queue.slurm.options.run : J N n p w o e
queue.slurm.option.run.J : ${job.name}
queue.slurm.option.run.N : ${slurm.nnode}
queue.slurm.option.run.n : ${par.ntask}
!queue.slurm.option.run.c : ${par.nthread}
queue.slurm.option.run.p : ${slurm.partition}
queue.slurm.option.run.w : ${slurm.nodename}
!queue.slurm.option.run.t : ${ll.wall_clock_limit}
queue.slurm.option.run.o : <auto>
queue.slurm.option.run.e : <auto>
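!
! Illustration only: if, say, slurm.nnode=1, par.ntask=12, slurm.partition=96gbmem and
! slurm.nodename=neuron224 are set elsewhere in the rc files (values borrowed from the
! example commands below), the run step would be submitted roughly as
!
!   sbatch -J ${job.name} -N 1 -n 12 -p 96gbmem -w neuron224 -o <auto> -e <auto> BULL_run.slurm
!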
! tasks per node
!queue.slurm.option.tpn :
! maximum number of tasks (at the workshop we were told to use it as the number of
! processors, i.e. ntask*nthreads... which seems a bit curious: over-allocate?)
!queue.slurm.option.n : <auto>
! need to use --export to pass environment variables (see the example below)
!queue.slurm.option.run.environment : OMP_NUM_THREADS = ${par.nthread}
!queue.slurm.option.s: F
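!
! Sketch only (not taken from the expert rc): environment variables can be passed
! with sbatch's --export option, e.g. for the OpenMP thread count (4 is just a
! placeholder for the value of ${par.nthread}):
!
!   sbatch --export=ALL,OMP_NUM_THREADS=4 -p 96gbmem -w neuron224 BULL_run.slurm
!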
!
! Examples of submission commands:
!
! sbatch -p 48gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 48gbmem -N 1 -n 2 -J TM5 --share --ntasks-per-node=1 -w neuron204 BULL_run.slurm
! sbatch -p 96gbmem -J TM5 -w neuron BULL_run.slurm
! sbatch -p 96gbmem -N 2 -n 24 -J TM5 --ntasks-per-node=12 -w neuron[224,225] BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 1 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 8 -J TM5 --ntasks-per-node=12 -w neuron224 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 12 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -N 1 -n 4 -J TM5 --ntasks-per-node=12 -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -w neuron225 BULL_run.slurm
! sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! sbatch -p 48gbmem -w neuron202 BULL_run.slurm
! sbatch -p 96gbmem -w neuron[224,225] BULL_run.slurm
! MPIMPIANALYSER_PROFILECOMM=1 sbatch -p 96gbmem -w neuron224 BULL_run.slurm
! man sbatch