ReturnToService=2
TaskPlugin=task/cgroup
# TIMERS
SlurmctldTimeout=300
SlurmdTimeout=300
InactiveLimit=0
MinJobAge=300
KillWait=30
Waittime=0
# LOGGING
SlurmctldDebug=3
SlurmctldLogFile=/var/log/slurmctld
SlurmdDebug=3
SlurmdLogFile=/var/log/slurmd
# ACCOUNTING
# Limit Enforcement
AccountingStorageEnforce=qos,limits
JobAcctGatherType=jobacct_gather/linux
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageUser=slurm
# CONSUMABLE RESOURCES
#
#SelectType=select/linear
SelectType=select/cons_res
SelectTypeParameters=CR_CPU_Memory
# Scheduler
SchedulerType=sched/backfill
# Nodes
NodeName=cn[100-113,115-128] Procs=56
# Partitions
PartitionName=defq Default=YES MinNodes=1 AllowGroups=ALL PriorityJobFactor=1 PriorityTier=1 DisableRootJobs=NO RootOnly=NO Hidden=NO Shared=NO GraceTime=0 PreemptMode=OFF OverSubscribe=NO OverTimeLimit=0 State=UP Nodes=cn[100-113,115-128]
# Generic resource types
GresTypes=gpu,mic
# Epilog/Prolog parameters
PrologSlurmctld=/cm/local/apps/cmd/scripts/prolog-prejob
Prolog=/cm/local/apps/cmd/scripts/prolog
Epilog=/cm/local/apps/cmd/scripts/epilog
# Fast Schedule option
FastSchedule=0
# Power Saving
SuspendTime=-1 # this disables power saving
SuspendTimeout=30
ResumeTimeout=60
SuspendProgram=/cm/local/apps/cluster-tools/wlm/scripts/slurmpoweroff
ResumeProgram=/cm/local/apps/cluster-tools/wlm/scripts/slurmpoweron
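
The /cm/local paths suggest this slurm.conf is generated by Bright Cluster Manager, in which case persistent changes are normally made through its management tools rather than by hand. If the file is edited directly, a minimal sketch for applying and sanity-checking the result (assuming the commands are run as root on the controller node, and that the file has already been distributed to all nodes) is:

    scontrol reconfigure                                                    # ask slurmctld and the slurmd daemons to re-read slurm.conf
    scontrol show config | grep -Ei 'selecttype|schedulertype|returntoservice'   # confirm the plugin settings above took effect
    sinfo -p defq                                                           # the defq partition should list nodes cn[100-113,115-128]
    scontrol show partition defq                                            # verify OverSubscribe, PreemptMode and the other partition options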