[torqueusers] assigning resources to a queue

David Beer dbeer at adaptivecomputing.com
Tue Apr 24 09:38:21 MDT 2012


Christina,

To make the hydrology queue request <specific node> by default:

qmgr -c 'set queue hydrology resources_default.neednodes=<specific node>'
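
More generally, the usual recipe for mapping queues onto subsets of nodes
is to tag the nodes with a property and point each queue's neednodes at
that property. A minimal sketch, assuming placeholder property names
'hydro' and 'general', with n001 standing in for the assigned node:

# In $TORQUE_HOME/server_priv/nodes, add a property to each node:
#   n001 np=12 hydro
#   n002 np=12 general
#   ...
#   n020 np=12 general
# (restart pbs_server after editing the nodes file)

qmgr -c 'set queue hydrology resources_default.neednodes=hydro'
qmgr -c 'set queue batch resources_default.neednodes=general'

Users then just pick the queue, e.g. '#PBS -q hydrology' in the job
script, and each queue stays on its own node(s).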

David

On Tue, Apr 24, 2012 at 7:35 AM, Christina Salls
<christina.salls at noaa.gov> wrote:

> Hi all,
>
>       I am new to TORQUE and have implemented a very simple configuration
> that has been working just fine so far.  I have TORQUE installed on a
> single 20-node cluster using the built-in scheduler.  At this point I have
> a few users who are submitting jobs to a single default queue.  I have a
> new user who is going to have one node "assigned" to him.  I was planning
> to configure a separate queue for his jobs and ask the users to request
> the queue as part of their job submission scripts.  Is there a way to
> assign a particular node to a queue?  And the rest of the nodes to the
> other queue?  Would this be the simplest way to accomplish the objective?
> Thanks in advance.
>
> Christina
>
> P.S.
>
> [root at wings rpy2-2.2.0]# qmgr
> Max open servers: 10239
> Qmgr: print server
> #
> # Create queues and set their attributes.
> #
> #
> # Create and define queue hydrology
> #
> create queue hydrology
> set queue hydrology queue_type = Execution
> set queue hydrology enabled = True
> set queue hydrology started = True
> #
> # Create and define queue batch
> #
> create queue batch
> set queue batch queue_type = Execution
> set queue batch enabled = True
> set queue batch started = True
> #
> # Set server attributes.
> #
> set server scheduling = True
> set server acl_hosts = admin.default.domain
> set server acl_hosts += wings.glerl.noaa.gov
> set server managers = root at wings.glerl.noaa.gov
> set server managers += root@*
> set server managers += salls at wings.glerl.noaa.gov
> set server operators = root at wings.glerl.noaa.gov
> set server operators += salls at wings.glerl.noaa.gov
> set server default_queue = batch
> set server log_events = 511
> set server mail_from = adm
> set server scheduler_iteration = 100
> set server node_check_rate = 150
> set server tcp_timeout = 6
> set server mom_job_sync = True
> set server keep_completed = 0
> set server auto_node_np = True
> set server next_job_number = 760
> set server np_default = 12
> set server record_job_info = True
> set server record_job_script = True
> set server job_log_keep_days = 10
>
> [root at wings rpy2-2.2.0]# pbsnodes -a
> n001
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274459,varattr=,jobs=,state=free,netload=586401282,gres=,loadave=0.00,ncpus=12,physmem=20463136kb,availmem=27850272kb,totmem=28655128kb,idletime=510345,nusers=1,nsessions=1,sessions=30936,uname=Linux
> n001 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n002
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274453,varattr=,jobs=,state=free,netload=766967700,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31910444kb,totmem=32792076kb,idletime=510298,nusers=1,nsessions=1,sessions=29658,uname=Linux
> n002 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n003
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274478,varattr=,jobs=,state=free,netload=123926745,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31938444kb,totmem=32792076kb,idletime=510352,nusers=1,nsessions=1,sessions=29463,uname=Linux
> n003 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n004
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274486,varattr=,jobs=,state=free,netload=5979387196,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31914772kb,totmem=32792076kb,idletime=510379,nusers=1,nsessions=1,sessions=29625,uname=Linux
> n004 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n005
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274467,varattr=,jobs=,state=free,netload=123646737,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31920932kb,totmem=32792076kb,idletime=510320,nusers=0,nsessions=?
> 15201,sessions=? 15201,uname=Linux n005 2.6.32-131.0.15.el6.x86_64 #1 SMP
> Tue May 10 15:42:40 EDT 2011 x86_64,opsys=linux
>      gpus = 0
>
> n006
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274483,varattr=,jobs=,state=free,netload=123497908,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31958328kb,totmem=32792076kb,idletime=510349,nusers=1,nsessions=1,sessions=29461,uname=Linux
> n006 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n007
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274454,varattr=,jobs=,state=free,netload=124286477,gres=,loadave=0.09,ncpus=12,physmem=24600084kb,availmem=31923372kb,totmem=32792076kb,idletime=510340,nusers=1,nsessions=1,sessions=29488,uname=Linux
> n007 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n008
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274460,varattr=,jobs=,state=free,netload=123313535,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31924724kb,totmem=32792076kb,idletime=510321,nusers=1,nsessions=1,sessions=29439,uname=Linux
> n008 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n009
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274450,varattr=,jobs=,state=free,netload=117452022,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31925280kb,totmem=32792076kb,idletime=510335,nusers=1,nsessions=1,sessions=28949,uname=Linux
> n009 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n010
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274466,varattr=,jobs=,state=free,netload=117146413,gres=,loadave=0.07,ncpus=12,physmem=24600084kb,availmem=31920892kb,totmem=32792076kb,idletime=510339,nusers=1,nsessions=1,sessions=28894,uname=Linux
> n010 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n011
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274484,varattr=,jobs=,state=free,netload=116981665,gres=,loadave=0.03,ncpus=12,physmem=24600084kb,availmem=31918964kb,totmem=32792076kb,idletime=510345,nusers=1,nsessions=1,sessions=28907,uname=Linux
> n011 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n012
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274474,varattr=,jobs=,state=free,netload=58328745076,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31890504kb,totmem=32792076kb,idletime=510341,nusers=1,nsessions=1,sessions=29498,uname=Linux
> n012 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n013
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274461,varattr=,jobs=,state=free,netload=115120292,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31919392kb,totmem=32792076kb,idletime=510344,nusers=1,nsessions=1,sessions=28869,uname=Linux
> n013 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n014
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274471,varattr=,jobs=,state=free,netload=114635782,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31921548kb,totmem=32792076kb,idletime=510327,nusers=0,nsessions=?
> 15201,sessions=? 15201,uname=Linux n014 2.6.32-131.0.15.el6.x86_64 #1 SMP
> Tue May 10 15:42:40 EDT 2011 x86_64,opsys=linux
>      gpus = 0
>
> n015
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274481,varattr=,jobs=,state=free,netload=114077034,gres=,loadave=0.02,ncpus=12,physmem=24600084kb,availmem=31921576kb,totmem=32792076kb,idletime=510350,nusers=1,nsessions=1,sessions=28897,uname=Linux
> n015 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n016
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274470,varattr=,jobs=,state=free,netload=114293556,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31926780kb,totmem=32792076kb,idletime=510351,nusers=1,nsessions=1,sessions=28918,uname=Linux
> n016 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n017
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274484,varattr=,jobs=,state=free,netload=114418798,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31919716kb,totmem=32792076kb,idletime=510376,nusers=1,nsessions=1,sessions=28922,uname=Linux
> n017 2.6.32-131.0.15.el6.x86_64 #1 SMP Tue May 10 15:42:40 EDT 2011
> x86_64,opsys=linux
>      gpus = 0
>
> n018
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274470,varattr=,jobs=,state=free,netload=257640357666,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31861476kb,totmem=32792076kb,idletime=510314,nusers=0,nsessions=?
> 15201,sessions=? 15201,uname=Linux n018 2.6.32-131.0.15.el6.x86_64 #1 SMP
> Tue May 10 15:42:40 EDT 2011 x86_64,opsys=linux
>      gpus = 0
>
> n019
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274475,varattr=,jobs=,state=free,netload=109792676,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31924640kb,totmem=32792076kb,idletime=510345,nusers=0,nsessions=?
> 15201,sessions=? 15201,uname=Linux n019 2.6.32-131.0.15.el6.x86_64 #1 SMP
> Tue May 10 15:42:40 EDT 2011 x86_64,opsys=linux
>      gpus = 0
>
> n020
>      state = free
>      np = 12
>      ntype = cluster
>      status =
> rectime=1335274469,varattr=,jobs=,state=free,netload=58239290441,gres=,loadave=0.00,ncpus=12,physmem=24600084kb,availmem=31868680kb,totmem=32792076kb,idletime=510354,nusers=0,nsessions=?
> 15201,sessions=? 15201,uname=Linux n020 2.6.32-131.0.15.el6.x86_64 #1 SMP
> Tue May 10 15:42:40 EDT 2011 x86_64,opsys=linux
>      gpus = 0
>
>
> --
> Christina A. Salls
> GLERL Computer Group
> help.glerl at noaa.gov
> Help Desk x2127
> Christina.Salls at noaa.gov
> Voice Mail 734-741-2446
>
>
>
> _______________________________________________
> torqueusers mailing list
> torqueusers at supercluster.org
> http://www.supercluster.org/mailman/listinfo/torqueusers
>
>


-- 
David Beer | Software Engineer
Adaptive Computing