"""Resource management for Cray ALPS based systems"""

import logging
import threading
import thread
import time
import sys
import xmlrpclib
import json
import ConfigParser

import Cobalt.Util
import Cobalt.Proxy
import Cobalt.Components.system.AlpsBridge as ALPSBridge

from Cobalt.Components.base import Component, exposed, automatic, query, locking
from Cobalt.Components.system.base_system import BaseSystem
from Cobalt.Components.system.CrayNode import CrayNode
from Cobalt.Components.system.base_pg_manager import ProcessGroupManager
from Cobalt.Components.system.ALPSProcessGroup import ALPSProcessGroup
from Cobalt.Exceptions import ComponentLookupError
from Cobalt.Exceptions import JobNotInteractive
from Cobalt.Exceptions import JobValidationError
from Cobalt.DataTypes.ProcessGroup import ProcessGroup
from Cobalt.Util import compact_num_list, expand_num_list
from Cobalt.Util import init_cobalt_config, get_config_option

_logger = logging.getLogger(__name__)

init_cobalt_config()

UPDATE_THREAD_TIMEOUT = int(get_config_option('alpssystem', 'update_thread_timeout', 10))
TEMP_RESERVATION_TIME = int(get_config_option('alpssystem', 'temp_reservation_time', 300))
SAVE_ME_INTERVAL = float(get_config_option('alpssystem', 'save_me_interval', 10.0))
#default 20 minutes to account for boot.
PENDING_STARTUP_TIMEOUT = float(get_config_option('alpssystem', 'pending_startup_timeout', 1200))
APKILL_CMD = get_config_option('alps', 'apkill', '/opt/cray/alps/default/bin/apkill')
DRAIN_MODE = get_config_option('system', 'drain_mode', 'first-fit')

#cleanup time in seconds
CLEANUP_DRAIN_WINDOW = get_config_option('system', 'cleanup_drain_window', 300)

#Epsilon for backfilling.  This system does not do this on a per-node basis.
BACKFILL_EPSILON = int(get_config_option('system', 'backfill_epsilon', 120))
ELOGIN_HOSTS = [host for host in get_config_option('system', 'elogin_hosts', '').split(':')]
if ELOGIN_HOSTS == ['']:
    ELOGIN_HOSTS = []
DRAIN_MODES = ['first-fit', 'backfill']
CLEANING_ID = -1

DEFAULT_MCDRAM_MODE = get_config_option('alpssystem', 'default_mcdram_mode', 'cache')
DEFAULT_NUMA_MODE = get_config_option('alpssystem', 'default_numa_mode', 'quad')
MCDRAM_TO_CACHEPCT = {'flat':'0', 'cache':'100', 'split':'25', 'equal':'50', '0':'0', '25':'25', '50':'50', '100':'100'}
VALID_MCDRAM_MODES = ['flat', 'cache', 'split', 'equal', '0', '25', '50', '100']
VALID_NUMA_MODES = ['a2a', 'hemi', 'quad', 'snc2', 'snc4']


def chain_loc_list(loc_list):
    '''Take a list of compact Cray locations,
    expand and concatenate them.

    '''
    retlist = []
    for locs in loc_list:
        retlist.extend(expand_num_list(locs))
    return retlist
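
# Illustrative sketch (comment only, not executed): assuming
# Cobalt.Util.expand_num_list expands a compact range string such as '1-3'
# into [1, 2, 3], a call like
#   chain_loc_list(['1-3', '7'])
# would return [1, 2, 3, 7]: each compact location string is expanded and the
# results are concatenated in order.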

class CraySystem(BaseSystem):
    '''Cray/ALPS-specific system component.  Behaviors should go here.  Direct
    ALPS interaction through BASIL/other APIs should go through the ALPSBridge
    (or other bridge) module as appropriate.

    '''
    name = "system"
    implementation = "alps_system"
    logger = _logger

    def __init__(self, *args, **kwargs):
        '''Initialize system.  Read initial states from bridge.
        Get current state

        '''
        start_time = time.time()
        super(CraySystem, self).__init__(*args, **kwargs)
        _logger.info('BASE SYSTEM INITIALIZED')
        self._common_init_restart()
        _logger.info('ALPS SYSTEM COMPONENT READY TO RUN')
        _logger.info('Initialization complete in %s sec.', (time.time() -
                start_time))

    def _common_init_restart(self, spec=None):
        '''Common routine for cold and restart initialization of the system
        component.

        '''
        try:
            self.system_size = int(get_config_option('system', 'size'))
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            _logger.critical('ALPS SYSTEM: ABORT STARTUP: System size must be specified in the [system] section of the cobalt configuration file.')
            sys.exit(1)
        if DRAIN_MODE not in DRAIN_MODES:
            #abort startup, we have a completely invalid config.
            _logger.critical('ALPS SYSTEM: ABORT STARTUP: %s is not a valid drain mode.  Must be one of %s.',
                DRAIN_MODE, ", ".join(DRAIN_MODES))
            sys.exit(1)
        #initialize bridge.
        bridge_pending = True
        while bridge_pending:
            # purge stale children from prior run.  Also ensure the
            # system_script_forker is currently up.
            # These attempts may fail due to system_script_forker not being up.
            # We don't want to trash the statefile in this case.
            try:
                ALPSBridge.init_bridge()
            except ALPSBridge.BridgeError:
                _logger.error('Bridge Initialization failed.  Retrying.')
                Cobalt.Util.sleep(10)
            except ComponentLookupError:
                _logger.warning('Error reaching forker.  Retrying.')
                Cobalt.Util.sleep(10)
            else:
                bridge_pending = False
                _logger.info('BRIDGE INITIALIZED')
        #process manager setup
        if spec is None:
            self.process_manager = ProcessGroupManager(pgroup_type=ALPSProcessGroup)
        else:
            self.process_manager = ProcessGroupManager(pgroup_type=ALPSProcessGroup).__setstate__(spec['process_manager'])
            self.logger.debug('pg type %s', self.process_manager.process_groups.item_cls)
        #self.process_manager.forkers.append('alps_script_forker')
        self.process_manager.update_launchers()
        self.pending_start_timeout = PENDING_STARTUP_TIMEOUT
        _logger.info('PROCESS MANAGER INITIALIZED')
        #resource management setup
        self.nodes = {} #cray node_id: CrayNode
        self.node_name_to_id = {} #cray node name to node_id map
        self.alps_reservations = {} #cobalt jobid : AlpsReservation object
        if spec is not None:
            self.alps_reservations = spec['alps_reservations']
        self._init_nodes_and_reservations()
        if spec is not None:
            node_info = spec.get('node_info', {})
            for nid, node in node_info.items():
                try:
                    self.nodes[nid].reset_info(node)
                except: #check the exception types later.  Carry on otherwise.
                    self.logger.warning("Node nid: %s not found in restart information.  Bringing up node in a clean configuration.", nid)
        #storage for pending job starts.  Allows us to handle slow starts vs
        #user deletes
        self.pending_starts = {} #jobid: time this should be cleared.
        self.nodes_by_queue = {} #queue:[node_ids]
        #populate initial state
        #state update thread and lock
        self._node_lock = threading.RLock()
        self._gen_node_to_queue()
        self.node_update_thread = thread.start_new_thread(self._run_update_state, tuple())
        _logger.info('UPDATE THREAD STARTED')
        self.current_equivalence_classes = []
        self.killing_jobs = {}
        #hold on to the initial spec in case nodes appear out of nowhere.
        self.init_spec = None
        if spec is not None:
            self.init_spec = spec

    def __getstate__(self):
        '''Save process, alps-reservation information, along with base
        information'''
        state = {}
        state.update(super(CraySystem, self).__getstate__())
        state['alps_system_statefile_version'] = 1
        state['process_manager'] = self.process_manager.__getstate__()
        state['alps_reservations'] = self.alps_reservations
        state['node_info'] = self.nodes
        return state

    def __setstate__(self, state):
        start_time = time.time()
        super(CraySystem, self).__setstate__(state)
        _logger.info('BASE SYSTEM INITIALIZED')
        self._common_init_restart(state)
        _logger.info('ALPS SYSTEM COMPONENT READY TO RUN')
        _logger.info('Reinitialization complete in %s sec.', (time.time() -
                start_time))

    def save_me(self):
        '''Automatically save a copy of the state of the system component.'''
        #should we be holding the block lock as well?
        Component.save(self)
    save_me = automatic(save_me, SAVE_ME_INTERVAL)

    def _init_nodes_and_reservations(self):
        '''Initialize nodes from ALPS bridge data'''

        retnodes = {}
        pending = True
        while pending:
            try:
                # None of these queries has strictly degenerate data.  Inventory
                # is needed for raw reservation data.  System gets memory and a
                # much more compact representation of data.  Reservednodes gives
                # which nodes are reserved.
                inventory = ALPSBridge.fetch_inventory()
                _logger.info('INVENTORY FETCHED')
                system = ALPSBridge.extract_system_node_data(ALPSBridge.system())
                _logger.info('SYSTEM DATA FETCHED')
                reservations = ALPSBridge.fetch_reservations()
                _logger.info('ALPS RESERVATION DATA FETCHED')
                # reserved_nodes = ALPSBridge.reserved_nodes()
            except Exception:
                #don't crash out here.  That may trash a statefile.
                _logger.error('Possible transient encountered during initialization. Retrying.',
                        exc_info=True)
                Cobalt.Util.sleep(10)
            else:
                pending = False

        self._assemble_nodes(inventory, system)
        #Reversing the node name to id lookup is going to save a lot of cycles.
        for node in self.nodes.values():
            self.node_name_to_id[node.name] = node.node_id
        _logger.info('NODE INFORMATION INITIALIZED')
        _logger.info('ALPS REPORTS %s NODES', len(self.nodes))
        # self._assemble_reservations(reservations, reserved_nodes)
        return

    def _assemble_nodes(self, inventory, system):
        '''merge together the INVENTORY and SYSTEM query data to form as
        complete a picture of a node as we can.

        '''
        nodes = {}
        for nodespec in inventory['nodes']:
            node = CrayNode(nodespec)
            node.managed = True
            nodes[node.node_id] = node
        for node_id, nodespec in system.iteritems():
            nodes[node_id].attributes.update(nodespec['attrs'])
            # Should this be a different status?
            nodes[node_id].role = nodespec['role'].upper()
            if nodes[node_id].role.upper() not in ['BATCH']:
                nodes[node_id].status = 'down'
            nodes[node_id].status = nodespec['state']
        self.nodes = nodes

    def _assemble_reservations(self, reservations, reserved_nodes):
        # FIXME: we can recover reservations now.  Implement this.
        pass

    def _gen_node_to_queue(self):
        '''(Re)Generate a mapping for fast lookup of node-id's to queues.'''
        with self._node_lock:
            self.nodes_by_queue = {}
            for node in self.nodes.values():
                for queue in node.queues:
                    if queue in self.nodes_by_queue.keys():
                        self.nodes_by_queue[queue].add(node.node_id)
                    else:
                        self.nodes_by_queue[queue] = set([node.node_id])

    @exposed
    def get_nodes(self, as_dict=False, node_ids=None, params=None, as_json=False):
        '''fetch the node dictionary.

            as_dict  - Return node information as a dictionary keyed to string
                        node_id value.
            node_ids - A list of node names to return, if None, return all nodes
                       (default None).
            params   - If requesting a dict, only request this list of
                       parameters of the node.
            as_json  - Encode to json before sending.  Useful on large systems.

            returns the node dictionary.  Can return underlying node data as
            dictionary for XMLRPC purposes

        '''
        def node_filter(node):
            if node_ids is not None:
                return (str(node[0]) in [str(nid) for nid in node_ids])
            return True

        node_info = None
        if as_dict:
            retdict = {k:v.to_dict(True, params) for k, v in self.nodes.items()}
            node_info = dict(filter(node_filter, retdict.items()))
        else:
            node_info = dict(filter(node_filter, self.nodes.items()))
        if as_json:
            return json.dumps(node_info)
        return node_info
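
    # Illustrative usage sketch (hypothetical client code, not part of this
    # module): over XML-RPC a caller might do
    #   nodes = system_proxy.get_nodes(True, ['12', '13'], ['name', 'queues'])
    # and get back a dict keyed by node_id string containing only the requested
    # parameters; passing as_json=True returns the same data JSON-encoded.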

    def _run_update_state(self):
        '''automated node update functions on the update timer go here.'''
        while True:
            try:
                self.process_manager.update_launchers()
                self.update_node_state()
                self._get_exit_status()
            except Exception:
                # prevent the update thread from dying
                _logger.critical('Error in _run_update_state', exc_info=True)
            finally:
                Cobalt.Util.sleep(UPDATE_THREAD_TIMEOUT)

    def _reconstruct_node(self, inven_node, inventory):
        '''Reconstruct a node from statefile information.  Needed whenever we
        find a new node.  If no statefile information from the original cobalt
        invocation exists, bring up a node in default states and mark node
        administratively down.

        This node was disabled and invisible to ALPS at the time Cobalt was
        initialized and so we have no current record of that node.

        '''
        nid = inven_node['node_id']
        new_node = None
        #construct basic node from inventory
        for node_info in inventory['nodes']:
            if int(node_info['node_id']) == int(nid):
                new_node = CrayNode(node_info)
                break
        if new_node is None:
            #we have a phantom node?
            self.logger.error('Unable to find inventory information for nid: %s', nid)
            return
        # if we have information from the statefile we need to repopulate the
        # node with the saved data.
        # Perhaps this should be how I construct all node data anyway?
        if self.init_spec is not None:
            node_info = self.init_spec.get('node_info', {})
            try:
                new_node.reset_info(node_info[str(nid)])
                self.logger.warning('Node %s reconstructed.', nid)
            except:
                self.logger.warning("Node nid: %s not found in restart information.  Bringing up node in a clean configuration.", nid, exc_info=True)
                #put into admin_down
                new_node.admin_down = True
                new_node.status = 'down'
                self.logger.warning('Node %s marked down.', nid)
        new_node.managed = True
        self.nodes[str(nid)] = new_node
        self.logger.warning('Node %s added to tracking.', nid)

    @exposed
    def update_node_state(self):
        '''update the state of cray nodes. Check reservation status and system
        status as reported by ALPS

        '''
        #Check cleanup progress.  Check ALPS reservations.  Check allocated
        #nodes.  If there is no resource reservation and the node is not in
        #current alps reservations, the node is ready to schedule again.
        now = time.time()
        startup_time_to_clear = []
        #clear pending starttimes.
        for jobid, start_time in self.pending_starts.items():
            if int(now) > int(start_time):
                startup_time_to_clear.append(jobid)
        for jobid in startup_time_to_clear:
            del self.pending_starts[jobid]

        self.check_killing_aprun()
        with self._node_lock:
            fetch_time_start = time.time()
            try:
                #I have seen problems with the kitchen-sink query here, where
                #the output gets truncated on its way into Cobalt.
                #inventory = ALPSBridge.fetch_inventory(resinfo=True) #This is a full-refresh,
                #determine if summary may be used under normal operation
                #updated for >= 1.6 interface
                inven_nodes = ALPSBridge.extract_system_node_data(ALPSBridge.system())
                reservations = ALPSBridge.fetch_reservations()
                #reserved_nodes = ALPSBridge.reserved_nodes()
            except (ALPSBridge.ALPSError, ComponentLookupError):
                _logger.warning('Error contacting ALPS for state update.  Aborting this update',
                        exc_info=True)
                return
            inven_reservations = reservations.get('reservations', [])
            fetch_time_start = time.time()
            #_logger.debug("time in ALPS fetch: %s seconds", (time.time() - fetch_time_start))
            start_time = time.time()
            self._detect_rereservation(inven_reservations)
            # check our reservation objects.  If a res object doesn't correspond
            # to any backend reservations, this reservation object should be
            # dropped
            alps_res_to_delete = []
            current_alps_res_ids = [int(res['reservation_id']) for res in
                    inven_reservations]
            res_jobid_to_delete = []
            if self.alps_reservations == {}:
                # if we have nodes in cleanup-pending but no alps reservations,
                # then the nodes in cleanup pending are considered idle (or
                # at least not in cleanup).  Hardware check can catch these
                # later. This catches leftover reservations from hard-shutdowns
                # while running.
                for node in self.nodes.values():
                    if node.status in ['cleanup', 'cleanup-pending']:
                        node.status = 'idle'
            for alps_res in self.alps_reservations.values():
                if alps_res.jobid in self.pending_starts.keys():
                    continue #Get to this only after timeout happens
                #find alps_id associated reservation
                if int(alps_res.alps_res_id) not in current_alps_res_ids:
                    for node_id in alps_res.node_ids:
                        if not self.nodes[str(node_id)].reserved:
                            #pending hardware status update
                            self.nodes[str(node_id)].status = 'idle'
                    res_jobid_to_delete.append(alps_res.jobid)
                    _logger.info('Nodes %s cleanup complete.',
                            compact_num_list(alps_res.node_ids))
            for jobid in res_jobid_to_delete:
                _logger.info('%s: ALPS reservation for this job complete.', jobid)
                del self.alps_reservations[str(jobid)]
            #process group should already be on the way down since cqm released the
            #resource reservation
            cleanup_nodes = [node for node in self.nodes.values()
                             if node.status in ['cleanup-pending', 'cleanup']]
            #If we have a block marked for cleanup, send a release message.
            released_res_jobids = []
            cleaned_nodes = []
            for node in cleanup_nodes:
                found = False
                for alps_res in self.alps_reservations.values():
                    if str(node.node_id) in alps_res.node_ids:
                        found = True
                        if alps_res.jobid not in released_res_jobids:
                            #send only one release per iteration
                            apids = alps_res.release()
                            if apids is not None:
                                for apid in apids:
                                    self.signal_aprun(apid)
                            released_res_jobids.append(alps_res.jobid)
                if not found:
                    # There is no alps reservation to release, cleanup is
                    # already done.  This happens with very poorly timed
                    # qdel requests. Status will be set properly with the
                    # subsequent hardware status check.
                    _logger.info('Node %s cleanup complete.', node.node_id)
                    node.status = 'idle'
                    cleaned_nodes.append(node)
            for node in cleaned_nodes:
                cleanup_nodes.remove(node)

        #find hardware status
            #so we do this only once for nodes being added.
            #full inventory fetch is expensive.
            recon_inventory = None
            for inven_node in inven_nodes.values():
                if self.nodes.has_key(str(inven_node['node_id'])):
                    node = self.nodes[str(inven_node['node_id'])]
                    node.role = inven_node['role'].upper()
                    node.attributes.update(inven_node['attrs'])
                    if node.reserved:
                        #node marked as reserved.
                        if self.alps_reservations.has_key(str(node.reserved_jobid)):
                            node.status = 'busy'
                        else:
                            # check to see if the resource reservation should be
                            # released.
                            if node.reserved_until >= now:
                                node.status = 'allocated'
                            else:
                                node.release(user=None, jobid=None, force=True)
                    else:
                        node.status = inven_node['state'].upper()
                        if node.role.upper() not in ['BATCH'] and node.status == 'idle':
                            node.status = 'alps-interactive'
                else:
                    # Apparently, we CAN add nodes on the fly.  The node would
                    # have been disabled.  We need to add a new node and update
                    # its state.
                    _logger.warning('Unknown node %s found. Starting reconstruction.', inven_node['node_id'])
                    try:
                        if recon_inventory is None:
                            recon_inventory = ALPSBridge.fetch_inventory()
                    except:
                        _logger.error('Failed to fetch inventory.  Will retry on next pass.', exc_info=True)
                    else:
                        self._reconstruct_node(inven_node, recon_inventory)
                   # _logger.error('UNS: ALPS reports node %s but not in our node list.',
                   #               inven_node['node_id'])
            #should down win over running in terms of display?
            #keep node that are marked for cleanup still in cleanup
            for node in cleanup_nodes:
                node.status = 'cleanup-pending'
        #_logger.debug("time in UNS lock: %s seconds", (time.time() - start_time))
        return

    def _detect_rereservation(self, inven_reservations):
        '''Detect and update the ALPS reservation associated with a running job.
        We are only concerned with BATCH reservations.  Others would be
        associated with running jobs, and should not be touched.

        '''
        def _construct_alps_res():
            with self._node_lock:
                job_nodes = [node.node_id for node in self.nodes.values()
                        if node.reserved_jobid == int(alps_res['batch_id'])]
            new_resspec = {'reserved_nodes': job_nodes,
                           'reservation_id': str(alps_res['reservation_id']),
                           'pagg_id': 0 #unknown.  Not used here.
                            }
            new_jobspec = {'jobid': int(alps_res['batch_id']),
                           'user' : alps_res['user_name']}

            return ALPSReservation(new_jobspec, new_resspec, self.nodes)

        replaced_reservation = None
        for alps_res in inven_reservations:
            try:
                #This traversal is terrible. May want to hide this in the API
                #somewhere
                if alps_res['ApplicationArray'][0]['Application'][0]['CommandArray'][0]['Command'][0]['cmd'] != 'BASIL':
                    # Not a reservation we're in direct control of.
                    continue
            except (KeyError, IndexError):
                #not a batch reservation
                continue
            if str(alps_res['batch_id']) in self.alps_reservations.keys():
                # This is a reservation we may already know about
                if (int(alps_res['reservation_id']) ==
                        self.alps_reservations[str(alps_res['batch_id'])].alps_res_id):
                    # Already know about this one
                    continue
                # we have a re-reservation.  If this has a batch id, we need
                # to add it to our list of tracked reservations, and inherit
                # other reservation details.  We can pull the reservation
                # information out of reserve_resources_until.

                # If this is a BATCH reservation and no hardware has that
                # reservation id, then this reservation needs to be released
                # Could happen if we have a job starting right at the RRU
                # boundary.
                new_alps_res = _construct_alps_res()
                tracked_res = self.alps_reservations.get(new_alps_res.jobid, None)
                if tracked_res is not None:
                    try:
                        apids = tracked_res.release()
                    except ALPSBridge.ALPSError:
                        # backend reservation probably is gone, which is why
                        # we are here in the first place.
                        pass
                self.alps_reservations[str(alps_res['batch_id'])] = new_alps_res
            else:
                #this is a basil reservation we don't know about already.
                new_alps_res = _construct_alps_res()
                if len(new_alps_res.node_ids) == 0:
                    # This reservation has no resources, so Cobalt's internal
                    # resource reservation tracking has no record.  This needs to
                    # be removed.
                    new_alps_res.release()
                else:
                    self.alps_reservations[str(alps_res['batch_id'])] = new_alps_res
        return

    def signal_aprun(self, aprun_id, signame='SIGINT'):
        '''Signal an aprun by aprun id (application_id).  Does not block.
        Use check_killing_aprun to determine completion/termination.  Does not
        depend on the host the aprun(s) was launched from.

        Input:
            aprun_id - integer application id number.
            signame  - string name of signal to send (default: SIGINT)
        Notes:
            Valid signals to apkill are:
            SIGHUP, SIGINT, SIGQUIT, SIGTERM, SIGABRT, SIGUSR1, SIGUSR2, SIGURG,
            and SIGWINCH (from apkill(1) man page.)  Also allowing SIGKILL.

        '''
        #Expect changes with an API update

        #mark legal signals from the documentation
        if (signame not in ['SIGHUP', 'SIGINT', 'SIGQUIT', 'SIGTERM', 'SIGABRT',
            'SIGUSR1', 'SIGUSR2', 'SIGURG','SIGWINCH', 'SIGKILL']):
            raise ValueError('%s is not a legal value for signame.' % signame)
        try:
            retval = Cobalt.Proxy.ComponentProxy('system_script_forker').fork(
                    [APKILL_CMD, '-%s' % signame, '%d' % int(aprun_id)],
                    'aprun_termination', '%s cleanup:'% aprun_id)
            _logger.info("killing backend ALPS application_id: %s", aprun_id)
        except xmlrpclib.Fault:
            _logger.warning("XMLRPC Error while killing backend job: %s, will retry.",
                    aprun_id, exc_info=True)
        except:
            _logger.critical("Unknown Error while killing backend job: %s, will retry.",
                    aprun_id, exc_info=True)
        else:
            self.killing_jobs[aprun_id] = retval
        return

    def check_killing_aprun(self):
        '''Check that apkill commands have completed and clean them from the
        system_script_forker.  Allows for non-blocking cleanup initiation.

        '''

        try:
            system_script_forker = Cobalt.Proxy.ComponentProxy('system_script_forker')
        except:
            self.logger.critical("Cannot connect to system_script_forker.",
                    exc_info=True)
            return
        complete_jobs = []
        rev_killing_jobs = dict([(v,k) for (k,v) in self.killing_jobs.iteritems()])
        removed_jobs = []
        current_killing_jobs = system_script_forker.get_children(None, self.killing_jobs.values())

        for job in current_killing_jobs:
            if job['complete']:
                del self.killing_jobs[rev_killing_jobs[int(job['id'])]]
                removed_jobs.append(job['id'])
        system_script_forker.cleanup_children(removed_jobs)
        return

    @exposed
    def find_queue_equivalence_classes(self, reservation_dict,
            active_queue_names, passthrough_blocking_res_list=[]):
        '''Aggregate queues together that can impact each other in the same
        general pass (both drain and backfill pass) in find_job_location.
        Equivalence classes will then be used in find_job_location to consider
        placement of jobs and resources, in separate passes.  If multiple
        equivalence classes are returned, then they must contain orthogonal sets
        of resources.

        Inputs:
        reservation_dict -- a mapping of active reservations to resources.
                            These will block any job in a normal queue.
        active_queue_names -- A list of queues that are currently enabled.
                              Queues that are not in the 'running' state
                              are ignored.
        passthrough_blocking_res_list -- Not used on Cray systems currently.
                                         This is for handling hardware that
                                         supports partitioned interconnect
                                         networks.

        Output:
        A list of dictionaries of queues that may impact each other while
        scheduling resources.

        Side effects:
        None

        Internal Data:
        queue_assignments: a mapping of queues to schedulable locations.

        '''
        equiv = []
        node_active_queues = set([])
        self.current_equivalence_classes = [] #reverse mapping of queues to nodes
        for node in self.nodes.values():
            if node.managed and node.schedulable:
                #only consider nodes that we are scheduling.
                node_active_queues = set([])
                for queue in node.queues:
                    if queue in active_queue_names:
                        node_active_queues.add(queue)
                if node_active_queues == set([]):
                    #this node has nothing active.  The next check can get
                    #expensive, so skip it.
                    continue
            #determine the queues that overlap.  Hardware has to be included so
            #that reservations can be mapped into the equiv classes.
            found_a_match = False
            for e in equiv:
                for queue in node_active_queues:
                    if queue in e['queues']:
                        e['data'].add(node.node_id)
                        e['queues'] = e['queues'] | set(node_active_queues)
                        found_a_match = True
                        break
                if found_a_match:
                    break
            if not found_a_match:
                equiv.append({'queues': set(node_active_queues),
                              'data': set([node.node_id]),
                              'reservations': set()})
        #second pass to merge queue lists based on hardware
        real_equiv = []
        for eq_class in equiv:
            found_a_match = False
            for e in real_equiv:
                if e['queues'].intersection(eq_class['queues']):
                    e['queues'].update(eq_class['queues'])
                    e['data'].update(eq_class['data'])
                    found_a_match = True
                    break
            if not found_a_match:
                real_equiv.append(eq_class)
        equiv = real_equiv
        #add in reservations:
        for eq_class in equiv:
            for res_name in reservation_dict:
                for node_hunk in reservation_dict[res_name].split(":"):
                    for node_id in expand_num_list(node_hunk):
                        if str(node_id) in eq_class['data']:
                            eq_class['reservations'].add(res_name)
                            break
            #don't send what could be a large block list back in the return
            for key in eq_class:
                eq_class[key] = list(eq_class[key])
            del eq_class['data']
            self.current_equivalence_classes.append(eq_class)
        return equiv
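
    # Illustrative sketch of the returned structure (hypothetical values):
    #   [{'queues': ['default', 'debug'], 'reservations': ['maint']}]
    # 'queues' and 'reservations' come back as plain lists; the 'data' node-id
    # set is stripped before returning to keep the XML-RPC payload small.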

    @staticmethod
    def _setup_special_locaitons(job):
        forbidden = set([str(loc) for loc in chain_loc_list(job.get('forbidden', []))])
        required = set([str(loc) for loc in chain_loc_list(job.get('required', []))])
        requested_locations = set([str(n) for n in expand_num_list(job['attrs'].get('location', ''))])
        return (forbidden, required, requested_locations)
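
    # Illustrative sketch (hypothetical job dictionary): for
    #   job = {'forbidden': ['1-2'], 'required': ['10'],
    #          'attrs': {'location': '10-11'}}
    # this would return (set(['1', '2']), set(['10']), set(['10', '11'])),
    # assuming expand_num_list expands '1-2' to [1, 2].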

    def _assemble_queue_data(self, job, idle_only=True, drain_time=None):
        '''put together data for a queue, or queue-like reservation structure.

        Input:
            job - dictionary of job data.
            idle_only - [default: True] if True, return only idle nodes.
                        Otherwise return nodes in any non-down status.

        return a sorted list of valid node ids to run on.
        if idle_only is set to false, returns a set of candidate draining nodes.

        '''
        # RESERVATION SUPPORT: Reservation queues are ephemeral, so we will
        # not find the queue normally. In the event of a reservation we'll
        # have to intersect required nodes with the idle and available
        # we also have to forbid a bunch of locations, in this case.
        unavailable_nodes = []
        forbidden, required, requested_locations = self._setup_special_locaitons(job)
        requested_loc_in_forbidden = False
        for loc in requested_locations:
            if loc in forbidden:
                #don't spam the logs.
                requested_loc_in_forbidden = True
                break
        if job['queue'] not in self.nodes_by_queue.keys():
            # Either a new queue with no resources, or a possible
            # reservation need to do extra work for a reservation
            node_id_list = list(required - forbidden)
        else:
            node_id_list = list(set(self.nodes_by_queue[job['queue']]) - forbidden)
        if requested_locations != set([]): # handle attrs location= requests
            job_set = set([str(nid) for nid in requested_locations])
            if job['queue'] not in self.nodes_by_queue.keys():
                #we're in a reservation and need to further restrict nodes.
                if job_set <= set(node_id_list):
                    # We are in a reservation there are no forbidden nodes.
                    node_id_list = list(requested_locations)
                else:
                    # We can't run this job.  Insufficient resources in this
                    # reservation to do so.  Don't risk blocking anything.
                    node_id_list = []
            else:
                #normal queues.  Restrict to the non-reserved nodes.
                if job_set <= set([str(node_id) for node_id in
                                    self.nodes_by_queue[job['queue']]]):
                    node_id_list = list(requested_locations)
                    if not set(node_id_list).isdisjoint(forbidden):
                        # this job has requested locations that are a part of an
                        # active reservation.  Remove locations and drop available
                        # nodecount appropriately.
                        node_id_list = list(set(node_id_list) - forbidden)
                else:
                    node_id_list = []
                    if not requested_loc_in_forbidden:
                        raise ValueError("forbidden locations not in queue")
        with self._node_lock:
            if idle_only:
                unavailable_nodes = [node_id for node_id in node_id_list
                        if self.nodes[str(node_id)].status not in ['idle']]
            else:
                unavailable_nodes = [node_id for node_id in node_id_list
                        if self.nodes[str(node_id)].status in
                        self.nodes[str(node_id)].DOWN_STATUSES]
            if drain_time is not None:
                unavailable_nodes.extend([node_id for node_id in node_id_list
                    if (self.nodes[str(node_id)].draining and
                        (self.nodes[str(node_id)].drain_until - BACKFILL_EPSILON) < int(drain_time))])
        for node_id in set(unavailable_nodes):
            node_id_list.remove(node_id)
        return sorted(node_id_list, key=lambda nid: int(nid))
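
    # Illustrative sketch (hypothetical node state): with nodes '1'-'4' in the
    # 'default' queue and node '3' down, a call such as
    #   self._assemble_queue_data({'queue': 'default', 'attrs': {}},
    #                             idle_only=False)
    # would be expected to return ['1', '2', '4'] (sorted numerically), while
    # idle_only=True would also drop any node whose status is not 'idle'.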

    def _select_first_nodes(self, job, node_id_list):
        '''Given a list of nids, select the first node count nodes from the
        list.  This is the target for alternate allocator replacement.

        Input:
            job - dictionary of job data from the scheduler
            node_id_list - a list of possible candidate nodes

        Return:
            A list of nodes.  [] if insufficient nodes for the allocation.

        Note: hold the node lock while doing this.  We really don't want an
        update to happen while doing this.

        '''
        ret_nodes = []
        with self._node_lock:
            if int(job['nodes']) <= len(node_id_list):
                node_id_list.sort(key=lambda nid: int(nid))
                ret_nodes = node_id_list[:int(job['nodes'])]
        return ret_nodes

    def _select_first_nodes_prefer_memory_match(self, job, node_id_list):
        '''Given a list of nids, select the first node count nodes from the
        list.  Prefer nodes that match the memory modes for a given job, then
        go in nid order.

        Input:
            job - dictionary of job data from the scheduler
            node_id_list - a list of possible candidate nodes

        Return:
            A list of nodes.  [] if insufficient nodes for the allocation.

        Note: hold the node lock while doing this.  We really don't want an
        update to happen while doing this.

        '''
        if job.get('attrs', {}).get('mcdram', None) is None or job.get('attrs', {}).get('numa', None) is None:
            # insufficient information to include a mode match
            return self._select_first_nodes(job, node_id_list)
        ret_nodes = []
        with self._node_lock:
            considered_nodes = [node for node in self.nodes.values() if node.node_id in node_id_list]
            for node in considered_nodes:
                if (node.attributes['hbm_cache_pct'] == MCDRAM_TO_CACHEPCT[job['attrs']['mcdram']] and
                        node.attributes['numa_cfg'] == job['attrs']['numa']):
                    ret_nodes.append(node)
            if len(ret_nodes) < int(job['nodes']):
                node_id_list.sort(key=lambda nid: int(nid))
                for nid in node_id_list:
                    if self.nodes[nid] not in ret_nodes:
                        ret_nodes.append(self.nodes[nid])
        ret_nids = [node.node_id for node in ret_nodes]
        return ret_nids[:int(job['nodes'])]

    def _associate_and_run_immediate(self, job, resource_until_time, node_id_list):
        '''Given a list of idle node ids, choose a set that can run a job
        immediately, if a set exists in the node_id_list.

        Inputs:
            job - Dictionary of job data
            node_id_list - a list of string node id values

        Side Effects:
            Will reserve resources in ALPS and will set resource reservations on
            allocated nodes.

        Return:
            None if no match, otherwise the pairing of a jobid and set of nids
            that have been allocated to a job.

        '''
        compact_locs = None
        if int(job['nodes']) <= len(node_id_list):
            #this job can be run immediately
            to_alps_list = self._select_first_nodes_prefer_memory_match(job, node_id_list)
            job_locs = self._ALPS_reserve_resources(job, resource_until_time,
                    to_alps_list)
            if job_locs is not None and len(job_locs) == int(job['nodes']):
                compact_locs = compact_num_list(job_locs)
                #temporary reservation until job actually starts
                self.pending_starts[job['jobid']] = resource_until_time
                self.reserve_resources_until(compact_locs, resource_until_time, job['jobid'])
        return compact_locs

    @locking
    @exposed
    def find_job_location(self, arg_list, end_times, pt_blocking_locations=[]):
        '''Given a list of jobs, and when jobs are ending, return a set of
        locations mapped to a jobid that can be run.  Also, set up draining
        as-needed to run top-scored jobs and backfill when possible.

        Called once per equivalence class.

        Args:
            arg_list: A list of dictionaries containing information on jobs to
                   consider.
            end_times: list containing a mapping of locations and the times jobs
                    running on those locations are scheduled to end.  End times
                    are in seconds from Epoch UTC.
            pt_blocking_locations: Not used for this system.  Used in partitioned
                                interconnect schemes. A list of locations that
                                should not be used to prevent passthrough issues
                                with other scheduler reservations.

        Returns:
        A mapping of jobids to locations where those jobs can run immediately.

        Side Effects:
        May set draining flags and backfill windows on nodes.
        If nodes are being returned to run, set ALPS reservations on them.

        Notes:
        The reservation set on ALPS resources is unconfirmed at this point.
        This reservation may timeout.  The forker when it confirms will detect
        this and will re-reserve as needed.  The alps reservation id may change
        in this case post job startup.

        pt_blocking_locations may be used later to block out nodes that are
        impacted by warmswap operations.

        This function *DOES NOT* hold the component lock.

        '''
        now = time.time()
        resource_until_time = now + TEMP_RESERVATION_TIME
        with self._node_lock:
            # only valid for this scheduler iteration.
            self._clear_draining_for_queues(arg_list[0]['queue'])
            #check if we can run immediately, if not drain.  Keep going until all
            #nodes are marked for draining or have a pending run.
            best_match = {} #jobid: list(locations)
            for job in arg_list:
                label = '%s/%s' % (job['jobid'], job['user'])
                # walltime is in minutes.  We should really fix the storage of
                # that --PMR
                job_endtime = now + (int(job['walltime']) * 60)
                try:
                    node_id_list = self._assemble_queue_data(job, drain_time=job_endtime)
                    available_node_list = self._assemble_queue_data(job, idle_only=False)
                except ValueError:
                    _logger.warning('Job %s: requesting locations that are not in requested queue.',
                            job['jobid'])
                    continue
                if int(job['nodes']) > len(available_node_list):
                    # Insufficient operational nodes for this job at all
                    continue
                elif len(node_id_list) == 0:
                    pass #allow for draining pass to run.
                elif int(job['nodes']) <= len(node_id_list):
                    # enough nodes are in a working state to consider the job.
                    # enough nodes are idle that we can run this job
                    compact_locs = self._associate_and_run_immediate(job,
                            resource_until_time, node_id_list)
                    # do we want to allow multiple placements in a single
                    # pass? That would likely help startup times.
                    if compact_locs is not None:
                        best_match[job['jobid']] = [compact_locs]
                        _logger.info("%s: Job selected for running on nodes  %s",
                                label, compact_locs)
                        break #for now only select one location
                if DRAIN_MODE in ['backfill', 'drain-only']:
                    # drain sufficient nodes for this job to run
                    drain_node_ids = self._select_nodes_for_draining(job,
                            end_times)
                    if drain_node_ids != []:
                        _logger.info('%s: nodes %s selected for draining.', label,
                                compact_num_list(drain_node_ids))
        return best_match

    def _ALPS_reserve_resources(self, job, new_time, node_id_list):
        '''Call ALPS to reserve resources.  Use their allocator.  We can change
        this later to substitute our own allocator if-needed.

        Input:
        Nodecount - number of nodes to reserve for a job.

        Returns: a list of locations that ALPS has reserved.

        Side effects:
        Places an ALPS reservation on resources.  Calls reserve resources until
        on the set of nodes, and will mark nodes as allocated.

        '''
        try:
            res_info = ALPSBridge.reserve(job['user'], job['jobid'],
                int(job['nodes']), job['attrs'], node_id_list)
        except ALPSBridge.ALPSError as exc:
            _logger.warning('unable to reserve resources from ALPS: %s', exc.message)
            return None
        new_alps_res = None
        if res_info is not None:
            new_alps_res = ALPSReservation(job, res_info, self.nodes)
            self.alps_reservations[job['jobid']] = new_alps_res
        return new_alps_res.node_ids

    def _clear_draining_for_queues(self, queue):
        '''Given a list of queues, remove the draining flags on nodes.

        queues - a queue in an equivalence class to consider.  This will clear
        the entire equiv class

        return - none

        Note: does not acquire block lock.  Must be locked externally.

        '''
        now = int(time.time())
        current_queues = []
        for equiv_class in self.current_equivalence_classes:
            if queue in equiv_class['queues']:
                current_queues = equiv_class['queues']
        if current_queues:
            with self._node_lock:
                for node in self.nodes.values():
                    for q in node.queues:
                        if q in current_queues:
                            node.clear_drain()

    def _select_nodes_for_draining(self, job, end_times):
        '''Select nodes to be drained.  Set backfill windows on draining
        nodes.

        Inputs:
            job - dictionary of job information to consider
            end_times - a list of nodes and their endtimes should be sorted
                        in order of location preference

        Side Effect:
            end_times will be sorted in ascending end-time order

        Return:
            List of node ids that have been selected for draining for this job,
            as well as the expected drain time.

        '''
        now = int(time.time())
        end_times.sort(key=lambda x: int(x[1]))
        drain_list = []
        candidate_list = []
        cleanup_statuses = ['cleanup', 'cleanup-pending']
        forbidden, required, requested_locations = self._setup_special_locaitons(job)
        try:
            node_id_list = self._assemble_queue_data(job, idle_only=False)
        except ValueError:
            _logger.warning('Job %s: requesting locations that are not in queue.', job['jobid'])
        else:
            with self._node_lock:
                drain_time = None
                candidate_drain_time = None
                # remove the following from the list:
                # 1. idle nodes that are already marked for draining.
                # 2. Nodes that are in an in-use status (busy, allocated).
                # 3. Nodes marked for cleanup that are not allocated to a real
                #    jobid. CLEANING_ID is a sentinel jobid value so we can set
                #    a drain window on cleaning nodes easily.  Not sure if this
                #    is the right thing to do. --PMR
                candidate_list = []
                candidate_list = [nid for nid in node_id_list
                        if (not self.nodes[str(nid)].draining and
                            (self.nodes[str(nid)].status in ['idle']) or
                            (self.nodes[str(nid)].status in cleanup_statuses)
                            )]
                for nid in candidate_list:
                    if self.nodes[str(nid)].status in cleanup_statuses:
                        candidate_drain_time = now + CLEANUP_DRAIN_WINDOW
                for loc_time in end_times:
                    running_nodes = [str(nid) for nid in
                            expand_num_list(",".join(loc_time[0]))
                            if ((job['queue'] in self.nodes[str(nid)].queues or
                                nid in required) and
                                not self.nodes[str(nid)].draining)]
                    for nid in running_nodes:
                        # We set a drain on all running nodes for use in a later
                        # pass so that we can "favor" draining on the longest
                        # running set of nodes.
                        if (self.nodes[str(nid)].status != 'down' and
                                self.nodes[str(nid)].managed):
                            self.nodes[str(nid)].set_drain(loc_time[1], job['jobid'])
                    candidate_list.extend([nid for nid in running_nodes if
                        self.nodes[str(nid)].draining])
                    candidate_drain_time = int(loc_time[1])
                    if len(candidate_list) >= int(job['nodes']):
                        # Enough nodes have been found to drain for this job
                        break
                candidates = set(candidate_list)
                # We need to further restrict this list based on requested
                # location and reservation avoidance data:
                if forbidden != set([]):
                    candidates = candidates.difference(forbidden)
                if requested_locations != set([]):
                    candidates = candidates.intersection(requested_locations)
                candidate_list = list(candidates)
                if len(candidate_list) >= int(job['nodes']):
                    drain_time = candidate_drain_time
                if drain_time is not None:
                    # order the node ids by id and drain-time. Longest drain
                    # first
                    candidate_list.sort(key=lambda nid: int(nid))
                    candidate_list.sort(reverse=True,
                            key=lambda nid: self.nodes[str(nid)].drain_until)
                    drain_list = candidate_list[:int(job['nodes'])]
                    for nid in drain_list:
                        self.nodes[str(nid)].set_drain(drain_time, job['jobid'])
        return drain_list

    @exposed
    def reserve_resources_until(self, location, new_time, jobid):
        '''Place, adjust and release resource reservations.

        Input:
            location: the location to reserve [list of nodes]
            new_time: the new end time of a resource reservation
            jobid: the Cobalt jobid that this reservation is for

        Output:
            True if resource reservation is successfully placed.
            Otherwise False.

        Side Effects:
            * Sets/releases reservation on specified node list
            * Sets/releases ALPS reservation.  If set, the reservation is
              unconfirmed.  Confirmation must occur via a cray_script_forker.

        Notes:
            This holds the node data lock while it's running.

        '''
        completed = False
        with self._node_lock:
            succeeded_nodes = []
            failed_nodes = []
            #assemble from location list:
            exp_location = []
            if isinstance(location, list):
                exp_location = chain_loc_list(location)
            elif isinstance(location, str):
                exp_location = expand_num_list(location)
            else:
                raise TypeError("location type is %s.  Must be one of 'list' or 'str'" % type(location))
            if new_time is not None:
                #reserve the location. Unconfirmed reservations will have to
                #be lengthened.  Maintain a list of what we have reserved, so we
                #extend on the fly, and so that we don't accidentally get an
                #overallocation/user
                for loc in exp_location:
                    # node = self.nodes[self.node_name_to_id[loc]]
                    node = self.nodes[str(loc)]
                    try:
                        node.reserve(new_time, jobid=jobid)
                        succeeded_nodes.append(int(loc))
                    except Cobalt.Exceptions.ResourceReservationFailure as exc:
                        self.logger.error(exc)
                        failed_nodes.append(loc)
                self.logger.info("job %s: nodes '%s' now reserved until %s",
                    jobid, compact_num_list(succeeded_nodes),
                    time.asctime(time.gmtime(new_time)))
                if failed_nodes != []:
                    self.logger.warning("job %s: failed to reserve nodes '%s'",
                        jobid, compact_num_list(failed_nodes))
                else:
                    completed = True
            else:
                #release the reservation and the underlying ALPS reservation
                #and the reservation on blocks.