lib/masterd/iallocator.py @ b3724640


#
#

# Copyright (C) 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the iallocator code."""

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
import ganeti.rpc.node as rpc
from ganeti import serializer
from ganeti import utils

import ganeti.masterd.instance as gmi


_STRING_LIST = ht.TListOf(ht.TString)
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
   # pylint: disable=E1101
   # Class '...' has no 'OP_ID' member
   "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                        opcodes.OpInstanceMigrate.OP_ID,
                        opcodes.OpInstanceReplaceDisks.OP_ID]),
   })))

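# Validators for the shape of a node-evacuation result: a list of moved
# instances (each a triple of two non-empty strings plus a list of node
# names), a list of (instance name, error message) pairs for failures, and
# the follow-up jobs, restricted by _JOB_LIST above to failover, migrate and
# replace-disks opcodes.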
_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)


class _AutoReqParam(outils.AutoSlots):
  """Meta class for request definitions.

  """
  @classmethod
  def _GetSlots(mcs, attrs):
    """Extract the slots out of REQ_PARAMS.

    """
    params = attrs.setdefault("REQ_PARAMS", [])
    return [slot for (slot, _) in params]

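# Request types below describe their parameters in REQ_PARAMS as
# (name, validator) pairs; the _AutoReqParam metaclass turns those names into
# slots, and IARequestBase.Validate() checks every declared parameter against
# its validator when a request object is constructed.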
class IARequestBase(outils.ValidatedSlots):
  """A generic IAllocator request object.

  """
  __metaclass__ = _AutoReqParam

  MODE = NotImplemented
  REQ_PARAMS = []
  REQ_RESULT = NotImplemented

  def __init__(self, **kwargs):
    """Constructor for IARequestBase.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    it means that you should not pass arguments which are not in the
    REQ_PARAMS attribute for this class.

    """
    outils.ValidatedSlots.__init__(self, **kwargs)

    self.Validate()

  def Validate(self):
    """Validates all parameters of the request.


    This method returns L{None} if the validation succeeds, or raises
    an exception otherwise.

    @rtype: NoneType
    @return: L{None}, if the validation succeeds

    @raise Exception: validation fails

    """
    assert self.MODE in constants.VALID_IALLOCATOR_MODES

    for (param, validator) in self.REQ_PARAMS:
      if not hasattr(self, param):
        raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
                                   errors.ECODE_INVAL)

      value = getattr(self, param)
      if not validator(value):
        raise errors.OpPrereqError(("Request parameter '%s' has invalid"
                                    " type %s/value %s") %
                                    (param, type(value), value),
                                    errors.ECODE_INVAL)

  def GetRequest(self, cfg):
    """Gets the request data dict.

    @param cfg: The configuration instance

    """
    raise NotImplementedError

  def ValidateResult(self, ia, result):
    """Validates the result of a request.

    @param ia: The IAllocator instance
    @param result: The IAllocator run result
    @raises ResultValidationError: If validation fails

    """
    if ia.success and not self.REQ_RESULT(result):
      raise errors.ResultValidationError("iallocator returned invalid result,"
                                         " expected %s, got %s" %
                                         (self.REQ_RESULT, result))


class IAReqInstanceAlloc(IARequestBase):
  """An instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_ALLOC
  REQ_PARAMS = [
    _INST_NAME,
    ("memory", ht.TNonNegativeInt),
    ("spindle_use", ht.TNonNegativeInt),
    ("disks", ht.TListOf(ht.TDict)),
    ("disk_template", ht.TString),
    ("os", ht.TString),
    ("tags", _STRING_LIST),
    ("nics", ht.TListOf(ht.TDict)),
    ("vcpus", ht.TInt),
    ("hypervisor", ht.TString),
    ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
  REQ_RESULT = ht.TList

  def RequiredNodes(self):
    """Calculates the required nodes based on the disk_template.

    """
    if self.disk_template in constants.DTS_INT_MIRROR:
      return 2
    else:
      return 1

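  # GetRequest() builds the dict placed under "request" in the iallocator
  # input. A purely illustrative example (all values made up):
  #   {"name": "inst1.example.com", "disk_template": "drbd", "memory": 1024,
  #    "vcpus": 1, "os": "debian-image", "tags": [], "spindle_use": 1,
  #    "disks": [{"size": 10240}], "nics": [], "required_nodes": 2,
  #    "disk_space_total": <computed via gmi.ComputeDiskSize>,
  #    "hypervisor": "xen-pvm"}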
  def GetRequest(self, cfg):
    """Requests a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)

    return {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.RequiredNodes(),
      "hypervisor": self.hypervisor,
      }

  def ValidateResult(self, ia, result):
    """Validates a single instance allocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    if ia.success and len(result) != self.RequiredNodes():
      raise errors.ResultValidationError("iallocator returned invalid number"
                                         " of nodes (%s), required %s" %
                                         (len(result), self.RequiredNodes()))

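# The multi-allocation result is validated as a pair of lists: successful
# allocations as (instance name, chosen nodes) items, and the names of the
# instances that could not be allocated.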
class IAReqMultiInstanceAlloc(IARequestBase):
  """A multi-instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
  REQ_PARAMS = [
    ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
  _MASUCCESS = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _MAFAILED = ht.TListOf(ht.TNonEmptyString)
  REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([_MASUCCESS, _MAFAILED]))

  def GetRequest(self, cfg):
    return {
      "instances": [iareq.GetRequest(cfg) for iareq in self.instances],
      }


class IAReqRelocate(IARequestBase):
  """A relocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  def GetRequest(self, cfg):
    """Requests a relocation of an instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = cfg.GetInstanceInfo(self.inst_uuid)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.inst_uuid)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if (instance.disk_template in constants.DTS_INT_MIRROR and
        len(instance.secondary_nodes) != 1):
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)

    return {
      "name": instance.name,
      "disk_space_total": disk_space,
      "required_nodes": 1,
      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
      }

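  # A relocation result is only accepted if the returned nodes stay within
  # the node groups the instance already spans (its primary node plus the
  # nodes it is being relocated from); see the subset check below.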
  def ValidateResult(self, ia, result):
    """Validates the result of a relocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    node2group = dict((name, ndata["group"])
                      for (name, ndata) in ia.in_data["nodes"].items())

    fn = compat.partial(self._NodesToGroups, node2group,
                        ia.in_data["nodegroups"])

    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                        ia.cfg.GetNodeNames([instance.primary_node]))
    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))

    if ia.success and not set(result_groups).issubset(request_groups):
      raise errors.ResultValidationError("Groups of nodes returned by"
                                         " iallocator (%s) differ from original"
                                         " groups (%s)" %
                                         (utils.CommaJoin(result_groups),
                                          utils.CommaJoin(request_groups)))

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)


class IAReqNodeEvac(IARequestBase):
  """A node evacuation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_NODE_EVAC
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("evac_mode", ht.TEvacMode),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }


class IAReqGroupChange(IARequestBase):
  """A group change request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_CHG_GROUP
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("target_groups", _STRING_LIST),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for group change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }

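# Overall flow: the constructor calls _BuildInputData(), which combines the
# request's GetRequest() dict with the cluster data gathered by
# _ComputeClusterData() and serializes everything into self.in_text; Run()
# then executes the named iallocator script via RPC, and _ValidateResult()
# parses and validates the reply.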
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has several sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in_text, out_text, in_data, out_data), that
      represent the input (to the external script) in text and data
      structure format, and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, req):
    self.cfg = cfg
    self.rpc = rpc_runner
    self.req = req
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init result fields
    self.success = self.info = self.result = None

    self._BuildInputData(req)

  def _ComputeClusterDataNodeInfo(self, disk_templates, node_list,
                                  cluster_info, hypervisor_name):
    """Prepare and execute node info call.

    @type disk_templates: list of string
    @param disk_templates: the disk templates of the instances to be allocated
    @type node_list: list of strings
    @param node_list: list of nodes' UUIDs
    @type cluster_info: L{objects.Cluster}
    @param cluster_info: the cluster's information from the config
    @type hypervisor_name: string
    @param hypervisor_name: the hypervisor name
    @rtype: same as the result of the node info RPC call
    @return: the result of the node info RPC call

    """
    storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
    storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
                                                    node_list)
    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
    return self.rpc.call_node_info(node_list, storage_units, hvspecs)

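  # The resulting self.in_data dict carries the top-level keys "version",
  # "cluster_name", "cluster_tags", "enabled_hypervisors", "ipolicy",
  # "nodegroups", "nodes" and "instances"; the "request" key is added later
  # by _BuildInputData().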
  def _ComputeClusterData(self, disk_template=None):
    """Compute the generic allocator input data.

    @type disk_template: string
    @param disk_template: the disk template of the instances to be allocated

    """
    cluster_info = self.cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": self.cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ninfo = self.cfg.GetAllNodesInfo()
    iinfo = self.cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]

    if isinstance(self.req, IAReqInstanceAlloc):
      hypervisor_name = self.req.hypervisor
      node_whitelist = self.req.node_whitelist
    elif isinstance(self.req, IAReqRelocate):
      hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor
      node_whitelist = None
    else:
      hypervisor_name = cluster_info.primary_hypervisor
      node_whitelist = None

    if not disk_template:
      disk_template = cluster_info.enabled_disk_templates[0]

    node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
                                                 cluster_info, hypervisor_name)

    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors,
                                       cluster_info.hvparams)

    data["nodegroups"] = self._ComputeNodeGroupData(self.cfg)

    config_ndata = self._ComputeBasicNodeData(self.cfg, ninfo, node_whitelist)
    data["nodes"] = self._ComputeDynamicNodeData(
        ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(self.cfg, cluster_info,
                                                  i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    cluster = cfg.GetClusterInfo()
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
      "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.GetTags()),
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: (node dict, node config)

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": (ninfo.offline or
                  not (node_whitelist is None or
                       ninfo.name in node_whitelist)),
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
    """Extract an attribute from the hypervisor's node information.

    This is a helper function to extract data from the hypervisor's information
    about the node, as part of the result of a node_info query.

    @type hv_info: dict of strings
    @param hv_info: dictionary of node information from the hypervisor
    @type node_name: string
    @param node_name: name of the node
    @type attr: string
    @param attr: key of the attribute in the hv_info dictionary
    @rtype: integer
    @return: the value of the attribute
    @raises errors.OpExecError: if key not in dictionary or value not
      integer

    """
    if attr not in hv_info:
      raise errors.OpExecError("Node '%s' didn't return attribute"
                               " '%s'" % (node_name, attr))
    value = hv_info[attr]
    if not isinstance(value, int):
      raise errors.OpExecError("Node '%s' returned invalid value"
                               " for '%s': %s" %
                               (node_name, attr, value))
    return value

  @staticmethod
  def _ComputeStorageDataFromSpaceInfoByTemplate(
      space_info, node_name, disk_template):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type disk_template: string
    @param disk_template: the disk template to report space for
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
       free_spindles)

    """
    storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
    if storage_type not in constants.STS_REPORT:
      total_disk = total_spindles = 0
      free_disk = free_spindles = 0
    else:
      template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
          space_info, disk_template)
      if not template_space_info:
        raise errors.OpExecError("Node '%s' didn't return space info for disk"
                                 " template '%s'" % (node_name, disk_template))
      total_disk = template_space_info["storage_size"]
      free_disk = template_space_info["storage_free"]

      total_spindles = 0
      free_spindles = 0
      if disk_template in constants.DTS_LVM:
        lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
           space_info, constants.ST_LVM_PV)
        if lvm_pv_info:
          total_spindles = lvm_pv_info["storage_size"]
          free_spindles = lvm_pv_info["storage_free"]
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type has_lvm: boolean
    @param has_lvm: whether or not LVM storage information is requested
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
       free_spindles)

    """
    # TODO: replace this with proper storage reporting
    if has_lvm:
      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
         space_info, constants.ST_LVM_VG)
      if not lvm_vg_info:
        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
                                 % (node_name))
      total_disk = lvm_vg_info["storage_size"]
      free_disk = lvm_vg_info["storage_free"]
      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
         space_info, constants.ST_LVM_PV)
      if not lvm_pv_info:
        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
                                 % (node_name))
      total_spindles = lvm_pv_info["storage_size"]
      free_spindles = lvm_pv_info["storage_free"]
    else:
      # we didn't even ask the node for VG status, so use zeros
      total_disk = free_disk = 0
      total_spindles = free_spindles = 0
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
                             input_mem_free):
    """Compute memory used by primary instances.

    @rtype: tuple (int, int, int)
    @returns: A tuple of three integers: 1. the sum of memory used by primary
      instances on the node (including the ones that are currently down), 2.
      the sum of memory used by primary instances of the node that are up, 3.
      the amount of memory that is free on the node considering the current
      usage of the instances.

    """
    i_p_mem = i_p_up_mem = 0
    mem_free = input_mem_free
    for iinfo, beinfo in instance_list:
      if iinfo.primary_node == node_uuid:
        i_p_mem += beinfo[constants.BE_MAXMEM]
        if iinfo.name not in node_instances_info[node_uuid].payload:
          i_used_mem = 0
        else:
          i_used_mem = int(node_instances_info[node_uuid]
                           .payload[iinfo.name]["memory"])
        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
        mem_free -= max(0, i_mem_diff)

        if iinfo.admin_state == constants.ADMINST_UP:
          i_p_up_mem += beinfo[constants.BE_MAXMEM]
    return (i_p_mem, i_p_up_mem, mem_free)

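  # Offline nodes keep only their static (config-based) entries; for online
  # nodes, the memory, disk, spindle and CPU figures reported by the node
  # info RPC are merged into the per-node dicts below.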
  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                              node_results, disk_template):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nuuid, nresult in node_data.items():
      ninfo = node_cfg[nuuid]
      assert ninfo.name in node_results, "Missing basic data for node %s" % \
                                         ninfo.name

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % ninfo.name)
        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                ninfo.name)
        (_, space_info, (hv_info, )) = nresult.payload

        mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
                                                            "memory_free")

        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
             i_list, node_iinfo, nuuid, mem_free)
        (total_disk, free_disk, total_spindles, free_spindles) = \
            self._ComputeStorageDataFromSpaceInfoByTemplate(
                space_info, ninfo.name, disk_template)

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_total"),
          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_dom0"),
          "free_memory": mem_free,
          "total_disk": total_disk,
          "free_disk": free_disk,
          "total_spindles": total_spindles,
          "free_spindles": free_spindles,
          "total_cpus": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "cpu_total"),
          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "cpu_dom0"),
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[ninfo.name])
        node_results[ninfo.name] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cfg, cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
                 cfg.GetNodeNames(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode,
                   constants.IDISK_SPINDLES: dsk.spindles}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "disks_active": iinfo.disks_active,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
                                                    pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _BuildInputData(self, req):
    """Build input data structures.

    """
    request = req.GetRequest(self.cfg)
    disk_template = None
    if "disk_template" in request:
      disk_template = request["disk_template"]
    self._ComputeClusterData(disk_template=disk_template)

    request["type"] = req.MODE
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

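  # "name" is the name of the iallocator script to execute; it is run on the
  # master node through the iallocator_runner RPC with self.in_text as its
  # input data.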
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    ial_params = self.cfg.GetDefaultIAllocatorParameters()

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

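  # The reply must be a dict with at least the keys "success", "info" and
  # "result"; older iallocators that still return "nodes" instead of
  # "result" are handled by the compatibility branch below.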
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    self.req.ValidateResult(self, self.result)
    self.out_data = rdict