#
#

# Copyright (C) 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the iallocator code."""

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
from ganeti import rpc
from ganeti import serializer
from ganeti import utils

import ganeti.masterd.instance as gmi


_STRING_LIST = ht.TListOf(ht.TString)
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
   # pylint: disable=E1101
   # Class '...' has no 'OP_ID' member
   "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                        opcodes.OpInstanceMigrate.OP_ID,
                        opcodes.OpInstanceReplaceDisks.OP_ID]),
   })))

_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
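# Illustrative only: per the validators above, a node-evacuation result is a
# 3-tuple of (moved, failed, jobs), e.g.
#   ([["inst1.example.com", "group1", ["node2.example.com"]]],
#    [["inst2.example.com", "not enough memory"]],
#    [[{"OP_ID": "OP_INSTANCE_MIGRATE"}]])
# All names and the failure reason are made-up examples; only the nesting and
# the allowed OP_ID values are actually checked here.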

_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)


class _AutoReqParam(outils.AutoSlots):
  """Meta class for request definitions.

  """
  @classmethod
  def _GetSlots(mcs, attrs):
    """Extract the slots out of REQ_PARAMS.

    """
    params = attrs.setdefault("REQ_PARAMS", [])
    return [slot for (slot, _) in params]


class IARequestBase(outils.ValidatedSlots):
  """A generic IAllocator request object.

  """
  __metaclass__ = _AutoReqParam

  MODE = NotImplemented
  REQ_PARAMS = []
  REQ_RESULT = NotImplemented

  def __init__(self, **kwargs):
    """Constructor for IARequestBase.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    you should not pass arguments which are not in the REQ_PARAMS
    attribute for this class.

    """
    outils.ValidatedSlots.__init__(self, **kwargs)

    self.Validate()

  def Validate(self):
    """Validates all parameters of the request.

    """
    assert self.MODE in constants.VALID_IALLOCATOR_MODES

    for (param, validator) in self.REQ_PARAMS:
      if not hasattr(self, param):
        raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
                                   errors.ECODE_INVAL)

      value = getattr(self, param)
      if not validator(value):
        raise errors.OpPrereqError(("Request parameter '%s' has invalid"
                                    " type %s/value %s") %
                                    (param, type(value), value),
                                    errors.ECODE_INVAL)

  def GetRequest(self, cfg):
    """Gets the request data dict.

    @param cfg: The configuration instance

    """
    raise NotImplementedError

  def ValidateResult(self, ia, result):
    """Validates the result of a request.

    @param ia: The IAllocator instance
    @param result: The IAllocator run result
    @raises ResultValidationError: If validation fails

    """
    if ia.success and not self.REQ_RESULT(result):
      raise errors.ResultValidationError("iallocator returned invalid result,"
                                         " expected %s, got %s" %
                                         (self.REQ_RESULT, result))


class IAReqInstanceAlloc(IARequestBase):
  """An instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_ALLOC
  REQ_PARAMS = [
    _INST_NAME,
    ("memory", ht.TNonNegativeInt),
    ("spindle_use", ht.TNonNegativeInt),
    ("disks", ht.TListOf(ht.TDict)),
    ("disk_template", ht.TString),
    ("os", ht.TString),
    ("tags", _STRING_LIST),
    ("nics", ht.TListOf(ht.TDict)),
    ("vcpus", ht.TInt),
    ("hypervisor", ht.TString),
    ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
  REQ_RESULT = ht.TList
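  # Illustrative example (all values are made up): such a request is
  # constructed with keyword arguments matching REQ_PARAMS, e.g.
  #   IAReqInstanceAlloc(name="inst1.example.com", memory=1024, spindle_use=1,
  #                      disks=[{"size": 1024}], disk_template="drbd",
  #                      os="debian-image", tags=[], nics=[{}], vcpus=2,
  #                      hypervisor="xen-pvm", node_whitelist=None)
  # The iallocator result for it is a list of node names, one per
  # RequiredNodes().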

  def RequiredNodes(self):
    """Calculates the required nodes based on the disk_template.

    """
    if self.disk_template in constants.DTS_INT_MIRROR:
      return 2
    else:
      return 1

  def GetRequest(self, cfg):
    """Requests a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)

    return {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.RequiredNodes(),
      "hypervisor": self.hypervisor,
      }

  def ValidateResult(self, ia, result):
    """Validates a single instance allocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    if ia.success and len(result) != self.RequiredNodes():
      raise errors.ResultValidationError("iallocator returned invalid number"
                                         " of nodes (%s), required %s" %
                                         (len(result), self.RequiredNodes()))


class IAReqMultiInstanceAlloc(IARequestBase):
  """A multi-instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
  REQ_PARAMS = [
    ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
  _MASUCCESS = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _MAFAILED = ht.TListOf(ht.TNonEmptyString)
  REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([_MASUCCESS, _MAFAILED]))
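  # Shape of a valid result, as checked above (instance and node names are
  # made-up examples):
  #   [[["inst1.example.com", ["node1.example.com", "node2.example.com"]]],
  #    ["inst2.example.com"]]
  # i.e. a pair of (successful allocations with their chosen nodes,
  # names of instances that could not be allocated).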

  def GetRequest(self, cfg):
    return {
      "instances": [iareq.GetRequest(cfg) for iareq in self.instances],
      }


class IAReqRelocate(IARequestBase):
  """A relocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  def GetRequest(self, cfg):
    """Request a relocation of an instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = cfg.GetInstanceInfo(self.inst_uuid)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.inst_uuid)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if (instance.disk_template in constants.DTS_INT_MIRROR and
        len(instance.secondary_nodes) != 1):
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)

    return {
      "name": instance.name,
      "disk_space_total": disk_space,
      "required_nodes": 1,
      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
      }

  def ValidateResult(self, ia, result):
    """Validates the result of a relocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    node2group = dict((name, ndata["group"])
                      for (name, ndata) in ia.in_data["nodes"].items())

    fn = compat.partial(self._NodesToGroups, node2group,
                        ia.in_data["nodegroups"])

    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                        ia.cfg.GetNodeNames([instance.primary_node]))
    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))

    if ia.success and not set(result_groups).issubset(request_groups):
      raise errors.ResultValidationError("Groups of nodes returned by"
                                         " iallocator (%s) differ from original"
                                         " groups (%s)" %
                                         (utils.CommaJoin(result_groups),
                                          utils.CommaJoin(request_groups)))

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)


class IAReqNodeEvac(IARequestBase):
  """A node evacuation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_NODE_EVAC
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }


class IAReqGroupChange(IARequestBase):
  """A group change request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_CHG_GROUP
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("target_groups", _STRING_LIST),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for group-change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has the following sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, req):
    self.cfg = cfg
    self.rpc = rpc_runner
    self.req = req
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init result fields
    self.success = self.info = self.result = None

    self._BuildInputData(req)

  def _ComputerClusterDataNodeInfo(self, node_list, cluster_info,
                                   hypervisor_name):
    """Prepare and execute node info call.

    @type node_list: list of strings
    @param node_list: list of nodes' UUIDs
    @type cluster_info: L{objects.Cluster}
    @param cluster_info: the cluster's information from the config
    @type hypervisor_name: string
    @param hypervisor_name: the hypervisor name
    @rtype: same as the result of the node info RPC call
    @return: the result of the node info RPC call

    """
    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_list)
    storage_units = utils.storage.GetStorageUnitsOfCluster(
        self.cfg, include_spindles=True)
    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
    return self.rpc.call_node_info(node_list, storage_units, hvspecs, es_flags)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cluster_info = self.cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": self.cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ninfo = self.cfg.GetAllNodesInfo()
    iinfo = self.cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]

    if isinstance(self.req, IAReqInstanceAlloc):
      hypervisor_name = self.req.hypervisor
      node_whitelist = self.req.node_whitelist
    elif isinstance(self.req, IAReqRelocate):
      hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor
      node_whitelist = None
    else:
      hypervisor_name = cluster_info.primary_hypervisor
      node_whitelist = None

    has_lvm = utils.storage.IsLvmEnabled(cluster_info.enabled_disk_templates)
    node_data = self._ComputerClusterDataNodeInfo(node_list, cluster_info,
                                                  hypervisor_name)

    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors,
                                       cluster_info.hvparams)

    data["nodegroups"] = self._ComputeNodeGroupData(self.cfg)

    config_ndata = self._ComputeBasicNodeData(self.cfg, ninfo, node_whitelist)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata, has_lvm)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(self.cfg, cluster_info,
                                                  i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    cluster = cfg.GetClusterInfo()
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
      "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.GetTags()),
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: node dict

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": (ninfo.offline or
                  not (node_whitelist is None or
                       ninfo.name in node_whitelist)),
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
    """Extract an attribute from the hypervisor's node information.

    This is a helper function to extract data from the hypervisor's information
    about the node, as part of the result of a node_info query.

    @type hv_info: dict of strings
    @param hv_info: dictionary of node information from the hypervisor
    @type node_name: string
    @param node_name: name of the node
    @type attr: string
    @param attr: key of the attribute in the hv_info dictionary
    @rtype: integer
    @return: the value of the attribute
    @raises errors.OpExecError: if key not in dictionary or value not
      integer

    """
    if attr not in hv_info:
      raise errors.OpExecError("Node '%s' didn't return attribute"
                               " '%s'" % (node_name, attr))
    value = hv_info[attr]
    if not isinstance(value, int):
      raise errors.OpExecError("Node '%s' returned invalid value"
                               " for '%s': %s" %
                               (node_name, attr, value))
    return value

  @staticmethod
  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type has_lvm: boolean
    @param has_lvm: whether or not LVM storage information is requested
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
       free_spindles)

    """
    # TODO: replace this with proper storage reporting
    if has_lvm:
      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
         space_info, constants.ST_LVM_VG)
      if not lvm_vg_info:
        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
                                 % (node_name))
      total_disk = lvm_vg_info["storage_size"]
      free_disk = lvm_vg_info["storage_free"]
      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
         space_info, constants.ST_LVM_PV)
      if not lvm_pv_info:
        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
                                 % (node_name))
      total_spindles = lvm_pv_info["storage_size"]
      free_spindles = lvm_pv_info["storage_free"]
    else:
      # we didn't even ask the node for VG status, so use zeros
      total_disk = free_disk = 0
      total_spindles = free_spindles = 0
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
                             input_mem_free):
    """Compute memory used by primary instances.

    @rtype: tuple (int, int, int)
    @returns: A tuple of three integers: 1. the sum of memory used by primary
      instances on the node (including the ones that are currently down), 2.
      the sum of memory used by primary instances of the node that are up, 3.
      the amount of memory that is free on the node considering the current
      usage of the instances.

    """
    i_p_mem = i_p_up_mem = 0
    mem_free = input_mem_free
    for iinfo, beinfo in instance_list:
      if iinfo.primary_node == node_uuid:
        i_p_mem += beinfo[constants.BE_MAXMEM]
        if iinfo.name not in node_instances_info[node_uuid].payload:
          i_used_mem = 0
        else:
          i_used_mem = int(node_instances_info[node_uuid]
                           .payload[iinfo.name]["memory"])
        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
        mem_free -= max(0, i_mem_diff)

        if iinfo.admin_state == constants.ADMINST_UP:
          i_p_up_mem += beinfo[constants.BE_MAXMEM]
    return (i_p_mem, i_p_up_mem, mem_free)

  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                              node_results, has_lvm):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nuuid, nresult in node_data.items():
      ninfo = node_cfg[nuuid]
      assert ninfo.name in node_results, "Missing basic data for node %s" % \
                                         ninfo.name

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % ninfo.name)
        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                ninfo.name)
        (_, space_info, (hv_info, )) = nresult.payload
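        # The node_info payload is unpacked here: space_info holds the
        # storage report consumed by _ComputeStorageDataFromSpaceInfo and
        # hv_info the per-hypervisor data (memory/CPU figures) read via
        # _GetAttributeFromHypervisorNodeData.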

        mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
                                                            "memory_free")

        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
             i_list, node_iinfo, nuuid, mem_free)
        (total_disk, free_disk, total_spindles, free_spindles) = \
            self._ComputeStorageDataFromSpaceInfo(space_info, ninfo.name,
                                                  has_lvm)

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_total"),
          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_dom0"),
          "free_memory": mem_free,
          "total_disk": total_disk,
          "free_disk": free_disk,
          "total_spindles": total_spindles,
          "free_spindles": free_spindles,
          "total_cpus": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "cpu_total"),
          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "cpu_dom0"),
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[ninfo.name])
        node_results[ninfo.name] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cfg, cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
                 cfg.GetNodeNames(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode,
                   constants.IDISK_SPINDLES: dsk.spindles}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "disks_active": iinfo.disks_active,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
                                                    pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _BuildInputData(self, req):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = req.GetRequest(self.cfg)
    request["type"] = req.MODE
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    self.req.ValidateResult(self, self.result)
    self.out_data = rdict
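
# Illustrative usage sketch (not part of this module; the surrounding LU
# object and the allocator name "hail" are assumptions made for the example):
#
#   req = IAReqRelocate(inst_uuid=instance.uuid,
#                       relocate_from_node_uuids=[old_node_uuid])
#   ial = IAllocator(lu.cfg, lu.rpc, req)
#   ial.Run("hail")
#   if not ial.success:
#     raise errors.OpPrereqError("iallocator failed: %s" % ial.info,
#                                errors.ECODE_NORES)
#   new_nodes = ial.result   # already checked against req.REQ_RESULT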