
root / lib / objects.py @ 1d4a4b26


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
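
# Illustrative sketch (not part of the original module): FillDict layers the
# custom values over a deep copy of the defaults and then drops any skipped
# keys.  The dictionaries below are hypothetical.
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3})
#   {'a': 1, 'b': 3}
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}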
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
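
# Illustrative sketch (not part of the original module): every existing group
# is filled up with the supplied defaults, while a missing target yields a
# single default group.  Group and parameter names are hypothetical.
#
#   >>> UpgradeGroupedParams({"group1": {"p": 1}}, {"p": 0, "q": 2})
#   {'group1': {'p': 1, 'q': 2}}
#   >>> UpgradeGroupedParams(None, {"p": 0})   # -> {constants.PP_DEFAULT: {"p": 0}}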
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
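
# Illustrative sketch (not part of the original module): the old single
# "memory" backend parameter is split into explicit maximum and minimum
# memory values.  The dictionary below is hypothetical.
#
#   >>> bep = {constants.BE_MEMORY: 128}
#   >>> UpgradeBeParams(bep)
#   >>> bep == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}
#   True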
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: node parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children that are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    that are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def UpgradeConfig(self):
270
    """Fill defaults for missing configuration values.
271

272
    This method will be called at configuration load time, and its
273
    implementation will be object dependent.
274

275
    """
276
    pass
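
# Illustrative sketch (not part of the original module): a minimal
# ConfigObject subclass only declares __slots__; unset slots read as None,
# and ToDict/FromDict round-trip the attributes that are set.  The class
# below is hypothetical.
#
#   >>> class _Example(ConfigObject):
#   ...   __slots__ = ["name", "size"]
#   >>> obj = _Example(name="foo")
#   >>> obj.size is None
#   True
#   >>> obj.ToDict()
#   {'name': 'foo'}
#   >>> _Example.FromDict(obj.ToDict()).name
#   'foo'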
277

    
278

    
279
class TaggableObject(ConfigObject):
280
  """An generic class supporting tags.
281

282
  """
283
  __slots__ = ["tags"]
284
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
285

    
286
  @classmethod
287
  def ValidateTag(cls, tag):
288
    """Check if a tag is valid.
289

290
    If the tag is invalid, an errors.TagError will be raised. The
291
    function has no return value.
292

293
    """
294
    if not isinstance(tag, basestring):
295
      raise errors.TagError("Invalid tag type (not a string)")
296
    if len(tag) > constants.MAX_TAG_LEN:
297
      raise errors.TagError("Tag too long (>%d characters)" %
298
                            constants.MAX_TAG_LEN)
299
    if not tag:
300
      raise errors.TagError("Tags cannot be empty")
301
    if not cls.VALID_TAG_RE.match(tag):
302
      raise errors.TagError("Tag contains invalid characters")
303

    
304
  def GetTags(self):
305
    """Return the tags list.
306

307
    """
308
    tags = getattr(self, "tags", None)
309
    if tags is None:
310
      tags = self.tags = set()
311
    return tags
312

    
313
  def AddTag(self, tag):
314
    """Add a new tag.
315

316
    """
317
    self.ValidateTag(tag)
318
    tags = self.GetTags()
319
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
320
      raise errors.TagError("Too many tags")
321
    self.GetTags().add(tag)
322

    
323
  def RemoveTag(self, tag):
324
    """Remove a tag.
325

326
    """
327
    self.ValidateTag(tag)
328
    tags = self.GetTags()
329
    try:
330
      tags.remove(tag)
331
    except KeyError:
332
      raise errors.TagError("Tag not found")
333

    
334
  def ToDict(self):
335
    """Taggable-object-specific conversion to standard python types.
336

337
    This replaces the tags set with a list.
338

339
    """
340
    bo = super(TaggableObject, self).ToDict()
341

    
342
    tags = bo.get("tags", None)
343
    if isinstance(tags, set):
344
      bo["tags"] = list(tags)
345
    return bo
346

    
347
  @classmethod
348
  def FromDict(cls, val):
349
    """Custom function for instances.
350

351
    """
352
    obj = super(TaggableObject, cls).FromDict(val)
353
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
354
      obj.tags = set(obj.tags)
355
    return obj
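
# Illustrative sketch (not part of the original module): any subclass (for
# example the Node objects defined below) keeps its tags in a set at
# runtime, validates them on every change and serialises them as a list.
# The names used here are hypothetical.
#
#   >>> node = Node(name="node1.example.com")
#   >>> node.AddTag("webfarm")
#   >>> Node.FromDict(node.ToDict()).GetTags()
#   set(['webfarm'])
#   >>> node.AddTag("bad tag")        # raises errors.TagError (invalid chars)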
356

    
357

    
358
class MasterNetworkParameters(ConfigObject):
359
  """Network configuration parameters for the master
360

361
  @ivar name: master name
362
  @ivar ip: master IP
363
  @ivar netmask: master netmask
364
  @ivar netdev: master network device
365
  @ivar ip_family: master IP family
366

367
  """
368
  __slots__ = [
369
    "name",
370
    "ip",
371
    "netmask",
372
    "netdev",
373
    "ip_family",
374
    ]
375

    
376

    
377
class ConfigData(ConfigObject):
378
  """Top-level config object."""
379
  __slots__ = [
380
    "version",
381
    "cluster",
382
    "nodes",
383
    "nodegroups",
384
    "instances",
385
    "networks",
386
    "serial_no",
387
    ] + _TIMESTAMPS
388

    
389
  def ToDict(self):
390
    """Custom function for top-level config data.
391

392
    This just replaces the cluster object and the containers of nodes,
393
    instances, node groups and networks with standard python types.
394

395
    """
396
    mydict = super(ConfigData, self).ToDict()
397
    mydict["cluster"] = mydict["cluster"].ToDict()
398
    for key in "nodes", "instances", "nodegroups", "networks":
399
      mydict[key] = outils.ContainerToDicts(mydict[key])
400

    
401
    return mydict
402

    
403
  @classmethod
404
  def FromDict(cls, val):
405
    """Custom function for top-level config data
406

407
    """
408
    obj = super(ConfigData, cls).FromDict(val)
409
    obj.cluster = Cluster.FromDict(obj.cluster)
410
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
411
    obj.instances = \
412
      outils.ContainerFromDicts(obj.instances, dict, Instance)
413
    obj.nodegroups = \
414
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
415
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
416
    return obj
417

    
418
  def HasAnyDiskOfType(self, dev_type):
419
    """Check if in there is at disk of the given type in the configuration.
420

421
    @type dev_type: L{constants.LDS_BLOCK}
422
    @param dev_type: the type to look for
423
    @rtype: boolean
424
    @return: boolean indicating if a disk of the given type was found or not
425

426
    """
427
    for instance in self.instances.values():
428
      for disk in instance.disks:
429
        if disk.IsBasedOnDiskType(dev_type):
430
          return True
431
    return False
432

    
433
  def UpgradeConfig(self):
434
    """Fill defaults for missing configuration values.
435

436
    """
437
    self.cluster.UpgradeConfig()
438
    for node in self.nodes.values():
439
      node.UpgradeConfig()
440
    for instance in self.instances.values():
441
      instance.UpgradeConfig()
442
    if self.nodegroups is None:
443
      self.nodegroups = {}
444
    for nodegroup in self.nodegroups.values():
445
      nodegroup.UpgradeConfig()
446
    if self.cluster.drbd_usermode_helper is None:
447
      # To decide if we set a helper, let's check if at least one instance has
448
      # a DRBD disk. This does not cover all the possible scenarios but it
449
      # gives a good approximation.
450
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
451
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
452
    if self.networks is None:
453
      self.networks = {}
454
    for network in self.networks.values():
455
      network.UpgradeConfig()
456
    self._UpgradeEnabledDiskTemplates()
457

    
458
  def _UpgradeEnabledDiskTemplates(self):
459
    """Upgrade the cluster's enabled disk templates by inspecting the currently
460
       enabled and/or used disk templates.
461

462
    """
463
    # enabled_disk_templates in the cluster config were introduced in 2.8.
464
    # Remove this code once upgrading from earlier versions is deprecated.
465
    if not self.cluster.enabled_disk_templates:
466
      template_set = \
467
        set([inst.disk_template for inst in self.instances.values()])
468
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
469
      if self.cluster.volume_group_name:
470
        template_set.add(constants.DT_DRBD8)
471
        template_set.add(constants.DT_PLAIN)
472
      # FIXME: Adapt this when dis/enabling at configure time is removed.
473
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
474
      # might currently not be used.
475
      if constants.ENABLE_FILE_STORAGE:
476
        template_set.add(constants.DT_FILE)
477
      if constants.ENABLE_SHARED_FILE_STORAGE:
478
        template_set.add(constants.DT_SHARED_FILE)
479
      # Set enabled_disk_templates to the inferred disk templates. Order them
480
      # according to a preference list that is based on Ganeti's history of
481
      # supported disk templates.
482
      self.cluster.enabled_disk_templates = []
483
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
484
        if preferred_template in template_set:
485
          self.cluster.enabled_disk_templates.append(preferred_template)
486
          template_set.remove(preferred_template)
487
      self.cluster.enabled_disk_templates.extend(list(template_set))
488

    
489

    
490
class NIC(ConfigObject):
491
  """Config object representing a network card."""
492
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
493

    
494
  @classmethod
495
  def CheckParameterSyntax(cls, nicparams):
496
    """Check the given parameters for validity.
497

498
    @type nicparams:  dict
499
    @param nicparams: dictionary with parameter names/values
500
    @raise errors.ConfigurationError: when a parameter is not valid
501

502
    """
503
    mode = nicparams[constants.NIC_MODE]
504
    if (mode not in constants.NIC_VALID_MODES and
505
        mode != constants.VALUE_AUTO):
506
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
507

    
508
    if (mode == constants.NIC_MODE_BRIDGED and
509
        not nicparams[constants.NIC_LINK]):
510
      raise errors.ConfigurationError("Missing bridged NIC link")
511

    
512

    
513
class Disk(ConfigObject):
514
  """Config object representing a block device."""
515
  __slots__ = ["name", "dev_type", "logical_id", "physical_id",
516
               "children", "iv_name", "size", "mode", "params"] + _UUID
517

    
518
  def CreateOnSecondary(self):
519
    """Test if this device needs to be created on a secondary node."""
520
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
521

    
522
  def AssembleOnSecondary(self):
523
    """Test if this device needs to be assembled on a secondary node."""
524
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
525

    
526
  def OpenOnSecondary(self):
527
    """Test if this device needs to be opened on a secondary node."""
528
    return self.dev_type in (constants.LD_LV,)
529

    
530
  def StaticDevPath(self):
531
    """Return the device path if this device type has a static one.
532

533
    Some devices (LVM for example) always live at the same /dev/ path,
534
    irrespective of their status. For such devices, we return this
535
    path, for others we return None.
536

537
    @warning: The path returned is not a normalized pathname; callers
538
        should check that it is a valid path.
539

540
    """
541
    if self.dev_type == constants.LD_LV:
542
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
543
    elif self.dev_type == constants.LD_BLOCKDEV:
544
      return self.logical_id[1]
545
    elif self.dev_type == constants.LD_RBD:
546
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
547
    return None
548

    
549
  def ChildrenNeeded(self):
550
    """Compute the needed number of children for activation.
551

552
    This method will return either -1 (all children) or a positive
553
    number denoting the minimum number of children needed for
554
    activation (only mirrored devices will usually return >=0).
555

556
    Currently, only DRBD8 supports diskless activation (therefore we
557
    return 0), for all other we keep the previous semantics and return
558
    -1.
559

560
    """
561
    if self.dev_type == constants.LD_DRBD8:
562
      return 0
563
    return -1
564

    
565
  def IsBasedOnDiskType(self, dev_type):
566
    """Check if the disk or its children are based on the given type.
567

568
    @type dev_type: L{constants.LDS_BLOCK}
569
    @param dev_type: the type to look for
570
    @rtype: boolean
571
    @return: boolean indicating if a device of the given type was found or not
572

573
    """
574
    if self.children:
575
      for child in self.children:
576
        if child.IsBasedOnDiskType(dev_type):
577
          return True
578
    return self.dev_type == dev_type
579

    
580
  def GetNodes(self, node):
581
    """This function returns the nodes this device lives on.
582

583
    Given the node on which the parent of the device lives (or, in
584
    case of a top-level device, the primary node of the device's
585
    instance), this function will return a list of nodes on which this
586
    device needs to (or can) be assembled.
587

588
    """
589
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
590
                         constants.LD_BLOCKDEV, constants.LD_RBD,
591
                         constants.LD_EXT]:
592
      result = [node]
593
    elif self.dev_type in constants.LDS_DRBD:
594
      result = [self.logical_id[0], self.logical_id[1]]
595
      if node not in result:
596
        raise errors.ConfigurationError("DRBD device passed unknown node")
597
    else:
598
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
599
    return result
600

    
601
  def ComputeNodeTree(self, parent_node):
602
    """Compute the node/disk tree for this disk and its children.
603

604
    This method, given the node on which the parent disk lives, will
605
    return the list of all (node, disk) pairs which describe the disk
606
    tree in the most compact way. For example, a drbd/lvm stack
607
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
608
    which represent all the top-level devices on the nodes.
609

610
    """
611
    my_nodes = self.GetNodes(parent_node)
612
    result = [(node, self) for node in my_nodes]
613
    if not self.children:
614
      # leaf device
615
      return result
616
    for node in my_nodes:
617
      for child in self.children:
618
        child_result = child.ComputeNodeTree(node)
619
        if len(child_result) == 1:
620
          # child (and all its descendants) is simple, doesn't split
621
          # over multiple hosts, so we don't need to describe it, our
622
          # own entry for this node describes it completely
623
          continue
624
        else:
625
          # check if child nodes differ from my nodes; note that
626
          # subdisk can differ from the child itself, and be instead
627
          # one of its descendants
628
          for subnode, subdisk in child_result:
629
            if subnode not in my_nodes:
630
              result.append((subnode, subdisk))
631
            # otherwise child is under our own node, so we ignore this
632
            # entry (but probably the other results in the list will
633
            # be different)
634
    return result
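
  # Illustrative sketch (not part of the original module): for a DRBD disk
  # backed by two local LVs the tree collapses to one (node, disk) pair per
  # node, because the LV children live on the same nodes as their parent.
  # Node names, volume group and DRBD secret are hypothetical.
  #
  #   >>> lv_data = Disk(dev_type=constants.LD_LV, size=1024,
  #   ...                logical_id=("xenvg", "data"))
  #   >>> lv_meta = Disk(dev_type=constants.LD_LV, size=128,
  #   ...                logical_id=("xenvg", "meta"))
  #   >>> drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
  #   ...             logical_id=("node1", "node2", 11000, 0, 0, "secret"),
  #   ...             children=[lv_data, lv_meta])
  #   >>> [(n, d is drbd) for (n, d) in drbd.ComputeNodeTree("node1")]
  #   [('node1', True), ('node2', True)]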
635

    
636
  def ComputeGrowth(self, amount):
637
    """Compute the per-VG growth requirements.
638

639
    This only works for VG-based disks.
640

641
    @type amount: integer
642
    @param amount: the desired increase in (user-visible) disk space
643
    @rtype: dict
644
    @return: a dictionary of volume-groups and the required size
645

646
    """
647
    if self.dev_type == constants.LD_LV:
648
      return {self.logical_id[0]: amount}
649
    elif self.dev_type == constants.LD_DRBD8:
650
      if self.children:
651
        return self.children[0].ComputeGrowth(amount)
652
      else:
653
        return {}
654
    else:
655
      # Other disk types do not require VG space
656
      return {}
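
  # Illustrative sketch (not part of the original module): growing a plain
  # LV by 512 MiB requires 512 MiB in its volume group, while a DRBD disk
  # delegates the computation to its data child.  Names are hypothetical.
  #
  #   >>> lv = Disk(dev_type=constants.LD_LV, size=1024,
  #   ...           logical_id=("xenvg", "data"))
  #   >>> lv.ComputeGrowth(512)
  #   {'xenvg': 512}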
657

    
658
  def RecordGrow(self, amount):
659
    """Update the size of this disk after growth.
660

661
    This method recurses over the disk's children and updates their
662
    size correspondingly. The method needs to be kept in sync with the
663
    actual algorithms from bdev.
664

665
    """
666
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
667
                         constants.LD_RBD, constants.LD_EXT):
668
      self.size += amount
669
    elif self.dev_type == constants.LD_DRBD8:
670
      if self.children:
671
        self.children[0].RecordGrow(amount)
672
      self.size += amount
673
    else:
674
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
675
                                   " disk type %s" % self.dev_type)
676

    
677
  def Update(self, size=None, mode=None):
678
    """Apply changes to size and mode.
679

680
    """
681
    if self.dev_type == constants.LD_DRBD8:
682
      if self.children:
683
        self.children[0].Update(size=size, mode=mode)
684
    else:
685
      assert not self.children
686

    
687
    if size is not None:
688
      self.size = size
689
    if mode is not None:
690
      self.mode = mode
691

    
692
  def UnsetSize(self):
693
    """Sets recursively the size to zero for the disk and its children.
694

695
    """
696
    if self.children:
697
      for child in self.children:
698
        child.UnsetSize()
699
    self.size = 0
700

    
701
  def SetPhysicalID(self, target_node, nodes_ip):
702
    """Convert the logical ID to the physical ID.
703

704
    This is used only for drbd, which needs ip/port configuration.
705

706
    The routine descends down and updates its children also, because
707
    this helps when only the top device is passed to the remote
708
    node.
709

710
    Arguments:
711
      - target_node: the node we wish to configure for
712
      - nodes_ip: a mapping of node name to ip
713

714
    The target_node must exist in nodes_ip, and must be one of the
715
    nodes in the logical ID for each of the DRBD devices encountered
716
    in the disk tree.
717

718
    """
719
    if self.children:
720
      for child in self.children:
721
        child.SetPhysicalID(target_node, nodes_ip)
722

    
723
    if self.logical_id is None and self.physical_id is not None:
724
      return
725
    if self.dev_type in constants.LDS_DRBD:
726
      pnode, snode, port, pminor, sminor, secret = self.logical_id
727
      if target_node not in (pnode, snode):
728
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
729
                                        target_node)
730
      pnode_ip = nodes_ip.get(pnode, None)
731
      snode_ip = nodes_ip.get(snode, None)
732
      if pnode_ip is None or snode_ip is None:
733
        raise errors.ConfigurationError("Can't find primary or secondary node"
734
                                        " for %s" % str(self))
735
      p_data = (pnode_ip, port)
736
      s_data = (snode_ip, port)
737
      if pnode == target_node:
738
        self.physical_id = p_data + s_data + (pminor, secret)
739
      else: # it must be secondary, we tested above
740
        self.physical_id = s_data + p_data + (sminor, secret)
741
    else:
742
      self.physical_id = self.logical_id
743
    return
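
  # Illustrative sketch (not part of the original module): for a DRBD disk
  # the logical ID (pnode, snode, port, pminor, sminor, secret) becomes a
  # physical ID starting with the local (ip, port) pair, so the result
  # depends on which node is being configured.  Names and addresses are
  # hypothetical.
  #
  #   >>> drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
  #   ...             logical_id=("node1", "node2", 11000, 0, 1, "secret"))
  #   >>> drbd.SetPhysicalID("node1", {"node1": "192.0.2.1",
  #   ...                              "node2": "192.0.2.2"})
  #   >>> drbd.physical_id
  #   ('192.0.2.1', 11000, '192.0.2.2', 11000, 0, 'secret')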
744

    
745
  def ToDict(self):
746
    """Disk-specific conversion to standard python types.
747

748
    This replaces the children lists of objects with lists of
749
    standard python types.
750

751
    """
752
    bo = super(Disk, self).ToDict()
753

    
754
    for attr in ("children",):
755
      alist = bo.get(attr, None)
756
      if alist:
757
        bo[attr] = outils.ContainerToDicts(alist)
758
    return bo
759

    
760
  @classmethod
761
  def FromDict(cls, val):
762
    """Custom function for Disks
763

764
    """
765
    obj = super(Disk, cls).FromDict(val)
766
    if obj.children:
767
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
768
    if obj.logical_id and isinstance(obj.logical_id, list):
769
      obj.logical_id = tuple(obj.logical_id)
770
    if obj.physical_id and isinstance(obj.physical_id, list):
771
      obj.physical_id = tuple(obj.physical_id)
772
    if obj.dev_type in constants.LDS_DRBD:
773
      # we need a tuple of length six here
774
      if len(obj.logical_id) < 6:
775
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
776
    return obj
777

    
778
  def __str__(self):
779
    """Custom str() formatter for disks.
780

781
    """
782
    if self.dev_type == constants.LD_LV:
783
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
784
    elif self.dev_type in constants.LDS_DRBD:
785
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
786
      val = "<DRBD8("
787
      if self.physical_id is None:
788
        phy = "unconfigured"
789
      else:
790
        phy = ("configured as %s:%s %s:%s" %
791
               (self.physical_id[0], self.physical_id[1],
792
                self.physical_id[2], self.physical_id[3]))
793

    
794
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
795
              (node_a, minor_a, node_b, minor_b, port, phy))
796
      if self.children and self.children.count(None) == 0:
797
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
798
      else:
799
        val += "no local storage"
800
    else:
801
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
802
             (self.dev_type, self.logical_id, self.physical_id, self.children))
803
    if self.iv_name is None:
804
      val += ", not visible"
805
    else:
806
      val += ", visible as /dev/%s" % self.iv_name
807
    if isinstance(self.size, int):
808
      val += ", size=%dm)>" % self.size
809
    else:
810
      val += ", size='%s')>" % (self.size,)
811
    return val
812

    
813
  def Verify(self):
814
    """Checks that this disk is correctly configured.
815

816
    """
817
    all_errors = []
818
    if self.mode not in constants.DISK_ACCESS_SET:
819
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
820
    return all_errors
821

    
822
  def UpgradeConfig(self):
823
    """Fill defaults for missing configuration values.
824

825
    """
826
    if self.children:
827
      for child in self.children:
828
        child.UpgradeConfig()
829

    
830
    # FIXME: Make this configurable in Ganeti 2.7
831
    self.params = {}
832
    # add here config upgrade for this disk
833

    
834
  @staticmethod
835
  def ComputeLDParams(disk_template, disk_params):
836
    """Computes Logical Disk parameters from Disk Template parameters.
837

838
    @type disk_template: string
839
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
840
    @type disk_params: dict
841
    @param disk_params: disk template parameters;
842
                        dict(template_name -> parameters)
843
    @rtype: list(dict)
844
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
845
      contains the LD parameters of the node. The tree is flattened in-order.
846

847
    """
848
    if disk_template not in constants.DISK_TEMPLATES:
849
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
850

    
851
    assert disk_template in disk_params
852

    
853
    result = list()
854
    dt_params = disk_params[disk_template]
855
    if disk_template == constants.DT_DRBD8:
856
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
857
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
858
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
859
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
860
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
861
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
862
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
863
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
864
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
865
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
866
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
867
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
868
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
869
        }))
870

    
871
      # data LV
872
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
873
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
874
        }))
875

    
876
      # metadata LV
877
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
878
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
879
        }))
880

    
881
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
882
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
883

    
884
    elif disk_template == constants.DT_PLAIN:
885
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
886
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
887
        }))
888

    
889
    elif disk_template == constants.DT_BLOCK:
890
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
891

    
892
    elif disk_template == constants.DT_RBD:
893
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
894
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
895
        }))
896

    
897
    elif disk_template == constants.DT_EXT:
898
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
899

    
900
    return result
901

    
902

    
903
class InstancePolicy(ConfigObject):
904
  """Config object representing instance policy limits dictionary.
905

906
  Note that this object is not actually used in the config; it's just
907
  used as a placeholder for a few functions.
908

909
  """
910
  @classmethod
911
  def CheckParameterSyntax(cls, ipolicy, check_std):
912
    """ Check the instance policy for validity.
913

914
    @type ipolicy: dict
915
    @param ipolicy: dictionary with min/max/std specs and policies
916
    @type check_std: bool
917
    @param check_std: Whether to check std value or just assume compliance
918
    @raise errors.ConfigurationError: when the policy is not legal
919

920
    """
921
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
922
    if constants.IPOLICY_DTS in ipolicy:
923
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
924
    for key in constants.IPOLICY_PARAMETERS:
925
      if key in ipolicy:
926
        InstancePolicy.CheckParameter(key, ipolicy[key])
927
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
928
    if wrong_keys:
929
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
930
                                      utils.CommaJoin(wrong_keys))
931

    
932
  @classmethod
933
  def _CheckIncompleteSpec(cls, spec, keyname):
934
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
935
    if missing_params:
936
      msg = ("Missing instance specs parameters for %s: %s" %
937
             (keyname, utils.CommaJoin(missing_params)))
938
      raise errors.ConfigurationError(msg)
939

    
940
  @classmethod
941
  def CheckISpecSyntax(cls, ipolicy, check_std):
942
    """Check the instance policy specs for validity.
943

944
    @type ipolicy: dict
945
    @param ipolicy: dictionary with min/max/std specs
946
    @type check_std: bool
947
    @param check_std: Whether to check std value or just assume compliance
948
    @raise errors.ConfigurationError: when specs are not valid
949

950
    """
951
    if constants.ISPECS_MINMAX not in ipolicy:
952
      # Nothing to check
953
      return
954

    
955
    if check_std and constants.ISPECS_STD not in ipolicy:
956
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
957
      raise errors.ConfigurationError(msg)
958
    stdspec = ipolicy.get(constants.ISPECS_STD)
959
    if check_std:
960
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
961

    
962
    if not ipolicy[constants.ISPECS_MINMAX]:
963
      raise errors.ConfigurationError("Empty minmax specifications")
964
    std_is_good = False
965
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
966
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
967
      if missing:
968
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
969
        raise errors.ConfigurationError(msg)
970
      for (key, spec) in minmaxspecs.items():
971
        InstancePolicy._CheckIncompleteSpec(spec, key)
972

    
973
      spec_std_ok = True
974
      for param in constants.ISPECS_PARAMETERS:
975
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
976
                                                           param, check_std)
977
        spec_std_ok = spec_std_ok and par_std_ok
978
      std_is_good = std_is_good or spec_std_ok
979
    if not std_is_good:
980
      raise errors.ConfigurationError("Invalid std specifications")
981

    
982
  @classmethod
983
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
984
    """Check the instance policy specs for validity on a given key.
985

986
    We check if the instance specs make sense for a given key, that is
987
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
988

989
    @type minmaxspecs: dict
990
    @param minmaxspecs: dictionary with min and max instance spec
991
    @type stdspec: dict
992
    @param stdspec: dictionary with standard instance spec
993
    @type name: string
994
    @param name: what are the limits for
995
    @type check_std: bool
996
    @param check_std: Whether to check std value or just assume compliance
997
    @rtype: bool
998
    @return: C{True} when specs are valid, C{False} when standard spec for the
999
        given name is not valid
1000
    @raise errors.ConfigurationError: when min/max specs for the given name
1001
        are not valid
1002

1003
    """
1004
    minspec = minmaxspecs[constants.ISPECS_MIN]
1005
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1006
    min_v = minspec[name]
1007
    max_v = maxspec[name]
1008

    
1009
    if min_v > max_v:
1010
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1011
             (name, min_v, max_v))
1012
      raise errors.ConfigurationError(err)
1013
    elif check_std:
1014
      std_v = stdspec.get(name, min_v)
1015
      return std_v >= min_v and std_v <= max_v
1016
    else:
1017
      return True
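
  # Illustrative sketch (not part of the original module): for one parameter
  # the check boils down to min <= std <= max; a reversed min/max pair is a
  # hard configuration error, while an out-of-range std value only makes
  # this minmax group report False.  The parameter name and numbers are
  # hypothetical.
  #
  #   >>> mm = {constants.ISPECS_MIN: {"memory-size": 128},
  #   ...       constants.ISPECS_MAX: {"memory-size": 1024}}
  #   >>> InstancePolicy._CheckISpecParamSyntax(mm, {"memory-size": 256},
  #   ...                                       "memory-size", True)
  #   True
  #   >>> InstancePolicy._CheckISpecParamSyntax(mm, {"memory-size": 64},
  #   ...                                       "memory-size", True)
  #   False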
1018

    
1019
  @classmethod
1020
  def CheckDiskTemplates(cls, disk_templates):
1021
    """Checks the disk templates for validity.
1022

1023
    """
1024
    if not disk_templates:
1025
      raise errors.ConfigurationError("Instance policy must contain" +
1026
                                      " at least one disk template")
1027
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1028
    if wrong:
1029
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1030
                                      utils.CommaJoin(wrong))
1031

    
1032
  @classmethod
1033
  def CheckParameter(cls, key, value):
1034
    """Checks a parameter.
1035

1036
    Currently we expect all parameters to be float values.
1037

1038
    """
1039
    try:
1040
      float(value)
1041
    except (TypeError, ValueError), err:
1042
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1043
                                      " '%s', error: %s" % (key, value, err))
1044

    
1045

    
1046
class Instance(TaggableObject):
1047
  """Config object representing an instance."""
1048
  __slots__ = [
1049
    "name",
1050
    "primary_node",
1051
    "os",
1052
    "hypervisor",
1053
    "hvparams",
1054
    "beparams",
1055
    "osparams",
1056
    "admin_state",
1057
    "nics",
1058
    "disks",
1059
    "disk_template",
1060
    "disks_active",
1061
    "network_port",
1062
    "serial_no",
1063
    ] + _TIMESTAMPS + _UUID
1064

    
1065
  def _ComputeSecondaryNodes(self):
1066
    """Compute the list of secondary nodes.
1067

1068
    This is a simple wrapper over _ComputeAllNodes.
1069

1070
    """
1071
    all_nodes = set(self._ComputeAllNodes())
1072
    all_nodes.discard(self.primary_node)
1073
    return tuple(all_nodes)
1074

    
1075
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1076
                             "List of names of secondary nodes")
1077

    
1078
  def _ComputeAllNodes(self):
1079
    """Compute the list of all nodes.
1080

1081
    Since the data is already there (in the drbd disks), keeping it as
1082
    a separate normal attribute is redundant and if not properly
1083
    synchronised can cause problems. Thus it's better to compute it
1084
    dynamically.
1085

1086
    """
1087
    def _Helper(nodes, device):
1088
      """Recursively computes nodes given a top device."""
1089
      if device.dev_type in constants.LDS_DRBD:
1090
        nodea, nodeb = device.logical_id[:2]
1091
        nodes.add(nodea)
1092
        nodes.add(nodeb)
1093
      if device.children:
1094
        for child in device.children:
1095
          _Helper(nodes, child)
1096

    
1097
    all_nodes = set()
1098
    all_nodes.add(self.primary_node)
1099
    for device in self.disks:
1100
      _Helper(all_nodes, device)
1101
    return tuple(all_nodes)
1102

    
1103
  all_nodes = property(_ComputeAllNodes, None, None,
1104
                       "List of names of all the nodes of the instance")
1105

    
1106
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1107
    """Provide a mapping of nodes to LVs this instance owns.
1108

1109
    This function figures out what logical volumes should belong on
1110
    which nodes, recursing through a device tree.
1111

1112
    @param lvmap: optional dictionary to receive the
1113
        'node' : ['lv', ...] data.
1114

1115
    @return: None if lvmap arg is given, otherwise, a dictionary of
1116
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1117
        volumeN is of the form "vg_name/lv_name", compatible with
1118
        GetVolumeList()
1119

1120
    """
1121
    if node is None:
1122
      node = self.primary_node
1123

    
1124
    if lvmap is None:
1125
      lvmap = {
1126
        node: [],
1127
        }
1128
      ret = lvmap
1129
    else:
1130
      if not node in lvmap:
1131
        lvmap[node] = []
1132
      ret = None
1133

    
1134
    if not devs:
1135
      devs = self.disks
1136

    
1137
    for dev in devs:
1138
      if dev.dev_type == constants.LD_LV:
1139
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1140

    
1141
      elif dev.dev_type in constants.LDS_DRBD:
1142
        if dev.children:
1143
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1144
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1145

    
1146
      elif dev.children:
1147
        self.MapLVsByNode(lvmap, dev.children, node)
1148

    
1149
    return ret
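
  # Illustrative sketch (not part of the original module): for a plain
  # LVM-backed instance every volume ends up under the primary node, in the
  # "vg_name/lv_name" form described above.  All names are hypothetical.
  #
  #   >>> inst = Instance(name="inst1.example.com",
  #   ...                 primary_node="node1.example.com",
  #   ...                 disks=[Disk(dev_type=constants.LD_LV, size=1024,
  #   ...                             logical_id=("xenvg", "data"))])
  #   >>> inst.MapLVsByNode()
  #   {'node1.example.com': ['xenvg/data']}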
1150

    
1151
  def FindDisk(self, idx):
1152
    """Find a disk given having a specified index.
1153

1154
    This is just a wrapper that does validation of the index.
1155

1156
    @type idx: int
1157
    @param idx: the disk index
1158
    @rtype: L{Disk}
1159
    @return: the corresponding disk
1160
    @raise errors.OpPrereqError: when the given index is not valid
1161

1162
    """
1163
    try:
1164
      idx = int(idx)
1165
      return self.disks[idx]
1166
    except (TypeError, ValueError), err:
1167
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1168
                                 errors.ECODE_INVAL)
1169
    except IndexError:
1170
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1171
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1172
                                 errors.ECODE_INVAL)
1173

    
1174
  def ToDict(self):
1175
    """Instance-specific conversion to standard python types.
1176

1177
    This replaces the children lists of objects with lists of standard
1178
    python types.
1179

1180
    """
1181
    bo = super(Instance, self).ToDict()
1182

    
1183
    for attr in "nics", "disks":
1184
      alist = bo.get(attr, None)
1185
      if alist:
1186
        nlist = outils.ContainerToDicts(alist)
1187
      else:
1188
        nlist = []
1189
      bo[attr] = nlist
1190
    return bo
1191

    
1192
  @classmethod
1193
  def FromDict(cls, val):
1194
    """Custom function for instances.
1195

1196
    """
1197
    if "admin_state" not in val:
1198
      if val.get("admin_up", False):
1199
        val["admin_state"] = constants.ADMINST_UP
1200
      else:
1201
        val["admin_state"] = constants.ADMINST_DOWN
1202
    if "admin_up" in val:
1203
      del val["admin_up"]
1204
    obj = super(Instance, cls).FromDict(val)
1205
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1206
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1207
    return obj
1208

    
1209
  def UpgradeConfig(self):
1210
    """Fill defaults for missing configuration values.
1211

1212
    """
1213
    for nic in self.nics:
1214
      nic.UpgradeConfig()
1215
    for disk in self.disks:
1216
      disk.UpgradeConfig()
1217
    if self.hvparams:
1218
      for key in constants.HVC_GLOBALS:
1219
        try:
1220
          del self.hvparams[key]
1221
        except KeyError:
1222
          pass
1223
    if self.osparams is None:
1224
      self.osparams = {}
1225
    UpgradeBeParams(self.beparams)
1226

    
1227

    
1228
class OS(ConfigObject):
1229
  """Config object representing an operating system.
1230

1231
  @type supported_parameters: list
1232
  @ivar supported_parameters: a list of tuples, name and description,
1233
      containing the supported parameters by this OS
1234

1235
  @type VARIANT_DELIM: string
1236
  @cvar VARIANT_DELIM: the variant delimiter
1237

1238
  """
1239
  __slots__ = [
1240
    "name",
1241
    "path",
1242
    "api_versions",
1243
    "create_script",
1244
    "export_script",
1245
    "import_script",
1246
    "rename_script",
1247
    "verify_script",
1248
    "supported_variants",
1249
    "supported_parameters",
1250
    ]
1251

    
1252
  VARIANT_DELIM = "+"
1253

    
1254
  @classmethod
1255
  def SplitNameVariant(cls, name):
1256
    """Splits the name into the proper name and variant.
1257

1258
    @param name: the OS (unprocessed) name
1259
    @rtype: list
1260
    @return: a list of two elements; if the original name didn't
1261
        contain a variant, the variant is returned as an empty string
1262

1263
    """
1264
    nv = name.split(cls.VARIANT_DELIM, 1)
1265
    if len(nv) == 1:
1266
      nv.append("")
1267
    return nv
1268

    
1269
  @classmethod
1270
  def GetName(cls, name):
1271
    """Returns the proper name of the os (without the variant).
1272

1273
    @param name: the OS (unprocessed) name
1274

1275
    """
1276
    return cls.SplitNameVariant(name)[0]
1277

    
1278
  @classmethod
1279
  def GetVariant(cls, name):
1280
    """Returns the variant the os (without the base name).
1281

1282
    @param name: the OS (unprocessed) name
1283

1284
    """
1285
    return cls.SplitNameVariant(name)[1]
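
# Illustrative sketch (not part of the original module): the "+" delimiter
# separates the OS name from its variant; without it the variant is empty.
# The OS names are hypothetical.
#
#   >>> OS.SplitNameVariant("debootstrap+default")
#   ['debootstrap', 'default']
#   >>> OS.GetName("debootstrap+default")
#   'debootstrap'
#   >>> OS.GetVariant("debootstrap")
#   ''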
1286

    
1287

    
1288
class ExtStorage(ConfigObject):
1289
  """Config object representing an External Storage Provider.
1290

1291
  """
1292
  __slots__ = [
1293
    "name",
1294
    "path",
1295
    "create_script",
1296
    "remove_script",
1297
    "grow_script",
1298
    "attach_script",
1299
    "detach_script",
1300
    "setinfo_script",
1301
    "verify_script",
1302
    "supported_parameters",
1303
    ]
1304

    
1305

    
1306
class NodeHvState(ConfigObject):
1307
  """Hypvervisor state on a node.
1308

1309
  @ivar mem_total: Total amount of memory
1310
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1311
    available)
1312
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1313
    rounding
1314
  @ivar mem_inst: Memory used by instances living on node
1315
  @ivar cpu_total: Total node CPU core count
1316
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1317

1318
  """
1319
  __slots__ = [
1320
    "mem_total",
1321
    "mem_node",
1322
    "mem_hv",
1323
    "mem_inst",
1324
    "cpu_total",
1325
    "cpu_node",
1326
    ] + _TIMESTAMPS
1327

    
1328

    
1329
class NodeDiskState(ConfigObject):
1330
  """Disk state on a node.
1331

1332
  """
1333
  __slots__ = [
1334
    "total",
1335
    "reserved",
1336
    "overhead",
1337
    ] + _TIMESTAMPS
1338

    
1339

    
1340
class Node(TaggableObject):
1341
  """Config object representing a node.
1342

1343
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1344
  @ivar hv_state_static: Hypervisor state overridden by user
1345
  @ivar disk_state: Disk state (e.g. free space)
1346
  @ivar disk_state_static: Disk state overridden by user
1347

1348
  """
1349
  __slots__ = [
1350
    "name",
1351
    "primary_ip",
1352
    "secondary_ip",
1353
    "serial_no",
1354
    "master_candidate",
1355
    "offline",
1356
    "drained",
1357
    "group",
1358
    "master_capable",
1359
    "vm_capable",
1360
    "ndparams",
1361
    "powered",
1362
    "hv_state",
1363
    "hv_state_static",
1364
    "disk_state",
1365
    "disk_state_static",
1366
    ] + _TIMESTAMPS + _UUID
1367

    
1368
  def UpgradeConfig(self):
1369
    """Fill defaults for missing configuration values.
1370

1371
    """
1372
    # pylint: disable=E0203
1373
    # because these are "defined" via slots, not manually
1374
    if self.master_capable is None:
1375
      self.master_capable = True
1376

    
1377
    if self.vm_capable is None:
1378
      self.vm_capable = True
1379

    
1380
    if self.ndparams is None:
1381
      self.ndparams = {}
1382
    # And remove any global parameter
1383
    for key in constants.NDC_GLOBALS:
1384
      if key in self.ndparams:
1385
        logging.warning("Ignoring %s node parameter for node %s",
1386
                        key, self.name)
1387
        del self.ndparams[key]
1388

    
1389
    if self.powered is None:
1390
      self.powered = True
1391

    
1392
  def ToDict(self):
1393
    """Custom function for serializing.
1394

1395
    """
1396
    data = super(Node, self).ToDict()
1397

    
1398
    hv_state = data.get("hv_state", None)
1399
    if hv_state is not None:
1400
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1401

    
1402
    disk_state = data.get("disk_state", None)
1403
    if disk_state is not None:
1404
      data["disk_state"] = \
1405
        dict((key, outils.ContainerToDicts(value))
1406
             for (key, value) in disk_state.items())
1407

    
1408
    return data
1409

    
1410
  @classmethod
1411
  def FromDict(cls, val):
1412
    """Custom function for deserializing.
1413

1414
    """
1415
    obj = super(Node, cls).FromDict(val)
1416

    
1417
    if obj.hv_state is not None:
1418
      obj.hv_state = \
1419
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1420

    
1421
    if obj.disk_state is not None:
1422
      obj.disk_state = \
1423
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1424
             for (key, value) in obj.disk_state.items())
1425

    
1426
    return obj
1427

    
1428

    
1429
class NodeGroup(TaggableObject):
1430
  """Config object representing a node group."""
1431
  __slots__ = [
1432
    "name",
1433
    "members",
1434
    "ndparams",
1435
    "diskparams",
1436
    "ipolicy",
1437
    "serial_no",
1438
    "hv_state_static",
1439
    "disk_state_static",
1440
    "alloc_policy",
1441
    "networks",
1442
    ] + _TIMESTAMPS + _UUID
1443

    
1444
  def ToDict(self):
1445
    """Custom function for nodegroup.
1446

1447
    This discards the members object, which gets recalculated and is only kept
1448
    in memory.
1449

1450
    """
1451
    mydict = super(NodeGroup, self).ToDict()
1452
    del mydict["members"]
1453
    return mydict
1454

    
1455
  @classmethod
1456
  def FromDict(cls, val):
1457
    """Custom function for nodegroup.
1458

1459
    The members slot is initialized to an empty list, upon deserialization.
1460

1461
    """
1462
    obj = super(NodeGroup, cls).FromDict(val)
1463
    obj.members = []
1464
    return obj
1465

    
1466
  def UpgradeConfig(self):
1467
    """Fill defaults for missing configuration values.
1468

1469
    """
1470
    if self.ndparams is None:
1471
      self.ndparams = {}
1472

    
1473
    if self.serial_no is None:
1474
      self.serial_no = 1
1475

    
1476
    if self.alloc_policy is None:
1477
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1478

    
1479
    # We only update mtime, and not ctime, since we would not be able
1480
    # to provide a correct value for creation time.
1481
    if self.mtime is None:
1482
      self.mtime = time.time()
1483

    
1484
    if self.diskparams is None:
1485
      self.diskparams = {}
1486
    if self.ipolicy is None:
1487
      self.ipolicy = MakeEmptyIPolicy()
1488

    
1489
    if self.networks is None:
1490
      self.networks = {}
1491

    
1492
  def FillND(self, node):
1493
    """Return filled out ndparams for L{objects.Node}
1494

1495
    @type node: L{objects.Node}
1496
    @param node: A Node object to fill
1497
    @return: a copy of the node's ndparams with defaults filled
1498

1499
    """
1500
    return self.SimpleFillND(node.ndparams)
1501

    
1502
  def SimpleFillND(self, ndparams):
1503
    """Fill a given ndparams dict with defaults.
1504

1505
    @type ndparams: dict
1506
    @param ndparams: the dict to fill
1507
    @rtype: dict
1508
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
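
  # Illustrative sketch of the ipolicy handling above, for a hypothetical
  # Cluster object "cluster" that is otherwise fully populated (values are
  # made up, not taken from a real configuration):
  #
  #   cluster.ipolicy = None
  #   cluster.UpgradeConfig()
  #   # cluster.ipolicy == FillIPolicy(constants.IPOLICY_DEFAULTS, {})
  #
  #   cluster.ipolicy = {"bogus-key": {}}
  #   cluster.UpgradeConfig()      # raises errors.ConfigurationError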

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
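
  # Round-trip sketch for the port pool (hypothetical port number): ToDict()
  # serializes the set as a list, FromDict() restores it as a set.
  #
  #   cluster.tcpudp_port_pool = set([11006])
  #   data = cluster.ToDict()         # data["tcpudp_port_pool"] == [11006]
  #   back = Cluster.FromDict(data)   # back.tcpudp_port_pool == set([11006])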

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
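
  # Example of the precedence implemented above (made-up parameter values):
  # the cluster-wide hvparams form the base layer and any per-OS override
  # in os_hvp wins over it.
  #
  #   cluster.hvparams = {"kvm": {"kernel_path": "/boot/vmlinuz",
  #                               "acpi": True}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.GetHVDefaults("kvm", os_name="debian")
  #   # -> {"kernel_path": "/boot/vmlinuz", "acpi": False}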

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
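
  # Sketch of the skip_globals switch (hypothetical values): any key listed
  # in constants.HVC_GLOBALS is removed from both the defaults and the
  # passed-in dict, so it cannot be overridden per instance.
  #
  #   filled = cluster.SimpleFillHV("kvm", "debian", {"acpi": False},
  #                                 skip_globals=True)
  #   # "acpi" is filled normally; keys in HVC_GLOBALS are absent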

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
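
  # Precedence sketch for a hypothetical OS variant (made-up values): the
  # base OS parameters are overridden by the variant's, which are in turn
  # overridden by the explicitly passed os_params.
  #
  #   cluster.osparams = {"debian": {"mirror": "http://a", "arch": "amd64"},
  #                       "debian+minimal": {"mirror": "http://b"}}
  #   cluster.SimpleFillOS("debian+minimal", {"arch": "i386"})
  #   # -> {"mirror": "http://b", "arch": "i386"}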

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
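
  # Resolution order sketch (made-up values), assuming the node group's
  # FillND() lays the node's ndparams over the group's: cluster defaults
  # have the lowest priority, the node's own values the highest.
  #
  #   cluster.ndparams = {"oob_program": "", "spindle_count": 1}
  #   group.ndparams = {"spindle_count": 4}
  #   node.ndparams = {}
  #   cluster.FillND(node, group)
  #   # -> {"oob_program": "", "spindle_count": 4}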

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
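
  # A minimal object that passes Validate() for an SSH console, assuming
  # the usual keyword-argument construction of config objects (all values
  # below are made up); message, port and display may stay unset for this
  # kind:
  #
  #   console = InstanceConsole(instance="instance1.example.com",
  #                             kind=constants.CONS_SSH, host="node1",
  #                             user="root", command=["ssh", "node1"])
  #   console.Validate()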


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
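
  # Example of the resulting entries for a hypothetical network named
  # "net1" with subnet "192.0.2.0/24" and gateway "192.0.2.1", exported
  # with prefix="NEW_":
  #
  #   NEW_NETWORK_NAME=net1
  #   NEW_NETWORK_SUBNET=192.0.2.0/24
  #   NEW_NETWORK_GATEWAY=192.0.2.1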

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
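
  # Round-trip sketch (hypothetical section and option names):
  #
  #   cfp = SerializableConfigParser()
  #   cfp.add_section("instance")
  #   cfp.set("instance", "name", "instance1.example.com")
  #   data = cfp.Dumps()
  #   restored = SerializableConfigParser.Loads(data)
  #   restored.get("instance", "name")   # -> "instance1.example.com"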


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV

  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)