root / lib / objects.py @ 3a9fe2bc


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
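# Illustrative sketch with made-up values (not part of the module): the
# custom dict overrides the defaults, and skip_keys are dropped afterwards:
#   FillDict({"mem": 128, "vcpus": 1}, {"mem": 256}, skip_keys=["vcpus"])
#   => {"mem": 256}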
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
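# Illustrative sketch (hypothetical spec names): keys missing from the custom
# policy are copied from the defaults, while the "std" specs are merged key by
# key rather than replaced wholesale:
#   FillIPolicy(defaults, {constants.ISPECS_STD: {"cpu-count": 2}})
#   keeps every other default key and fills the remaining std values from
#   defaults[constants.ISPECS_STD].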
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
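# Illustrative sketch: a missing target becomes a single default group holding
# the defaults, while existing groups are filled in place:
#   UpgradeGroupedParams(None, {"a": 1})       => {constants.PP_DEFAULT: {"a": 1}}
#   UpgradeGroupedParams({"g1": {}}, {"a": 1}) => {"g1": {"a": 1}}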
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
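# Illustrative sketch: the legacy single "memory" backend parameter is split
# into the newer max/min pair, in place:
#   {constants.BE_MEMORY: 512} becomes
#   {constants.BE_MAXMEM: 512, constants.BE_MINMEM: 512}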
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: node parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
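  # Illustrative sketch: serialization is a plain-dict round trip, so for any
  # ConfigObject subclass instance "obj" the following yields an independent
  # deep copy (this is exactly what Copy() does):
  #   data = obj.ToDict()                  # only standard python types
  #   clone = obj.__class__.FromDict(data)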
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def UpgradeConfig(self):
270
    """Fill defaults for missing configuration values.
271

272
    This method will be called at configuration load time, and its
273
    implementation will be object dependent.
274

275
    """
276
    pass
277

    
278

    
279
class TaggableObject(ConfigObject):
280
  """An generic class supporting tags.
281

282
  """
283
  __slots__ = ["tags"]
284
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
285

    
286
  @classmethod
287
  def ValidateTag(cls, tag):
288
    """Check if a tag is valid.
289

290
    If the tag is invalid, an errors.TagError will be raised. The
291
    function has no return value.
292

293
    """
294
    if not isinstance(tag, basestring):
295
      raise errors.TagError("Invalid tag type (not a string)")
296
    if len(tag) > constants.MAX_TAG_LEN:
297
      raise errors.TagError("Tag too long (>%d characters)" %
298
                            constants.MAX_TAG_LEN)
299
    if not tag:
300
      raise errors.TagError("Tags cannot be empty")
301
    if not cls.VALID_TAG_RE.match(tag):
302
      raise errors.TagError("Tag contains invalid characters")
303

    
304
  def GetTags(self):
305
    """Return the tags list.
306

307
    """
308
    tags = getattr(self, "tags", None)
309
    if tags is None:
310
      tags = self.tags = set()
311
    return tags
312

    
313
  def AddTag(self, tag):
314
    """Add a new tag.
315

316
    """
317
    self.ValidateTag(tag)
318
    tags = self.GetTags()
319
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
320
      raise errors.TagError("Too many tags")
321
    self.GetTags().add(tag)
322

    
323
  def RemoveTag(self, tag):
324
    """Remove a tag.
325

326
    """
327
    self.ValidateTag(tag)
328
    tags = self.GetTags()
329
    try:
330
      tags.remove(tag)
331
    except KeyError:
332
      raise errors.TagError("Tag not found")
333

    
334
  def ToDict(self):
335
    """Taggable-object-specific conversion to standard python types.
336

337
    This replaces the tags set with a list.
338

339
    """
340
    bo = super(TaggableObject, self).ToDict()
341

    
342
    tags = bo.get("tags", None)
343
    if isinstance(tags, set):
344
      bo["tags"] = list(tags)
345
    return bo
346

    
347
  @classmethod
348
  def FromDict(cls, val):
349
    """Custom function for instances.
350

351
    """
352
    obj = super(TaggableObject, cls).FromDict(val)
353
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
354
      obj.tags = set(obj.tags)
355
    return obj
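  # Illustrative usage sketch (hypothetical object):
  #   node.AddTag("env:prod")     # validated against VALID_TAG_RE
  #   node.GetTags()              # => set(["env:prod"])
  #   node.RemoveTag("env:prod")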
356

    
357

    
358
class MasterNetworkParameters(ConfigObject):
359
  """Network configuration parameters for the master
360

361
  @ivar name: master name
362
  @ivar ip: master IP
363
  @ivar netmask: master netmask
364
  @ivar netdev: master network device
365
  @ivar ip_family: master IP family
366

367
  """
368
  __slots__ = [
369
    "name",
370
    "ip",
371
    "netmask",
372
    "netdev",
373
    "ip_family",
374
    ]
375

    
376

    
377
class ConfigData(ConfigObject):
378
  """Top-level config object."""
379
  __slots__ = [
380
    "version",
381
    "cluster",
382
    "nodes",
383
    "nodegroups",
384
    "instances",
385
    "networks",
386
    "serial_no",
387
    ] + _TIMESTAMPS
388

    
389
  def ToDict(self):
390
    """Custom function for top-level config data.
391

392
    This just replaces the list of instances, nodes and the cluster
393
    with standard python types.
394

395
    """
396
    mydict = super(ConfigData, self).ToDict()
397
    mydict["cluster"] = mydict["cluster"].ToDict()
398
    for key in "nodes", "instances", "nodegroups", "networks":
399
      mydict[key] = outils.ContainerToDicts(mydict[key])
400

    
401
    return mydict
402

    
403
  @classmethod
404
  def FromDict(cls, val):
405
    """Custom function for top-level config data
406

407
    """
408
    obj = super(ConfigData, cls).FromDict(val)
409
    obj.cluster = Cluster.FromDict(obj.cluster)
410
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
411
    obj.instances = \
412
      outils.ContainerFromDicts(obj.instances, dict, Instance)
413
    obj.nodegroups = \
414
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
415
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
416
    return obj
417

    
418
  def HasAnyDiskOfType(self, dev_type):
419
    """Check if in there is at disk of the given type in the configuration.
420

421
    @type dev_type: L{constants.LDS_BLOCK}
422
    @param dev_type: the type to look for
423
    @rtype: boolean
424
    @return: boolean indicating if a disk of the given type was found or not
425

426
    """
427
    for instance in self.instances.values():
428
      for disk in instance.disks:
429
        if disk.IsBasedOnDiskType(dev_type):
430
          return True
431
    return False
432

    
433
  def UpgradeConfig(self):
434
    """Fill defaults for missing configuration values.
435

436
    """
437
    self.cluster.UpgradeConfig()
438
    for node in self.nodes.values():
439
      node.UpgradeConfig()
440
    for instance in self.instances.values():
441
      instance.UpgradeConfig()
442
    if self.nodegroups is None:
443
      self.nodegroups = {}
444
    for nodegroup in self.nodegroups.values():
445
      nodegroup.UpgradeConfig()
446
    if self.cluster.drbd_usermode_helper is None:
447
      # To decide if we set a helper, let's check if at least one instance has
448
      # a DRBD disk. This does not cover all the possible scenarios but it
449
      # gives a good approximation.
450
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
451
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
452
    if self.networks is None:
453
      self.networks = {}
454
    for network in self.networks.values():
455
      network.UpgradeConfig()
456
    self._UpgradeEnabledDiskTemplates()
457

    
458
  def _UpgradeEnabledDiskTemplates(self):
459
    """Upgrade the cluster's enabled disk templates by inspecting the currently
460
       enabled and/or used disk templates.
461

462
    """
463
    # enabled_disk_templates in the cluster config were introduced in 2.8.
464
    # Remove this code once upgrading from earlier versions is deprecated.
465
    if not self.cluster.enabled_disk_templates:
466
      template_set = \
467
        set([inst.disk_template for inst in self.instances.values()])
468
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
469
      if self.cluster.volume_group_name:
470
        template_set.add(constants.DT_DRBD8)
471
        template_set.add(constants.DT_PLAIN)
472
      # FIXME: Adapt this when dis/enabling at configure time is removed.
473
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
474
      # might currently not be used.
475
      if constants.ENABLE_FILE_STORAGE:
476
        template_set.add(constants.DT_FILE)
477
      if constants.ENABLE_SHARED_FILE_STORAGE:
478
        template_set.add(constants.DT_SHARED_FILE)
479
      # Set enabled_disk_templates to the inferred disk templates. Order them
480
      # according to a preference list that is based on Ganeti's history of
481
      # supported disk templates.
482
      self.cluster.enabled_disk_templates = []
483
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
484
        if preferred_template in template_set:
485
          self.cluster.enabled_disk_templates.append(preferred_template)
486
          template_set.remove(preferred_template)
487
      self.cluster.enabled_disk_templates.extend(list(template_set))
488

    
489

    
490
class NIC(ConfigObject):
491
  """Config object representing a network card."""
492
  __slots__ = ["name", "mac", "ip", "network",
493
               "nicparams", "netinfo", "pci"] + _UUID
494

    
495
  @classmethod
496
  def CheckParameterSyntax(cls, nicparams):
497
    """Check the given parameters for validity.
498

499
    @type nicparams:  dict
500
    @param nicparams: dictionary with parameter names/value
501
    @raise errors.ConfigurationError: when a parameter is not valid
502

503
    """
504
    mode = nicparams[constants.NIC_MODE]
505
    if (mode not in constants.NIC_VALID_MODES and
506
        mode != constants.VALUE_AUTO):
507
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
508

    
509
    if (mode == constants.NIC_MODE_BRIDGED and
510
        not nicparams[constants.NIC_LINK]):
511
      raise errors.ConfigurationError("Missing bridged NIC link")
512

    
513

    
514
class Disk(ConfigObject):
515
  """Config object representing a block device."""
516
  __slots__ = ["name", "dev_type", "logical_id", "physical_id",
517
               "children", "iv_name", "size", "mode", "params", "pci"] + _UUID
518

    
519
  def CreateOnSecondary(self):
520
    """Test if this device needs to be created on a secondary node."""
521
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
522

    
523
  def AssembleOnSecondary(self):
524
    """Test if this device needs to be assembled on a secondary node."""
525
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
526

    
527
  def OpenOnSecondary(self):
528
    """Test if this device needs to be opened on a secondary node."""
529
    return self.dev_type in (constants.LD_LV,)
530

    
531
  def StaticDevPath(self):
532
    """Return the device path if this device type has a static one.
533

534
    Some devices (LVM for example) always live at the same /dev/ path,
535
    irrespective of their status. For such devices, we return this
536
    path, for others we return None.
537

538
    @warning: The path returned is not a normalized pathname; callers
539
        should check that it is a valid path.
540

541
    """
542
    if self.dev_type == constants.LD_LV:
543
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
544
    elif self.dev_type == constants.LD_BLOCKDEV:
545
      return self.logical_id[1]
546
    elif self.dev_type == constants.LD_RBD:
547
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
548
    return None
549

    
550
  def ChildrenNeeded(self):
551
    """Compute the needed number of children for activation.
552

553
    This method will return either -1 (all children) or a positive
554
    number denoting the minimum number of children needed for
555
    activation (only mirrored devices will usually return >=0).
556

557
    Currently, only DRBD8 supports diskless activation (therefore we
558
    return 0), for all others we keep the previous semantics and return
559
    -1.
560

561
    """
562
    if self.dev_type == constants.LD_DRBD8:
563
      return 0
564
    return -1
565

    
566
  def IsBasedOnDiskType(self, dev_type):
567
    """Check if the disk or its children are based on the given type.
568

569
    @type dev_type: L{constants.LDS_BLOCK}
570
    @param dev_type: the type to look for
571
    @rtype: boolean
572
    @return: boolean indicating if a device of the given type was found or not
573

574
    """
575
    if self.children:
576
      for child in self.children:
577
        if child.IsBasedOnDiskType(dev_type):
578
          return True
579
    return self.dev_type == dev_type
580

    
581
  def GetNodes(self, node):
582
    """This function returns the nodes this device lives on.
583

584
    Given the node on which the parent of the device lives (or, in
585
    case of a top-level device, the primary node of the devices'
586
    instance), this function will return a list of nodes on which this
587
    device needs to (or can) be assembled.
588

589
    """
590
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
591
                         constants.LD_BLOCKDEV, constants.LD_RBD,
592
                         constants.LD_EXT]:
593
      result = [node]
594
    elif self.dev_type in constants.LDS_DRBD:
595
      result = [self.logical_id[0], self.logical_id[1]]
596
      if node not in result:
597
        raise errors.ConfigurationError("DRBD device passed unknown node")
598
    else:
599
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
600
    return result
601

    
602
  def ComputeNodeTree(self, parent_node):
603
    """Compute the node/disk tree for this disk and its children.
604

605
    This method, given the node on which the parent disk lives, will
606
    return the list of all (node, disk) pairs which describe the disk
607
    tree in the most compact way. For example, a drbd/lvm stack
608
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
609
    which represents all the top-level devices on the nodes.
610

611
    """
612
    my_nodes = self.GetNodes(parent_node)
613
    result = [(node, self) for node in my_nodes]
614
    if not self.children:
615
      # leaf device
616
      return result
617
    for node in my_nodes:
618
      for child in self.children:
619
        child_result = child.ComputeNodeTree(node)
620
        if len(child_result) == 1:
621
          # child (and all its descendants) is simple, doesn't split
622
          # over multiple hosts, so we don't need to describe it, our
623
          # own entry for this node describes it completely
624
          continue
625
        else:
626
          # check if child nodes differ from my nodes; note that
627
          # subdisk can differ from the child itself, and be instead
628
          # one of its descendants
629
          for subnode, subdisk in child_result:
630
            if subnode not in my_nodes:
631
              result.append((subnode, subdisk))
632
            # otherwise child is under our own node, so we ignore this
633
            # entry (but probably the other results in the list will
634
            # be different)
635
    return result
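  # Illustrative sketch: for a DRBD8 disk on (node_a, node_b) backed by local
  # LVs, the tree collapses to the two top-level entries
  #   [(node_a, drbd_disk), (node_b, drbd_disk)]
  # because each child LV lives on one of the DRBD nodes already and thus adds
  # no extra (node, disk) pairs.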
636

    
637
  def ComputeGrowth(self, amount):
638
    """Compute the per-VG growth requirements.
639

640
    This only works for VG-based disks.
641

642
    @type amount: integer
643
    @param amount: the desired increase in (user-visible) disk space
644
    @rtype: dict
645
    @return: a dictionary of volume-groups and the required size
646

647
    """
648
    if self.dev_type == constants.LD_LV:
649
      return {self.logical_id[0]: amount}
650
    elif self.dev_type == constants.LD_DRBD8:
651
      if self.children:
652
        return self.children[0].ComputeGrowth(amount)
653
      else:
654
        return {}
655
    else:
656
      # Other disk types do not require VG space
657
      return {}
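  # Illustrative sketch (hypothetical VG name): growing a plain LV disk by
  # 1024 MiB reports the requirement against its volume group, e.g.
  #   {"xenvg": 1024}
  # while a DRBD8 disk delegates to its data LV child and other disk types
  # report an empty dict.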
658

    
659
  def RecordGrow(self, amount):
660
    """Update the size of this disk after growth.
661

662
    This method recurses over the disks's children and updates their
663
    size correspondigly. The method needs to be kept in sync with the
664
    actual algorithms from bdev.
665

666
    """
667
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
668
                         constants.LD_RBD, constants.LD_EXT):
669
      self.size += amount
670
    elif self.dev_type == constants.LD_DRBD8:
671
      if self.children:
672
        self.children[0].RecordGrow(amount)
673
      self.size += amount
674
    else:
675
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
676
                                   " disk type %s" % self.dev_type)
677

    
678
  def Update(self, size=None, mode=None):
679
    """Apply changes to size and mode.
680

681
    """
682
    if self.dev_type == constants.LD_DRBD8:
683
      if self.children:
684
        self.children[0].Update(size=size, mode=mode)
685
    else:
686
      assert not self.children
687

    
688
    if size is not None:
689
      self.size = size
690
    if mode is not None:
691
      self.mode = mode
692

    
693
  def UnsetSize(self):
694
    """Sets recursively the size to zero for the disk and its children.
695

696
    """
697
    if self.children:
698
      for child in self.children:
699
        child.UnsetSize()
700
    self.size = 0
701

    
702
  def SetPhysicalID(self, target_node, nodes_ip):
703
    """Convert the logical ID to the physical ID.
704

705
    This is used only for drbd, which needs ip/port configuration.
706

707
    The routine descends down and updates its children also, because
708
    this helps when only the top device is passed to the remote
709
    node.
710

711
    Arguments:
712
      - target_node: the node we wish to configure for
713
      - nodes_ip: a mapping of node name to ip
714

715
    The target_node must exist in nodes_ip, and must be one of the
716
    nodes in the logical ID for each of the DRBD devices encountered
717
    in the disk tree.
718

719
    """
720
    if self.children:
721
      for child in self.children:
722
        child.SetPhysicalID(target_node, nodes_ip)
723

    
724
    if self.logical_id is None and self.physical_id is not None:
725
      return
726
    if self.dev_type in constants.LDS_DRBD:
727
      pnode, snode, port, pminor, sminor, secret = self.logical_id
728
      if target_node not in (pnode, snode):
729
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
730
                                        target_node)
731
      pnode_ip = nodes_ip.get(pnode, None)
732
      snode_ip = nodes_ip.get(snode, None)
733
      if pnode_ip is None or snode_ip is None:
734
        raise errors.ConfigurationError("Can't find primary or secondary node"
735
                                        " for %s" % str(self))
736
      p_data = (pnode_ip, port)
737
      s_data = (snode_ip, port)
738
      if pnode == target_node:
739
        self.physical_id = p_data + s_data + (pminor, secret)
740
      else: # it must be secondary, we tested above
741
        self.physical_id = s_data + p_data + (sminor, secret)
742
    else:
743
      self.physical_id = self.logical_id
744
    return
745

    
746
  def ToDict(self):
747
    """Disk-specific conversion to standard python types.
748

749
    This replaces the children lists of objects with lists of
750
    standard python types.
751

752
    """
753
    bo = super(Disk, self).ToDict()
754

    
755
    for attr in ("children",):
756
      alist = bo.get(attr, None)
757
      if alist:
758
        bo[attr] = outils.ContainerToDicts(alist)
759
    return bo
760

    
761
  @classmethod
762
  def FromDict(cls, val):
763
    """Custom function for Disks
764

765
    """
766
    obj = super(Disk, cls).FromDict(val)
767
    if obj.children:
768
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
769
    if obj.logical_id and isinstance(obj.logical_id, list):
770
      obj.logical_id = tuple(obj.logical_id)
771
    if obj.physical_id and isinstance(obj.physical_id, list):
772
      obj.physical_id = tuple(obj.physical_id)
773
    if obj.dev_type in constants.LDS_DRBD:
774
      # we need a tuple of length six here
775
      if len(obj.logical_id) < 6:
776
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
777
    return obj
778

    
779
  def __str__(self):
780
    """Custom str() formatter for disks.
781

782
    """
783
    if self.dev_type == constants.LD_LV:
784
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
785
    elif self.dev_type in constants.LDS_DRBD:
786
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
787
      val = "<DRBD8("
788
      if self.physical_id is None:
789
        phy = "unconfigured"
790
      else:
791
        phy = ("configured as %s:%s %s:%s" %
792
               (self.physical_id[0], self.physical_id[1],
793
                self.physical_id[2], self.physical_id[3]))
794

    
795
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
796
              (node_a, minor_a, node_b, minor_b, port, phy))
797
      if self.children and self.children.count(None) == 0:
798
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
799
      else:
800
        val += "no local storage"
801
    else:
802
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
803
             (self.dev_type, self.logical_id, self.physical_id, self.children))
804
    if self.iv_name is None:
805
      val += ", not visible"
806
    else:
807
      val += ", visible as /dev/%s" % self.iv_name
808
    if isinstance(self.size, int):
809
      val += ", size=%dm)>" % self.size
810
    else:
811
      val += ", size='%s')>" % (self.size,)
812
    return val
813

    
814
  def Verify(self):
815
    """Checks that this disk is correctly configured.
816

817
    """
818
    all_errors = []
819
    if self.mode not in constants.DISK_ACCESS_SET:
820
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
821
    return all_errors
822

    
823
  def UpgradeConfig(self):
824
    """Fill defaults for missing configuration values.
825

826
    """
827
    if self.children:
828
      for child in self.children:
829
        child.UpgradeConfig()
830

    
831
    # FIXME: Make this configurable in Ganeti 2.7
832
    self.params = {}
833
    # add here config upgrade for this disk
834

    
835
  @staticmethod
836
  def ComputeLDParams(disk_template, disk_params):
837
    """Computes Logical Disk parameters from Disk Template parameters.
838

839
    @type disk_template: string
840
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
841
    @type disk_params: dict
842
    @param disk_params: disk template parameters;
843
                        dict(template_name -> parameters
844
    @rtype: list(dict)
845
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
846
      contains the LD parameters of the node. The tree is flattened in-order.
847

848
    """
849
    if disk_template not in constants.DISK_TEMPLATES:
850
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
851

    
852
    assert disk_template in disk_params
853

    
854
    result = list()
855
    dt_params = disk_params[disk_template]
856
    if disk_template == constants.DT_DRBD8:
857
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
858
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
859
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
860
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
861
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
862
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
863
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
864
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
865
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
866
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
867
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
868
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
869
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
870
        }))
871

    
872
      # data LV
873
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
874
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
875
        }))
876

    
877
      # metadata LV
878
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
879
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
880
        }))
881

    
882
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
883
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
884

    
885
    elif disk_template == constants.DT_PLAIN:
886
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
887
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
888
        }))
889

    
890
    elif disk_template == constants.DT_BLOCK:
891
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
892

    
893
    elif disk_template == constants.DT_RBD:
894
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
895
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
896
        }))
897

    
898
    elif disk_template == constants.DT_EXT:
899
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
900

    
901
    return result
902

    
903

    
904
class InstancePolicy(ConfigObject):
905
  """Config object representing instance policy limits dictionary.
906

907
  Note that this object is not actually used in the config, it's just
908
  used as a placeholder for a few functions.
909

910
  """
911
  @classmethod
912
  def CheckParameterSyntax(cls, ipolicy, check_std):
913
    """ Check the instance policy for validity.
914

915
    @type ipolicy: dict
916
    @param ipolicy: dictionary with min/max/std specs and policies
917
    @type check_std: bool
918
    @param check_std: Whether to check std value or just assume compliance
919
    @raise errors.ConfigurationError: when the policy is not legal
920

921
    """
922
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
923
    if constants.IPOLICY_DTS in ipolicy:
924
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
925
    for key in constants.IPOLICY_PARAMETERS:
926
      if key in ipolicy:
927
        InstancePolicy.CheckParameter(key, ipolicy[key])
928
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
929
    if wrong_keys:
930
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
931
                                      utils.CommaJoin(wrong_keys))
932

    
933
  @classmethod
934
  def _CheckIncompleteSpec(cls, spec, keyname):
935
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
936
    if missing_params:
937
      msg = ("Missing instance specs parameters for %s: %s" %
938
             (keyname, utils.CommaJoin(missing_params)))
939
      raise errors.ConfigurationError(msg)
940

    
941
  @classmethod
942
  def CheckISpecSyntax(cls, ipolicy, check_std):
943
    """Check the instance policy specs for validity.
944

945
    @type ipolicy: dict
946
    @param ipolicy: dictionary with min/max/std specs
947
    @type check_std: bool
948
    @param check_std: Whether to check std value or just assume compliance
949
    @raise errors.ConfigurationError: when specs are not valid
950

951
    """
952
    if constants.ISPECS_MINMAX not in ipolicy:
953
      # Nothing to check
954
      return
955

    
956
    if check_std and constants.ISPECS_STD not in ipolicy:
957
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
958
      raise errors.ConfigurationError(msg)
959
    stdspec = ipolicy.get(constants.ISPECS_STD)
960
    if check_std:
961
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
962

    
963
    if not ipolicy[constants.ISPECS_MINMAX]:
964
      raise errors.ConfigurationError("Empty minmax specifications")
965
    std_is_good = False
966
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
967
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
968
      if missing:
969
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
970
        raise errors.ConfigurationError(msg)
971
      for (key, spec) in minmaxspecs.items():
972
        InstancePolicy._CheckIncompleteSpec(spec, key)
973

    
974
      spec_std_ok = True
975
      for param in constants.ISPECS_PARAMETERS:
976
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
977
                                                           param, check_std)
978
        spec_std_ok = spec_std_ok and par_std_ok
979
      std_is_good = std_is_good or spec_std_ok
980
    if not std_is_good:
981
      raise errors.ConfigurationError("Invalid std specifications")
982

    
983
  @classmethod
984
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
985
    """Check the instance policy specs for validity on a given key.
986

987
    We check if the instance specs make sense for a given key, that is
988
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
989

990
    @type minmaxspecs: dict
991
    @param minmaxspecs: dictionary with min and max instance spec
992
    @type stdspec: dict
993
    @param stdspec: dictionary with standard instance spec
994
    @type name: string
995
    @param name: what are the limits for
996
    @type check_std: bool
997
    @param check_std: Whether to check std value or just assume compliance
998
    @rtype: bool
999
    @return: C{True} when specs are valid, C{False} when standard spec for the
1000
        given name is not valid
1001
    @raise errors.ConfigurationError: when min/max specs for the given name
1002
        are not valid
1003

1004
    """
1005
    minspec = minmaxspecs[constants.ISPECS_MIN]
1006
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1007
    min_v = minspec[name]
1008
    max_v = maxspec[name]
1009

    
1010
    if min_v > max_v:
1011
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1012
             (name, min_v, max_v))
1013
      raise errors.ConfigurationError(err)
1014
    elif check_std:
1015
      std_v = stdspec.get(name, min_v)
1016
      return std_v >= min_v and std_v <= max_v
1017
    else:
1018
      return True
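  # Illustrative sketch: with min=128 and max=1024 for a given parameter, a
  # std value of 512 yields True, a std value of 2048 merely yields False
  # (the caller reports the bad std spec), while min=1024 > max=128 raises
  # ConfigurationError outright.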
1019

    
1020
  @classmethod
1021
  def CheckDiskTemplates(cls, disk_templates):
1022
    """Checks the disk templates for validity.
1023

1024
    """
1025
    if not disk_templates:
1026
      raise errors.ConfigurationError("Instance policy must contain" +
1027
                                      " at least one disk template")
1028
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1029
    if wrong:
1030
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1031
                                      utils.CommaJoin(wrong))
1032

    
1033
  @classmethod
1034
  def CheckParameter(cls, key, value):
1035
    """Checks a parameter.
1036

1037
    Currently we expect all parameters to be float values.
1038

1039
    """
1040
    try:
1041
      float(value)
1042
    except (TypeError, ValueError), err:
1043
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1044
                                      " '%s', error: %s" % (key, value, err))
1045

    
1046

    
1047
class Instance(TaggableObject):
1048
  """Config object representing an instance."""
1049
  __slots__ = [
1050
    "name",
1051
    "primary_node",
1052
    "os",
1053
    "hypervisor",
1054
    "hvparams",
1055
    "beparams",
1056
    "osparams",
1057
    "admin_state",
1058
    "nics",
1059
    "disks",
1060
    "disk_template",
1061
    "disks_active",
1062
    "network_port",
1063
    "serial_no",
1064
    ] + _TIMESTAMPS + _UUID
1065

    
1066
  def _ComputeSecondaryNodes(self):
1067
    """Compute the list of secondary nodes.
1068

1069
    This is a simple wrapper over _ComputeAllNodes.
1070

1071
    """
1072
    all_nodes = set(self._ComputeAllNodes())
1073
    all_nodes.discard(self.primary_node)
1074
    return tuple(all_nodes)
1075

    
1076
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1077
                             "List of names of secondary nodes")
1078

    
1079
  def _ComputeAllNodes(self):
1080
    """Compute the list of all nodes.
1081

1082
    Since the data is already there (in the drbd disks), keeping it as
1083
    a separate normal attribute is redundant and if not properly
1084
    synchronised can cause problems. Thus it's better to compute it
1085
    dynamically.
1086

1087
    """
1088
    def _Helper(nodes, device):
1089
      """Recursively computes nodes given a top device."""
1090
      if device.dev_type in constants.LDS_DRBD:
1091
        nodea, nodeb = device.logical_id[:2]
1092
        nodes.add(nodea)
1093
        nodes.add(nodeb)
1094
      if device.children:
1095
        for child in device.children:
1096
          _Helper(nodes, child)
1097

    
1098
    all_nodes = set()
1099
    all_nodes.add(self.primary_node)
1100
    for device in self.disks:
1101
      _Helper(all_nodes, device)
1102
    return tuple(all_nodes)
1103

    
1104
  all_nodes = property(_ComputeAllNodes, None, None,
1105
                       "List of names of all the nodes of the instance")
1106

    
1107
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1108
    """Provide a mapping of nodes to LVs this instance owns.
1109

1110
    This function figures out what logical volumes should belong on
1111
    which nodes, recursing through a device tree.
1112

1113
    @param lvmap: optional dictionary to receive the
1114
        'node' : ['lv', ...] data.
1115

1116
    @return: None if lvmap arg is given, otherwise, a dictionary of
1117
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1118
        volumeN is of the form "vg_name/lv_name", compatible with
1119
        GetVolumeList()
1120

1121
    """
1122
    if node is None:
1123
      node = self.primary_node
1124

    
1125
    if lvmap is None:
1126
      lvmap = {
1127
        node: [],
1128
        }
1129
      ret = lvmap
1130
    else:
1131
      if not node in lvmap:
1132
        lvmap[node] = []
1133
      ret = None
1134

    
1135
    if not devs:
1136
      devs = self.disks
1137

    
1138
    for dev in devs:
1139
      if dev.dev_type == constants.LD_LV:
1140
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1141

    
1142
      elif dev.dev_type in constants.LDS_DRBD:
1143
        if dev.children:
1144
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1145
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1146

    
1147
      elif dev.children:
1148
        self.MapLVsByNode(lvmap, dev.children, node)
1149

    
1150
    return ret
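  # Illustrative sketch of the returned structure (hypothetical names): for an
  # instance with a single DRBD disk whose data/meta LVs live in volume group
  # "xenvg", the result maps both nodes to "vg/lv" strings, roughly:
  #   {"node1": ["xenvg/data-lv", "xenvg/meta-lv"],
  #    "node2": ["xenvg/data-lv", "xenvg/meta-lv"]}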
1151

    
1152
  def FindDisk(self, idx):
1153
    """Find a disk given having a specified index.
1154

1155
    This is just a wrapper that does validation of the index.
1156

1157
    @type idx: int
1158
    @param idx: the disk index
1159
    @rtype: L{Disk}
1160
    @return: the corresponding disk
1161
    @raise errors.OpPrereqError: when the given index is not valid
1162

1163
    """
1164
    try:
1165
      idx = int(idx)
1166
      return self.disks[idx]
1167
    except (TypeError, ValueError), err:
1168
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1169
                                 errors.ECODE_INVAL)
1170
    except IndexError:
1171
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1172
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1173
                                 errors.ECODE_INVAL)
1174

    
1175
  def ToDict(self):
1176
    """Instance-specific conversion to standard python types.
1177

1178
    This replaces the children lists of objects with lists of standard
1179
    python types.
1180

1181
    """
1182
    bo = super(Instance, self).ToDict()
1183

    
1184
    for attr in "nics", "disks":
1185
      alist = bo.get(attr, None)
1186
      if alist:
1187
        nlist = outils.ContainerToDicts(alist)
1188
      else:
1189
        nlist = []
1190
      bo[attr] = nlist
1191
    return bo
1192

    
1193
  @classmethod
1194
  def FromDict(cls, val):
1195
    """Custom function for instances.
1196

1197
    """
1198
    if "admin_state" not in val:
1199
      if val.get("admin_up", False):
1200
        val["admin_state"] = constants.ADMINST_UP
1201
      else:
1202
        val["admin_state"] = constants.ADMINST_DOWN
1203
    if "admin_up" in val:
1204
      del val["admin_up"]
1205
    obj = super(Instance, cls).FromDict(val)
1206
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1207
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1208
    return obj
1209

    
1210
  def UpgradeConfig(self):
1211
    """Fill defaults for missing configuration values.
1212

1213
    """
1214
    for nic in self.nics:
1215
      nic.UpgradeConfig()
1216
    for disk in self.disks:
1217
      disk.UpgradeConfig()
1218
    if self.hvparams:
1219
      for key in constants.HVC_GLOBALS:
1220
        try:
1221
          del self.hvparams[key]
1222
        except KeyError:
1223
          pass
1224
    if self.osparams is None:
1225
      self.osparams = {}
1226
    UpgradeBeParams(self.beparams)
1227
    if self.disks_active is None:
1228
      self.disks_active = self.admin_state == constants.ADMINST_UP
1229

    
1230

    
1231
class OS(ConfigObject):
1232
  """Config object representing an operating system.
1233

1234
  @type supported_parameters: list
1235
  @ivar supported_parameters: a list of tuples, name and description,
1236
      containing the supported parameters by this OS
1237

1238
  @type VARIANT_DELIM: string
1239
  @cvar VARIANT_DELIM: the variant delimiter
1240

1241
  """
1242
  __slots__ = [
1243
    "name",
1244
    "path",
1245
    "api_versions",
1246
    "create_script",
1247
    "export_script",
1248
    "import_script",
1249
    "rename_script",
1250
    "verify_script",
1251
    "supported_variants",
1252
    "supported_parameters",
1253
    ]
1254

    
1255
  VARIANT_DELIM = "+"
1256

    
1257
  @classmethod
1258
  def SplitNameVariant(cls, name):
1259
    """Splits the name into the proper name and variant.
1260

1261
    @param name: the OS (unprocessed) name
1262
    @rtype: list
1263
    @return: a list of two elements; if the original name didn't
1264
        contain a variant, it's returned as an empty string
1265

1266
    """
1267
    nv = name.split(cls.VARIANT_DELIM, 1)
1268
    if len(nv) == 1:
1269
      nv.append("")
1270
    return nv
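  # Illustrative sketch:
  #   OS.SplitNameVariant("debian+squeeze") => ["debian", "squeeze"]
  #   OS.SplitNameVariant("debian")         => ["debian", ""]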
1271

    
1272
  @classmethod
1273
  def GetName(cls, name):
1274
    """Returns the proper name of the os (without the variant).
1275

1276
    @param name: the OS (unprocessed) name
1277

1278
    """
1279
    return cls.SplitNameVariant(name)[0]
1280

    
1281
  @classmethod
1282
  def GetVariant(cls, name):
1283
    """Returns the variant the os (without the base name).
1284

1285
    @param name: the OS (unprocessed) name
1286

1287
    """
1288
    return cls.SplitNameVariant(name)[1]
1289

    
1290

    
1291
class ExtStorage(ConfigObject):
1292
  """Config object representing an External Storage Provider.
1293

1294
  """
1295
  __slots__ = [
1296
    "name",
1297
    "path",
1298
    "create_script",
1299
    "remove_script",
1300
    "grow_script",
1301
    "attach_script",
1302
    "detach_script",
1303
    "setinfo_script",
1304
    "verify_script",
1305
    "snapshot_script",
1306
    "supported_parameters",
1307
    ]
1308

    
1309

    
1310
class NodeHvState(ConfigObject):
1311
  """Hypvervisor state on a node.
1312

1313
  @ivar mem_total: Total amount of memory
1314
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1315
    available)
1316
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1317
    rounding
1318
  @ivar mem_inst: Memory used by instances living on node
1319
  @ivar cpu_total: Total node CPU core count
1320
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1321

1322
  """
1323
  __slots__ = [
1324
    "mem_total",
1325
    "mem_node",
1326
    "mem_hv",
1327
    "mem_inst",
1328
    "cpu_total",
1329
    "cpu_node",
1330
    ] + _TIMESTAMPS
1331

    
1332

    
1333
class NodeDiskState(ConfigObject):
1334
  """Disk state on a node.
1335

1336
  """
1337
  __slots__ = [
1338
    "total",
1339
    "reserved",
1340
    "overhead",
1341
    ] + _TIMESTAMPS
1342

    
1343

    
1344
class Node(TaggableObject):
1345
  """Config object representing a node.
1346

1347
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1348
  @ivar hv_state_static: Hypervisor state overridden by user
1349
  @ivar disk_state: Disk state (e.g. free space)
1350
  @ivar disk_state_static: Disk state overridden by user
1351

1352
  """
1353
  __slots__ = [
1354
    "name",
1355
    "primary_ip",
1356
    "secondary_ip",
1357
    "serial_no",
1358
    "master_candidate",
1359
    "offline",
1360
    "drained",
1361
    "group",
1362
    "master_capable",
1363
    "vm_capable",
1364
    "ndparams",
1365
    "powered",
1366
    "hv_state",
1367
    "hv_state_static",
1368
    "disk_state",
1369
    "disk_state_static",
1370
    ] + _TIMESTAMPS + _UUID
1371

    
1372
  def UpgradeConfig(self):
1373
    """Fill defaults for missing configuration values.
1374

1375
    """
1376
    # pylint: disable=E0203
1377
    # because these are "defined" via slots, not manually
1378
    if self.master_capable is None:
1379
      self.master_capable = True
1380

    
1381
    if self.vm_capable is None:
1382
      self.vm_capable = True
1383

    
1384
    if self.ndparams is None:
1385
      self.ndparams = {}
1386
    # And remove any global parameter
1387
    for key in constants.NDC_GLOBALS:
1388
      if key in self.ndparams:
1389
        logging.warning("Ignoring %s node parameter for node %s",
1390
                        key, self.name)
1391
        del self.ndparams[key]
1392

    
1393
    if self.powered is None:
1394
      self.powered = True
1395

    
1396
  def ToDict(self):
1397
    """Custom function for serializing.
1398

1399
    """
1400
    data = super(Node, self).ToDict()
1401

    
1402
    hv_state = data.get("hv_state", None)
1403
    if hv_state is not None:
1404
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1405

    
1406
    disk_state = data.get("disk_state", None)
1407
    if disk_state is not None:
1408
      data["disk_state"] = \
1409
        dict((key, outils.ContainerToDicts(value))
1410
             for (key, value) in disk_state.items())
1411

    
1412
    return data
1413

    
1414
  @classmethod
1415
  def FromDict(cls, val):
1416
    """Custom function for deserializing.
1417

1418
    """
1419
    obj = super(Node, cls).FromDict(val)
1420

    
1421
    if obj.hv_state is not None:
1422
      obj.hv_state = \
1423
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1424

    
1425
    if obj.disk_state is not None:
1426
      obj.disk_state = \
1427
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1428
             for (key, value) in obj.disk_state.items())
1429

    
1430
    return obj
1431

    
1432

    
1433
class NodeGroup(TaggableObject):
1434
  """Config object representing a node group."""
1435
  __slots__ = [
1436
    "name",
1437
    "members",
1438
    "ndparams",
1439
    "diskparams",
1440
    "ipolicy",
1441
    "serial_no",
1442
    "hv_state_static",
1443
    "disk_state_static",
1444
    "alloc_policy",
1445
    "networks",
1446
    ] + _TIMESTAMPS + _UUID
1447

    
1448
  def ToDict(self):
1449
    """Custom function for nodegroup.
1450

1451
    This discards the members object, which gets recalculated and is only kept
1452
    in memory.
1453

1454
    """
1455
    mydict = super(NodeGroup, self).ToDict()
1456
    del mydict["members"]
1457
    return mydict
1458

    
1459
  @classmethod
1460
  def FromDict(cls, val):
1461
    """Custom function for nodegroup.
1462

1463
    The members slot is initialized to an empty list, upon deserialization.
1464

1465
    """
1466
    obj = super(NodeGroup, cls).FromDict(val)
1467
    obj.members = []
1468
    return obj
1469

    
1470
  def UpgradeConfig(self):
1471
    """Fill defaults for missing configuration values.
1472

1473
    """
1474
    if self.ndparams is None:
1475
      self.ndparams = {}
1476

    
1477
    if self.serial_no is None:
1478
      self.serial_no = 1
1479

    
1480
    if self.alloc_policy is None:
1481
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1482

    
1483
    # We only update mtime, and not ctime, since we would not be able
1484
    # to provide a correct value for creation time.
1485
    if self.mtime is None:
1486
      self.mtime = time.time()
1487

    
1488
    if self.diskparams is None:
1489
      self.diskparams = {}
1490
    if self.ipolicy is None:
1491
      self.ipolicy = MakeEmptyIPolicy()
1492

    
1493
    if self.networks is None:
1494
      self.networks = {}
1495

    
1496
  def FillND(self, node):
1497
    """Return filled out ndparams for L{objects.Node}
1498

1499
    @type node: L{objects.Node}
1500
    @param node: A Node object to fill
1501
    @return: a copy of the node's ndparams with defaults filled
1502

1503
    """
1504
    return self.SimpleFillND(node.ndparams)
1505

    
1506
  def SimpleFillND(self, ndparams):
1507
    """Fill a given ndparams dict with defaults.
1508

1509
    @type ndparams: dict
1510
    @param ndparams: the dict to fill
1511
    @rtype: dict
1512
    @return: a copy of the passed in ndparams with missing keys filled
1513
        from the node group defaults
1514

1515
    """
1516
    return FillDict(self.ndparams, ndparams)
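  # Illustrative sketch (hypothetical parameter name): with group-level
  # ndparams of {"oob_program": "/bin/true"}, SimpleFillND({}) returns a copy
  # containing that default, while an explicit value passed by the caller
  # always wins over the group default.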
1517

    
1518

    
1519
class Cluster(TaggableObject):
1520
  """Config object representing the cluster."""
1521
  __slots__ = [
1522
    "serial_no",
1523
    "rsahostkeypub",
1524
    "dsahostkeypub",
1525
    "highest_used_port",
1526
    "tcpudp_port_pool",
1527
    "mac_prefix",
1528
    "volume_group_name",
1529
    "reserved_lvs",
1530
    "drbd_usermode_helper",
1531
    "default_bridge",
1532
    "default_hypervisor",
1533
    "master_node",
1534
    "master_ip",
1535
    "master_netdev",
1536
    "master_netmask",
1537
    "use_external_mip_script",
1538
    "cluster_name",
1539
    "file_storage_dir",
1540
    "shared_file_storage_dir",
1541
    "enabled_hypervisors",
1542
    "hvparams",
1543
    "ipolicy",
1544
    "os_hvp",
1545
    "beparams",
1546
    "osparams",
1547
    "nicparams",
1548
    "ndparams",
1549
    "diskparams",
1550
    "candidate_pool_size",
1551
    "modify_etc_hosts",
1552
    "modify_ssh_setup",
1553
    "maintain_node_health",
1554
    "uid_pool",
1555
    "default_iallocator",
1556
    "hidden_os",
1557
    "blacklisted_os",
1558
    "primary_ip_family",
1559
    "prealloc_wipe_disks",
1560
    "hv_state_static",
1561
    "disk_state_static",
1562
    "enabled_disk_templates",
1563
    ] + _TIMESTAMPS + _UUID
1564

    
1565
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

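  # Illustrative sketch (added by the editor; the OS name and values below are
  # made up): GetHVDefaults layers OS-specific hypervisor parameters on top of
  # the cluster-wide ones, so with
  #   self.hvparams = {"kvm": {"acpi": True, "kernel_path": "/boot/vmlinuz"}}
  #   self.os_hvp = {"some-os": {"kvm": {"acpi": False}}}
  # GetHVDefaults("kvm", "some-os") would return
  #   {"acpi": False, "kernel_path": "/boot/vmlinuz"}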
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

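  # Editor's note (not in the original source): when skip_globals is True,
  # SimpleFillHV below passes constants.HVC_GLOBALS as skip_keys, so the
  # globally-managed hypervisor parameters are left out of the result instead
  # of being filled from (or overridden by) the per-instance dict.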
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill with the cluster defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

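  # Editor's note (not in the original source): cluster-level beparams and
  # nicparams are grouped dicts keyed by parameter group; SimpleFillBE and
  # SimpleFillNIC below only consult the constants.PP_DEFAULT group when
  # filling in defaults.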
  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

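  # Illustrative sketch (added by the editor; the OS names and the parameter
  # are made up): SimpleFillOS below resolves parameters in three layers --
  # base OS, then OS+variant, then the explicitly passed os_params. E.g. with
  #   self.osparams = {"myos": {"dhcp": "yes"}, "myos+small": {"dhcp": "no"}}
  # SimpleFillOS("myos+small", {}) would return {"dhcp": "no"}, while a
  # non-empty os_params argument would override both layers.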
  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

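  # Editor's note (not in the original source): node parameters are resolved
  # in a three-level chain -- FillND below first lets the node group fill the
  # node's ndparams (nodegroup.FillND(node)) and then fills whatever is still
  # missing from the cluster-wide defaults via SimpleFillND.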
  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]

class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]

class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []

class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

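  # Editor's note (not in the original source): each assertion in Validate
  # below requires a field to be set unless the console kind is one for which
  # that field does not apply (e.g. "host" may only be unset for message-type
  # consoles).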
  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

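  # Illustrative sketch (added by the editor; the names are made up):
  # HooksDict below turns a network into hook environment entries, e.g. a
  # network named "net1" queried with prefix="NIC0_" would yield keys such as
  # "NIC0_NETWORK_NAME", "NIC0_NETWORK_UUID" and, when the attributes are set,
  # keys like "NIC0_NETWORK_SUBNET".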
  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
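  # Illustrative round-trip (added by the editor, not part of the original
  # source; the section name and values are made up):
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("node")
  #   cfg.set("node", "name", "node1.example.com")
  #   data = cfg.Dumps()
  #   copy = SerializableConfigParser.Loads(data)
  #   assert copy.get("node", "name") == "node1.example.com"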
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

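  # Editor's note (not in the original source): IsEmpty below allows for up
  # to 1 MiB of bookkeeping overhead -- a PV whose free space is within 1 MiB
  # of its total size is still considered empty.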
  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)