
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
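
# Illustrative usage (not part of the original module): FillDict deep-copies
# the defaults, overlays the custom values and finally drops any skip_keys.
#
#   FillDict({"vcpus": 1, "memory": 128}, {"memory": 256}, skip_keys=["vcpus"])
#   # -> {"memory": 256}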
83

    
84

    
85
def FillIPolicy(default_ipolicy, custom_ipolicy):
86
  """Fills an instance policy with defaults.
87

88
  """
89
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90
  ret_dict = copy.deepcopy(custom_ipolicy)
91
  for key in default_ipolicy:
92
    if key not in ret_dict:
93
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94
    elif key == constants.ISPECS_STD:
95
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96
  return ret_dict
97

    
98

    
99
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100
  """Fills the disk parameter defaults.
101

102
  @see: L{FillDict} for parameters and return value
103

104
  """
105
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106

    
107
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108
                             skip_keys=skip_keys))
109
              for dt in constants.DISK_TEMPLATES)
110

    
111

    
112
def UpgradeGroupedParams(target, defaults):
113
  """Update all groups for the target parameter.
114

115
  @type target: dict of dicts
116
  @param target: {group: {parameter: value}}
117
  @type defaults: dict
118
  @param defaults: default parameter values
119

120
  """
121
  if target is None:
122
    target = {constants.PP_DEFAULT: defaults}
123
  else:
124
    for group in target:
125
      target[group] = FillDict(defaults, target[group])
126
  return target
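
# Illustrative usage (not part of the original module), with made-up group and
# parameter names: a missing target becomes a single default group, otherwise
# every existing group is filled with the defaults.
#
#   UpgradeGroupedParams(None, {"mode": "bridged"})
#   # -> {constants.PP_DEFAULT: {"mode": "bridged"}}
#   UpgradeGroupedParams({"grp1": {"mtu": 9000}}, {"mtu": 1500, "mode": "bridged"})
#   # -> {"grp1": {"mtu": 9000, "mode": "bridged"}}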
127

    
128

    
129
def UpgradeBeParams(target):
130
  """Update the be parameters dict to the new format.
131

132
  @type target: dict
133
  @param target: "be" parameters dict
134

135
  """
136
  if constants.BE_MEMORY in target:
137
    memory = target[constants.BE_MEMORY]
138
    target[constants.BE_MAXMEM] = memory
139
    target[constants.BE_MINMEM] = memory
140
    del target[constants.BE_MEMORY]
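
# Illustrative usage (not part of the original module), assuming the usual
# constant values BE_MEMORY == "memory", BE_MAXMEM == "maxmem" and
# BE_MINMEM == "minmem"; the legacy single memory value is split in place:
#
#   beparams = {"memory": 128}
#   UpgradeBeParams(beparams)
#   # beparams is now {"maxmem": 128, "minmem": 128}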
141

    
142

    
143
def UpgradeDiskParams(diskparams):
144
  """Upgrade the disk parameters.
145

146
  @type diskparams: dict
147
  @param diskparams: disk parameters to upgrade
148
  @rtype: dict
149
  @return: the upgraded disk parameters dict
150

151
  """
152
  if not diskparams:
153
    result = {}
154
  else:
155
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156

    
157
  return result
158

    
159

    
160
def UpgradeNDParams(ndparams):
161
  """Upgrade ndparams structure.
162

163
  @type ndparams: dict
164
  @param ndparams: node parameters to upgrade
165
  @rtype: dict
166
  @return: the upgraded node parameters dict
167

168
  """
169
  if ndparams is None:
170
    ndparams = {}
171

    
172
  if (constants.ND_OOB_PROGRAM in ndparams and
173
      ndparams[constants.ND_OOB_PROGRAM] is None):
174
    # will be reset by the line below
175
    del ndparams[constants.ND_OOB_PROGRAM]
176
  return FillDict(constants.NDC_DEFAULTS, ndparams)
177

    
178

    
179
def MakeEmptyIPolicy():
180
  """Create empty IPolicy dictionary.
181

182
  """
183
  return {}
184

    
185

    
186
class ConfigObject(outils.ValidatedSlots):
187
  """A generic config object.
188

189
  It has the following properties:
190

191
    - provides somewhat safe recursive unpickling and pickling for its classes
192
    - unset attributes which are defined in slots are always returned
193
      as None instead of raising an error
194

195
  Classes derived from this must always declare __slots__ (we use many
196
  config objects and the memory reduction is useful)
197

198
  """
199
  __slots__ = []
200

    
201
  def __getattr__(self, name):
202
    if name not in self.GetAllSlots():
203
      raise AttributeError("Invalid object attribute %s.%s" %
204
                           (type(self).__name__, name))
205
    return None
206

    
207
  def __setstate__(self, state):
208
    slots = self.GetAllSlots()
209
    for name in state:
210
      if name in slots:
211
        setattr(self, name, state[name])
212

    
213
  def Validate(self):
214
    """Validates the slots.
215

216
    """
217

    
218
  def ToDict(self):
219
    """Convert to a dict holding only standard python types.
220

221
    The generic routine just dumps all of this object's attributes in
222
    a dict. It does not work if the class has children who are
223
    ConfigObjects themselves (e.g. the nics list in an Instance), in
224
    which case the object should subclass the function in order to
225
    make sure all objects returned are only standard python types.
226

227
    """
228
    result = {}
229
    for name in self.GetAllSlots():
230
      value = getattr(self, name, None)
231
      if value is not None:
232
        result[name] = value
233
    return result
234

    
235
  __getstate__ = ToDict
236

    
237
  @classmethod
238
  def FromDict(cls, val):
239
    """Create an object from a dictionary.
240

241
    This generic routine takes a dict, instantiates a new instance of
242
    the given class, and sets attributes based on the dict content.
243

244
    As for `ToDict`, this does not work if the class has children
245
    who are ConfigObjects themselves (e.g. the nics list in an
246
    Instance), in which case the object should subclass the function
247
    and alter the objects.
248

249
    """
250
    if not isinstance(val, dict):
251
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
252
                                      " expected dict, got %s" % type(val))
253
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
254
    obj = cls(**val_str) # pylint: disable=W0142
255
    return obj
256

    
257
  def Copy(self):
258
    """Makes a deep copy of the current object and its children.
259

260
    """
261
    dict_form = self.ToDict()
262
    clone_obj = self.__class__.FromDict(dict_form)
263
    return clone_obj
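
  # Minimal sketch (not part of the original module) of the ToDict/FromDict
  # round trip; "_Example" and its slots are made up for illustration only.
  #
  #   class _Example(ConfigObject):
  #     __slots__ = ["alpha", "beta"]
  #
  #   data = _Example(alpha=1).ToDict()      # {"alpha": 1}, None slots skipped
  #   clone = _Example.FromDict(data)
  #   clone.beta                             # None: unset slots read as None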
264

    
265
  def __repr__(self):
266
    """Implement __repr__ for ConfigObjects."""
267
    return repr(self.ToDict())
268

    
269
  def UpgradeConfig(self):
270
    """Fill defaults for missing configuration values.
271

272
    This method will be called at configuration load time, and its
273
    implementation will be object dependent.
274

275
    """
276
    pass
277

    
278

    
279
class TaggableObject(ConfigObject):
280
  """An generic class supporting tags.
281

282
  """
283
  __slots__ = ["tags"]
284
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
285

    
286
  @classmethod
287
  def ValidateTag(cls, tag):
288
    """Check if a tag is valid.
289

290
    If the tag is invalid, an errors.TagError will be raised. The
291
    function has no return value.
292

293
    """
294
    if not isinstance(tag, basestring):
295
      raise errors.TagError("Invalid tag type (not a string)")
296
    if len(tag) > constants.MAX_TAG_LEN:
297
      raise errors.TagError("Tag too long (>%d characters)" %
298
                            constants.MAX_TAG_LEN)
299
    if not tag:
300
      raise errors.TagError("Tags cannot be empty")
301
    if not cls.VALID_TAG_RE.match(tag):
302
      raise errors.TagError("Tag contains invalid characters")
303

    
304
  def GetTags(self):
305
    """Return the tags list.
306

307
    """
308
    tags = getattr(self, "tags", None)
309
    if tags is None:
310
      tags = self.tags = set()
311
    return tags
312

    
313
  def AddTag(self, tag):
314
    """Add a new tag.
315

316
    """
317
    self.ValidateTag(tag)
318
    tags = self.GetTags()
319
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
320
      raise errors.TagError("Too many tags")
321
    self.GetTags().add(tag)
322

    
323
  def RemoveTag(self, tag):
324
    """Remove a tag.
325

326
    """
327
    self.ValidateTag(tag)
328
    tags = self.GetTags()
329
    try:
330
      tags.remove(tag)
331
    except KeyError:
332
      raise errors.TagError("Tag not found")
333

    
334
  def ToDict(self):
335
    """Taggable-object-specific conversion to standard python types.
336

337
    This replaces the tags set with a list.
338

339
    """
340
    bo = super(TaggableObject, self).ToDict()
341

    
342
    tags = bo.get("tags", None)
343
    if isinstance(tags, set):
344
      bo["tags"] = list(tags)
345
    return bo
346

    
347
  @classmethod
348
  def FromDict(cls, val):
349
    """Custom function for instances.
350

351
    """
352
    obj = super(TaggableObject, cls).FromDict(val)
353
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
354
      obj.tags = set(obj.tags)
355
    return obj
356

    
357

    
358
class MasterNetworkParameters(ConfigObject):
359
  """Network configuration parameters for the master
360

361
  @ivar name: master name
362
  @ivar ip: master IP
363
  @ivar netmask: master netmask
364
  @ivar netdev: master network device
365
  @ivar ip_family: master IP family
366

367
  """
368
  __slots__ = [
369
    "name",
370
    "ip",
371
    "netmask",
372
    "netdev",
373
    "ip_family",
374
    ]
375

    
376

    
377
class ConfigData(ConfigObject):
378
  """Top-level config object."""
379
  __slots__ = [
380
    "version",
381
    "cluster",
382
    "nodes",
383
    "nodegroups",
384
    "instances",
385
    "networks",
386
    "serial_no",
387
    ] + _TIMESTAMPS
388

    
389
  def ToDict(self):
390
    """Custom function for top-level config data.
391

392
    This just replaces the list of instances, nodes and the cluster
393
    with standard python types.
394

395
    """
396
    mydict = super(ConfigData, self).ToDict()
397
    mydict["cluster"] = mydict["cluster"].ToDict()
398
    for key in "nodes", "instances", "nodegroups", "networks":
399
      mydict[key] = outils.ContainerToDicts(mydict[key])
400

    
401
    return mydict
402

    
403
  @classmethod
404
  def FromDict(cls, val):
405
    """Custom function for top-level config data
406

407
    """
408
    obj = super(ConfigData, cls).FromDict(val)
409
    obj.cluster = Cluster.FromDict(obj.cluster)
410
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
411
    obj.instances = \
412
      outils.ContainerFromDicts(obj.instances, dict, Instance)
413
    obj.nodegroups = \
414
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
415
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
416
    return obj
417

    
418
  def HasAnyDiskOfType(self, dev_type):
419
    """Check if in there is at disk of the given type in the configuration.
420

421
    @type dev_type: L{constants.LDS_BLOCK}
422
    @param dev_type: the type to look for
423
    @rtype: boolean
424
    @return: boolean indicating if a disk of the given type was found or not
425

426
    """
427
    for instance in self.instances.values():
428
      for disk in instance.disks:
429
        if disk.IsBasedOnDiskType(dev_type):
430
          return True
431
    return False
432

    
433
  def UpgradeConfig(self):
434
    """Fill defaults for missing configuration values.
435

436
    """
437
    self.cluster.UpgradeConfig()
438
    for node in self.nodes.values():
439
      node.UpgradeConfig()
440
    for instance in self.instances.values():
441
      instance.UpgradeConfig()
442
    if self.nodegroups is None:
443
      self.nodegroups = {}
444
    for nodegroup in self.nodegroups.values():
445
      nodegroup.UpgradeConfig()
446
    if self.cluster.drbd_usermode_helper is None:
447
      # To decide if we set a helper, let's check if at least one instance has
448
      # a DRBD disk. This does not cover all the possible scenarios but it
449
      # gives a good approximation.
450
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
451
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
452
    if self.networks is None:
453
      self.networks = {}
454
    for network in self.networks.values():
455
      network.UpgradeConfig()
456
    self._UpgradeEnabledDiskTemplates()
457

    
458
  def _UpgradeEnabledDiskTemplates(self):
459
    """Upgrade the cluster's enabled disk templates by inspecting the currently
460
       enabled and/or used disk templates.
461

462
    """
463
    # enabled_disk_templates in the cluster config were introduced in 2.8.
464
    # Remove this code once upgrading from earlier versions is deprecated.
465
    if not self.cluster.enabled_disk_templates:
466
      template_set = \
467
        set([inst.disk_template for inst in self.instances.values()])
468
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
469
      if self.cluster.volume_group_name:
470
        template_set.add(constants.DT_DRBD8)
471
        template_set.add(constants.DT_PLAIN)
472
      # FIXME: Adapt this when dis/enabling at configure time is removed.
473
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
474
      # might currently not be used.
475
      if constants.ENABLE_FILE_STORAGE:
476
        template_set.add(constants.DT_FILE)
477
      if constants.ENABLE_SHARED_FILE_STORAGE:
478
        template_set.add(constants.DT_SHARED_FILE)
479
      # Set enabled_disk_templates to the inferred disk templates. Order them
480
      # according to a preference list that is based on Ganeti's history of
481
      # supported disk templates.
482
      self.cluster.enabled_disk_templates = []
483
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
484
        if preferred_template in template_set:
485
          self.cluster.enabled_disk_templates.append(preferred_template)
486
          template_set.remove(preferred_template)
487
      self.cluster.enabled_disk_templates.extend(list(template_set))
488

    
489

    
490
class NIC(ConfigObject):
491
  """Config object representing a network card."""
492
  __slots__ = ["name", "mac", "ip", "network",
493
               "nicparams", "netinfo", "pci"] + _UUID
494

    
495
  @classmethod
496
  def CheckParameterSyntax(cls, nicparams):
497
    """Check the given parameters for validity.
498

499
    @type nicparams:  dict
500
    @param nicparams: dictionary with parameter names/values
501
    @raise errors.ConfigurationError: when a parameter is not valid
502

503
    """
504
    mode = nicparams[constants.NIC_MODE]
505
    if (mode not in constants.NIC_VALID_MODES and
506
        mode != constants.VALUE_AUTO):
507
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
508

    
509
    if (mode == constants.NIC_MODE_BRIDGED and
510
        not nicparams[constants.NIC_LINK]):
511
      raise errors.ConfigurationError("Missing bridged NIC link")
512

    
513
  @classmethod
514
  def FromDict(cls, val):
515
    """Custom function for NICs.
516

517
    Remove deprecated idx. Add dummy UUID if not found.
518
    Needed for old runtime files.
519

520
    """
521
    if "idx" in val:
522
      del val["idx"]
523
    obj = super(NIC, cls).FromDict(val)
524
    return obj
525

    
526

    
527
class Disk(ConfigObject):
528
  """Config object representing a block device."""
529
  __slots__ = ["name", "dev_type", "logical_id", "physical_id",
530
               "children", "iv_name", "size", "mode", "params", "pci"] + _UUID
531

    
532
  def CreateOnSecondary(self):
533
    """Test if this device needs to be created on a secondary node."""
534
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
535

    
536
  def AssembleOnSecondary(self):
537
    """Test if this device needs to be assembled on a secondary node."""
538
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
539

    
540
  def OpenOnSecondary(self):
541
    """Test if this device needs to be opened on a secondary node."""
542
    return self.dev_type in (constants.LD_LV,)
543

    
544
  def StaticDevPath(self):
545
    """Return the device path if this device type has a static one.
546

547
    Some devices (LVM for example) live always at the same /dev/ path,
548
    irrespective of their status. For such devices, we return this
549
    path, for others we return None.
550

551
    @warning: The path returned is not a normalized pathname; callers
552
        should check that it is a valid path.
553

554
    """
555
    if self.dev_type == constants.LD_LV:
556
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
557
    elif self.dev_type == constants.LD_BLOCKDEV:
558
      return self.logical_id[1]
559
    elif self.dev_type == constants.LD_RBD:
560
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
561
    return None
562

    
563
  def ChildrenNeeded(self):
564
    """Compute the needed number of children for activation.
565

566
    This method will return either -1 (all children) or a non-negative
567
    number denoting the minimum number of children needed for
568
    activation (only mirrored devices will usually return >=0).
569

570
    Currently, only DRBD8 supports diskless activation (therefore we
571
    return 0), for all others we keep the previous semantics and return
572
    -1.
573

574
    """
575
    if self.dev_type == constants.LD_DRBD8:
576
      return 0
577
    return -1
578

    
579
  def IsBasedOnDiskType(self, dev_type):
580
    """Check if the disk or its children are based on the given type.
581

582
    @type dev_type: L{constants.LDS_BLOCK}
583
    @param dev_type: the type to look for
584
    @rtype: boolean
585
    @return: boolean indicating if a device of the given type was found or not
586

587
    """
588
    if self.children:
589
      for child in self.children:
590
        if child.IsBasedOnDiskType(dev_type):
591
          return True
592
    return self.dev_type == dev_type
593

    
594
  def GetNodes(self, node):
595
    """This function returns the nodes this device lives on.
596

597
    Given the node on which the parent of the device lives (or, in
598
    case of a top-level device, the primary node of the device's
599
    instance), this function will return a list of nodes on which this
600
    device needs to (or can) be assembled.
601

602
    """
603
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
604
                         constants.LD_BLOCKDEV, constants.LD_RBD,
605
                         constants.LD_EXT]:
606
      result = [node]
607
    elif self.dev_type in constants.LDS_DRBD:
608
      result = [self.logical_id[0], self.logical_id[1]]
609
      if node not in result:
610
        raise errors.ConfigurationError("DRBD device passed unknown node")
611
    else:
612
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
613
    return result
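
  # Illustrative example (not part of the original module), with made-up node
  # names: for a DRBD disk the two nodes stored in the logical_id are
  # returned, for node-local disk types only the node passed in is returned.
  #
  #   drbd_disk.GetNodes("node1.example.com")
  #   # -> ["node1.example.com", "node2.example.com"]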
614

    
615
  def ComputeNodeTree(self, parent_node):
616
    """Compute the node/disk tree for this disk and its children.
617

618
    This method, given the node on which the parent disk lives, will
619
    return the list of all (node, disk) pairs which describe the disk
620
    tree in the most compact way. For example, a drbd/lvm stack
621
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
622
    which represents all the top-level devices on the nodes.
623

624
    """
625
    my_nodes = self.GetNodes(parent_node)
626
    result = [(node, self) for node in my_nodes]
627
    if not self.children:
628
      # leaf device
629
      return result
630
    for node in my_nodes:
631
      for child in self.children:
632
        child_result = child.ComputeNodeTree(node)
633
        if len(child_result) == 1:
634
          # child (and all its descendants) is simple, doesn't split
635
          # over multiple hosts, so we don't need to describe it, our
636
          # own entry for this node describes it completely
637
          continue
638
        else:
639
          # check if child nodes differ from my nodes; note that
640
          # subdisk can differ from the child itself, and be instead
641
          # one of its descendants
642
          for subnode, subdisk in child_result:
643
            if subnode not in my_nodes:
644
              result.append((subnode, subdisk))
645
            # otherwise child is under our own node, so we ignore this
646
            # entry (but probably the other results in the list will
647
            # be different)
648
    return result
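
  # Illustrative example (not part of the original module), with made-up node
  # names: a DRBD8 disk with LV children on "nodeA"/"nodeB" is reported only
  # through the top-level device on each node, since the LVs do not add nodes.
  #
  #   drbd_disk.ComputeNodeTree("nodeA")
  #   # -> [("nodeA", drbd_disk), ("nodeB", drbd_disk)]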
649

    
650
  def ComputeGrowth(self, amount):
651
    """Compute the per-VG growth requirements.
652

653
    This only works for VG-based disks.
654

655
    @type amount: integer
656
    @param amount: the desired increase in (user-visible) disk space
657
    @rtype: dict
658
    @return: a dictionary of volume-groups and the required size
659

660
    """
661
    if self.dev_type == constants.LD_LV:
662
      return {self.logical_id[0]: amount}
663
    elif self.dev_type == constants.LD_DRBD8:
664
      if self.children:
665
        return self.children[0].ComputeGrowth(amount)
666
      else:
667
        return {}
668
    else:
669
      # Other disk types do not require VG space
670
      return {}
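
  # Illustrative example (not part of the original module), with a made-up
  # volume group name: a plain LV disk needs the space in its own VG, while a
  # DRBD8 disk delegates the computation to its data LV child.
  #
  #   lv_disk = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"))
  #   lv_disk.ComputeGrowth(1024)
  #   # -> {"xenvg": 1024}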
671

    
672
  def RecordGrow(self, amount):
673
    """Update the size of this disk after growth.
674

675
    This method recurses over the disks's children and updates their
676
    size correspondigly. The method needs to be kept in sync with the
677
    actual algorithms from bdev.
678

679
    """
680
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
681
                         constants.LD_RBD, constants.LD_EXT):
682
      self.size += amount
683
    elif self.dev_type == constants.LD_DRBD8:
684
      if self.children:
685
        self.children[0].RecordGrow(amount)
686
      self.size += amount
687
    else:
688
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
689
                                   " disk type %s" % self.dev_type)
690

    
691
  def Update(self, size=None, mode=None):
692
    """Apply changes to size and mode.
693

694
    """
695
    if self.dev_type == constants.LD_DRBD8:
696
      if self.children:
697
        self.children[0].Update(size=size, mode=mode)
698
    else:
699
      assert not self.children
700

    
701
    if size is not None:
702
      self.size = size
703
    if mode is not None:
704
      self.mode = mode
705

    
706
  def UnsetSize(self):
707
    """Sets recursively the size to zero for the disk and its children.
708

709
    """
710
    if self.children:
711
      for child in self.children:
712
        child.UnsetSize()
713
    self.size = 0
714

    
715
  def SetPhysicalID(self, target_node, nodes_ip):
716
    """Convert the logical ID to the physical ID.
717

718
    This is used only for drbd, which needs ip/port configuration.
719

720
    The routine descends down and updates its children also, because
721
    this helps when only the top device is passed to the remote
722
    node.
723

724
    Arguments:
725
      - target_node: the node we wish to configure for
726
      - nodes_ip: a mapping of node name to ip
727

728
    The target_node must exist in nodes_ip, and must be one of the
729
    nodes in the logical ID for each of the DRBD devices encountered
730
    in the disk tree.
731

732
    """
733
    if self.children:
734
      for child in self.children:
735
        child.SetPhysicalID(target_node, nodes_ip)
736

    
737
    if self.logical_id is None and self.physical_id is not None:
738
      return
739
    if self.dev_type in constants.LDS_DRBD:
740
      pnode, snode, port, pminor, sminor, secret = self.logical_id
741
      if target_node not in (pnode, snode):
742
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
743
                                        target_node)
744
      pnode_ip = nodes_ip.get(pnode, None)
745
      snode_ip = nodes_ip.get(snode, None)
746
      if pnode_ip is None or snode_ip is None:
747
        raise errors.ConfigurationError("Can't find primary or secondary node"
748
                                        " for %s" % str(self))
749
      p_data = (pnode_ip, port)
750
      s_data = (snode_ip, port)
751
      if pnode == target_node:
752
        self.physical_id = p_data + s_data + (pminor, secret)
753
      else: # it must be secondary, we tested above
754
        self.physical_id = s_data + p_data + (sminor, secret)
755
    else:
756
      self.physical_id = self.logical_id
757
    return
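
  # Illustrative example (not part of the original module), with made-up
  # nodes, port and secret: for a DRBD disk with
  # logical_id ("nodeA", "nodeB", 11000, 0, 1, "secret"), calling
  # SetPhysicalID("nodeA", {"nodeA": "192.0.2.1", "nodeB": "192.0.2.2"})
  # sets physical_id to ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret"),
  # i.e. (own ip, port, peer ip, port, own minor, secret).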
758

    
759
  def ToDict(self):
760
    """Disk-specific conversion to standard python types.
761

762
    This replaces the children lists of objects with lists of
763
    standard python types.
764

765
    """
766
    bo = super(Disk, self).ToDict()
767

    
768
    for attr in ("children",):
769
      alist = bo.get(attr, None)
770
      if alist:
771
        bo[attr] = outils.ContainerToDicts(alist)
772
    return bo
773

    
774
  @classmethod
775
  def FromDict(cls, val):
776
    """Custom function for Disks
777

778
    """
779
    if "idx" in val:
780
      del val["idx"]
781
    obj = super(Disk, cls).FromDict(val)
782
    if obj.children:
783
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
784
    if obj.logical_id and isinstance(obj.logical_id, list):
785
      obj.logical_id = tuple(obj.logical_id)
786
    if obj.physical_id and isinstance(obj.physical_id, list):
787
      obj.physical_id = tuple(obj.physical_id)
788
    if obj.dev_type in constants.LDS_DRBD:
789
      # we need a tuple of length six here
790
      if len(obj.logical_id) < 6:
791
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
792
    return obj
793

    
794
  def __str__(self):
795
    """Custom str() formatter for disks.
796

797
    """
798
    if self.dev_type == constants.LD_LV:
799
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
800
    elif self.dev_type in constants.LDS_DRBD:
801
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
802
      val = "<DRBD8("
803
      if self.physical_id is None:
804
        phy = "unconfigured"
805
      else:
806
        phy = ("configured as %s:%s %s:%s" %
807
               (self.physical_id[0], self.physical_id[1],
808
                self.physical_id[2], self.physical_id[3]))
809

    
810
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
811
              (node_a, minor_a, node_b, minor_b, port, phy))
812
      if self.children and self.children.count(None) == 0:
813
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
814
      else:
815
        val += "no local storage"
816
    else:
817
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
818
             (self.dev_type, self.logical_id, self.physical_id, self.children))
819
    if self.iv_name is None:
820
      val += ", not visible"
821
    else:
822
      val += ", visible as /dev/%s" % self.iv_name
823
    if isinstance(self.size, int):
824
      val += ", size=%dm)>" % self.size
825
    else:
826
      val += ", size='%s')>" % (self.size,)
827
    return val
828

    
829
  def Verify(self):
830
    """Checks that this disk is correctly configured.
831

832
    """
833
    all_errors = []
834
    if self.mode not in constants.DISK_ACCESS_SET:
835
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
836
    return all_errors
837

    
838
  def UpgradeConfig(self):
839
    """Fill defaults for missing configuration values.
840

841
    """
842
    if self.children:
843
      for child in self.children:
844
        child.UpgradeConfig()
845

    
846
    # FIXME: Make this configurable in Ganeti 2.7
847
    # Params should be an empty dict that gets filled any time needed
848
    # In case of ext template we allow arbitrary params that should not
849
    # be overridden during a config reload/upgrade.
850
    if not self.params or not isinstance(self.params, dict):
851
      self.params = {}
852

    
853
    # add here config upgrade for this disk
854

    
855
    # If the file driver is empty, fill it up with the default value
856
    if self.dev_type == constants.LD_FILE and self.physical_id[0] is None:
857
      self.physical_id[0] = constants.FD_DEFAULT
858

    
859
  @staticmethod
860
  def ComputeLDParams(disk_template, disk_params):
861
    """Computes Logical Disk parameters from Disk Template parameters.
862

863
    @type disk_template: string
864
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
865
    @type disk_params: dict
866
    @param disk_params: disk template parameters;
867
                        dict(template_name -> parameters)
868
    @rtype: list(dict)
869
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
870
      contains the LD parameters of the node. The tree is flattened in-order.
871

872
    """
873
    if disk_template not in constants.DISK_TEMPLATES:
874
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
875

    
876
    assert disk_template in disk_params
877

    
878
    result = list()
879
    dt_params = disk_params[disk_template]
880
    if disk_template == constants.DT_DRBD8:
881
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
882
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
883
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
884
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
885
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
886
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
887
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
888
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
889
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
890
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
891
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
892
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
893
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
894
        }))
895

    
896
      # data LV
897
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
898
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
899
        }))
900

    
901
      # metadata LV
902
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
903
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
904
        }))
905

    
906
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
907
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
908

    
909
    elif disk_template == constants.DT_PLAIN:
910
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
911
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
912
        }))
913

    
914
    elif disk_template == constants.DT_BLOCK:
915
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
916

    
917
    elif disk_template == constants.DT_RBD:
918
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
919
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
920
        }))
921

    
922
    elif disk_template == constants.DT_EXT:
923
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
924

    
925
    return result
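
  # Illustrative example (not part of the original module): for the plain LVM
  # template only the stripe count is taken from the template parameters, so
  # (assuming they are present) the result is a single dict of LV parameters.
  #
  #   Disk.ComputeLDParams(constants.DT_PLAIN,
  #                        {constants.DT_PLAIN: {constants.LV_STRIPES: 2}})
  #   # -> [{..., constants.LDP_STRIPES: 2}]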
926

    
927

    
928
class InstancePolicy(ConfigObject):
929
  """Config object representing instance policy limits dictionary.
930

931
  Note that this object is not actually used in the config, it's just
932
  used as a placeholder for a few functions.
933

934
  """
935
  @classmethod
936
  def CheckParameterSyntax(cls, ipolicy, check_std):
937
    """ Check the instance policy for validity.
938

939
    @type ipolicy: dict
940
    @param ipolicy: dictionary with min/max/std specs and policies
941
    @type check_std: bool
942
    @param check_std: Whether to check std value or just assume compliance
943
    @raise errors.ConfigurationError: when the policy is not legal
944

945
    """
946
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
947
    if constants.IPOLICY_DTS in ipolicy:
948
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
949
    for key in constants.IPOLICY_PARAMETERS:
950
      if key in ipolicy:
951
        InstancePolicy.CheckParameter(key, ipolicy[key])
952
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
953
    if wrong_keys:
954
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
955
                                      utils.CommaJoin(wrong_keys))
956

    
957
  @classmethod
958
  def _CheckIncompleteSpec(cls, spec, keyname):
959
    missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
960
    if missing_params:
961
      msg = ("Missing instance specs parameters for %s: %s" %
962
             (keyname, utils.CommaJoin(missing_params)))
963
      raise errors.ConfigurationError(msg)
964

    
965
  @classmethod
966
  def CheckISpecSyntax(cls, ipolicy, check_std):
967
    """Check the instance policy specs for validity.
968

969
    @type ipolicy: dict
970
    @param ipolicy: dictionary with min/max/std specs
971
    @type check_std: bool
972
    @param check_std: Whether to check std value or just assume compliance
973
    @raise errors.ConfigurationError: when specs are not valid
974

975
    """
976
    if constants.ISPECS_MINMAX not in ipolicy:
977
      # Nothing to check
978
      return
979

    
980
    if check_std and constants.ISPECS_STD not in ipolicy:
981
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
982
      raise errors.ConfigurationError(msg)
983
    stdspec = ipolicy.get(constants.ISPECS_STD)
984
    if check_std:
985
      InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
986

    
987
    if not ipolicy[constants.ISPECS_MINMAX]:
988
      raise errors.ConfigurationError("Empty minmax specifications")
989
    std_is_good = False
990
    for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
991
      missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
992
      if missing:
993
        msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
994
        raise errors.ConfigurationError(msg)
995
      for (key, spec) in minmaxspecs.items():
996
        InstancePolicy._CheckIncompleteSpec(spec, key)
997

    
998
      spec_std_ok = True
999
      for param in constants.ISPECS_PARAMETERS:
1000
        par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
1001
                                                           param, check_std)
1002
        spec_std_ok = spec_std_ok and par_std_ok
1003
      std_is_good = std_is_good or spec_std_ok
1004
    if not std_is_good:
1005
      raise errors.ConfigurationError("Invalid std specifications")
1006

    
1007
  @classmethod
1008
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1009
    """Check the instance policy specs for validity on a given key.
1010

1011
    We check if the instance specs make sense for a given key, that is
1012
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
1013

1014
    @type minmaxspecs: dict
1015
    @param minmaxspecs: dictionary with min and max instance spec
1016
    @type stdspec: dict
1017
    @param stdspec: dictionary with standard instance spec
1018
    @type name: string
1019
    @param name: what are the limits for
1020
    @type check_std: bool
1021
    @param check_std: Whether to check std value or just assume compliance
1022
    @rtype: bool
1023
    @return: C{True} when specs are valid, C{False} when standard spec for the
1024
        given name is not valid
1025
    @raise errors.ConfigurationError: when min/max specs for the given name
1026
        are not valid
1027

1028
    """
1029
    minspec = minmaxspecs[constants.ISPECS_MIN]
1030
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1031
    min_v = minspec[name]
1032
    max_v = maxspec[name]
1033

    
1034
    if min_v > max_v:
1035
      err = ("Invalid specification of min/max values for %s: %s/%s" %
1036
             (name, min_v, max_v))
1037
      raise errors.ConfigurationError(err)
1038
    elif check_std:
1039
      std_v = stdspec.get(name, min_v)
1040
      return std_v >= min_v and std_v <= max_v
1041
    else:
1042
      return True
1043

    
1044
  @classmethod
1045
  def CheckDiskTemplates(cls, disk_templates):
1046
    """Checks the disk templates for validity.
1047

1048
    """
1049
    if not disk_templates:
1050
      raise errors.ConfigurationError("Instance policy must contain" +
1051
                                      " at least one disk template")
1052
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1053
    if wrong:
1054
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1055
                                      utils.CommaJoin(wrong))
1056

    
1057
  @classmethod
1058
  def CheckParameter(cls, key, value):
1059
    """Checks a parameter.
1060

1061
    Currently we expect all parameters to be float values.
1062

1063
    """
1064
    try:
1065
      float(value)
1066
    except (TypeError, ValueError), err:
1067
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1068
                                      " '%s', error: %s" % (key, value, err))
1069

    
1070

    
1071
class Instance(TaggableObject):
1072
  """Config object representing an instance."""
1073
  __slots__ = [
1074
    "name",
1075
    "primary_node",
1076
    "os",
1077
    "hypervisor",
1078
    "hvparams",
1079
    "beparams",
1080
    "osparams",
1081
    "admin_state",
1082
    "nics",
1083
    "disks",
1084
    "disk_template",
1085
    "disks_active",
1086
    "network_port",
1087
    "serial_no",
1088
    ] + _TIMESTAMPS + _UUID
1089

    
1090
  def _ComputeSecondaryNodes(self):
1091
    """Compute the list of secondary nodes.
1092

1093
    This is a simple wrapper over _ComputeAllNodes.
1094

1095
    """
1096
    all_nodes = set(self._ComputeAllNodes())
1097
    all_nodes.discard(self.primary_node)
1098
    return tuple(all_nodes)
1099

    
1100
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1101
                             "List of names of secondary nodes")
1102

    
1103
  def _ComputeAllNodes(self):
1104
    """Compute the list of all nodes.
1105

1106
    Since the data is already there (in the drbd disks), keeping it as
1107
    a separate normal attribute is redundant and if not properly
1108
    synchronised can cause problems. Thus it's better to compute it
1109
    dynamically.
1110

1111
    """
1112
    def _Helper(nodes, device):
1113
      """Recursively computes nodes given a top device."""
1114
      if device.dev_type in constants.LDS_DRBD:
1115
        nodea, nodeb = device.logical_id[:2]
1116
        nodes.add(nodea)
1117
        nodes.add(nodeb)
1118
      if device.children:
1119
        for child in device.children:
1120
          _Helper(nodes, child)
1121

    
1122
    all_nodes = set()
1123
    all_nodes.add(self.primary_node)
1124
    for device in self.disks:
1125
      _Helper(all_nodes, device)
1126
    return tuple(all_nodes)
1127

    
1128
  all_nodes = property(_ComputeAllNodes, None, None,
1129
                       "List of names of all the nodes of the instance")
1130

    
1131
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1132
    """Provide a mapping of nodes to LVs this instance owns.
1133

1134
    This function figures out what logical volumes should belong on
1135
    which nodes, recursing through a device tree.
1136

1137
    @param lvmap: optional dictionary to receive the
1138
        'node' : ['lv', ...] data.
1139

1140
    @return: None if lvmap arg is given, otherwise, a dictionary of
1141
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1142
        volumeN is of the form "vg_name/lv_name", compatible with
1143
        GetVolumeList()
1144

1145
    """
1146
    if node is None:
1147
      node = self.primary_node
1148

    
1149
    if lvmap is None:
1150
      lvmap = {
1151
        node: [],
1152
        }
1153
      ret = lvmap
1154
    else:
1155
      if not node in lvmap:
1156
        lvmap[node] = []
1157
      ret = None
1158

    
1159
    if not devs:
1160
      devs = self.disks
1161

    
1162
    for dev in devs:
1163
      if dev.dev_type == constants.LD_LV:
1164
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1165

    
1166
      elif dev.dev_type in constants.LDS_DRBD:
1167
        if dev.children:
1168
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1169
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1170

    
1171
      elif dev.children:
1172
        self.MapLVsByNode(lvmap, dev.children, node)
1173

    
1174
    return ret
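
  # Illustrative example (not part of the original module), with made-up node,
  # VG and LV names: for an instance with a single plain LV disk the mapping
  # is node name -> list of "vg_name/lv_name" entries.
  #
  #   instance.MapLVsByNode()
  #   # -> {"node1.example.com": ["xenvg/disk0-data"]}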
1175

    
1176
  def FindDisk(self, idx):
1177
    """Find a disk given having a specified index.
1178

1179
    This is just a wrapper that does validation of the index.
1180

1181
    @type idx: int
1182
    @param idx: the disk index
1183
    @rtype: L{Disk}
1184
    @return: the corresponding disk
1185
    @raise errors.OpPrereqError: when the given index is not valid
1186

1187
    """
1188
    try:
1189
      idx = int(idx)
1190
      return self.disks[idx]
1191
    except (TypeError, ValueError), err:
1192
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1193
                                 errors.ECODE_INVAL)
1194
    except IndexError:
1195
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1196
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1197
                                 errors.ECODE_INVAL)
1198

    
1199
  def ToDict(self):
1200
    """Instance-specific conversion to standard python types.
1201

1202
    This replaces the children lists of objects with lists of standard
1203
    python types.
1204

1205
    """
1206
    bo = super(Instance, self).ToDict()
1207

    
1208
    for attr in "nics", "disks":
1209
      alist = bo.get(attr, None)
1210
      if alist:
1211
        nlist = outils.ContainerToDicts(alist)
1212
      else:
1213
        nlist = []
1214
      bo[attr] = nlist
1215
    return bo
1216

    
1217
  @classmethod
1218
  def FromDict(cls, val):
1219
    """Custom function for instances.
1220

1221
    """
1222
    if "admin_state" not in val:
1223
      if val.get("admin_up", False):
1224
        val["admin_state"] = constants.ADMINST_UP
1225
      else:
1226
        val["admin_state"] = constants.ADMINST_DOWN
1227
    if "admin_up" in val:
1228
      del val["admin_up"]
1229
    obj = super(Instance, cls).FromDict(val)
1230
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1231
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1232
    return obj
1233

    
1234
  def UpgradeConfig(self):
1235
    """Fill defaults for missing configuration values.
1236

1237
    """
1238
    for nic in self.nics:
1239
      nic.UpgradeConfig()
1240
    for disk in self.disks:
1241
      disk.UpgradeConfig()
1242
    if self.hvparams:
1243
      for key in constants.HVC_GLOBALS:
1244
        try:
1245
          del self.hvparams[key]
1246
        except KeyError:
1247
          pass
1248
    if self.osparams is None:
1249
      self.osparams = {}
1250
    UpgradeBeParams(self.beparams)
1251
    if self.disks_active is None:
1252
      self.disks_active = self.admin_state == constants.ADMINST_UP
1253

    
1254

    
1255
class OS(ConfigObject):
1256
  """Config object representing an operating system.
1257

1258
  @type supported_parameters: list
1259
  @ivar supported_parameters: a list of tuples, name and description,
1260
      containing the parameters supported by this OS
1261

1262
  @type VARIANT_DELIM: string
1263
  @cvar VARIANT_DELIM: the variant delimiter
1264

1265
  """
1266
  __slots__ = [
1267
    "name",
1268
    "path",
1269
    "api_versions",
1270
    "create_script",
1271
    "export_script",
1272
    "import_script",
1273
    "rename_script",
1274
    "verify_script",
1275
    "supported_variants",
1276
    "supported_parameters",
1277
    ]
1278

    
1279
  VARIANT_DELIM = "+"
1280

    
1281
  @classmethod
1282
  def SplitNameVariant(cls, name):
1283
    """Splits the name into the proper name and variant.
1284

1285
    @param name: the OS (unprocessed) name
1286
    @rtype: list
1287
    @return: a list of two elements; if the original name didn't
1288
        contain a variant, it's returned as an empty string
1289

1290
    """
1291
    nv = name.split(cls.VARIANT_DELIM, 1)
1292
    if len(nv) == 1:
1293
      nv.append("")
1294
    return nv
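
  # Illustrative usage (not part of the original module): the "+" delimiter
  # separates the base OS name from its variant.
  #
  #   OS.SplitNameVariant("debootstrap+wheezy")   # -> ["debootstrap", "wheezy"]
  #   OS.SplitNameVariant("debootstrap")          # -> ["debootstrap", ""]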
1295

    
1296
  @classmethod
1297
  def GetName(cls, name):
1298
    """Returns the proper name of the os (without the variant).
1299

1300
    @param name: the OS (unprocessed) name
1301

1302
    """
1303
    return cls.SplitNameVariant(name)[0]
1304

    
1305
  @classmethod
1306
  def GetVariant(cls, name):
1307
    """Returns the variant the os (without the base name).
1308

1309
    @param name: the OS (unprocessed) name
1310

1311
    """
1312
    return cls.SplitNameVariant(name)[1]
1313

    
1314

    
1315
class ExtStorage(ConfigObject):
1316
  """Config object representing an External Storage Provider.
1317

1318
  """
1319
  __slots__ = [
1320
    "name",
1321
    "path",
1322
    "create_script",
1323
    "remove_script",
1324
    "grow_script",
1325
    "attach_script",
1326
    "detach_script",
1327
    "setinfo_script",
1328
    "verify_script",
1329
    "snapshot_script",
1330
    "supported_parameters",
1331
    ]
1332

    
1333

    
1334
class NodeHvState(ConfigObject):
1335
  """Hypvervisor state on a node.
1336

1337
  @ivar mem_total: Total amount of memory
1338
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1339
    available)
1340
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1341
    rounding
1342
  @ivar mem_inst: Memory used by instances living on node
1343
  @ivar cpu_total: Total node CPU core count
1344
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1345

1346
  """
1347
  __slots__ = [
1348
    "mem_total",
1349
    "mem_node",
1350
    "mem_hv",
1351
    "mem_inst",
1352
    "cpu_total",
1353
    "cpu_node",
1354
    ] + _TIMESTAMPS
1355

    
1356

    
1357
class NodeDiskState(ConfigObject):
1358
  """Disk state on a node.
1359

1360
  """
1361
  __slots__ = [
1362
    "total",
1363
    "reserved",
1364
    "overhead",
1365
    ] + _TIMESTAMPS
1366

    
1367

    
1368
class Node(TaggableObject):
1369
  """Config object representing a node.
1370

1371
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1372
  @ivar hv_state_static: Hypervisor state overridden by user
1373
  @ivar disk_state: Disk state (e.g. free space)
1374
  @ivar disk_state_static: Disk state overridden by user
1375

1376
  """
1377
  __slots__ = [
1378
    "name",
1379
    "primary_ip",
1380
    "secondary_ip",
1381
    "serial_no",
1382
    "master_candidate",
1383
    "offline",
1384
    "drained",
1385
    "group",
1386
    "master_capable",
1387
    "vm_capable",
1388
    "ndparams",
1389
    "powered",
1390
    "hv_state",
1391
    "hv_state_static",
1392
    "disk_state",
1393
    "disk_state_static",
1394
    ] + _TIMESTAMPS + _UUID
1395

    
1396
  def UpgradeConfig(self):
1397
    """Fill defaults for missing configuration values.
1398

1399
    """
1400
    # pylint: disable=E0203
1401
    # because these are "defined" via slots, not manually
1402
    if self.master_capable is None:
1403
      self.master_capable = True
1404

    
1405
    if self.vm_capable is None:
1406
      self.vm_capable = True
1407

    
1408
    if self.ndparams is None:
1409
      self.ndparams = {}
1410
    # And remove any global parameter
1411
    for key in constants.NDC_GLOBALS:
1412
      if key in self.ndparams:
1413
        logging.warning("Ignoring %s node parameter for node %s",
1414
                        key, self.name)
1415
        del self.ndparams[key]
1416

    
1417
    if self.powered is None:
1418
      self.powered = True
1419

    
1420
  def ToDict(self):
1421
    """Custom function for serializing.
1422

1423
    """
1424
    data = super(Node, self).ToDict()
1425

    
1426
    hv_state = data.get("hv_state", None)
1427
    if hv_state is not None:
1428
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1429

    
1430
    disk_state = data.get("disk_state", None)
1431
    if disk_state is not None:
1432
      data["disk_state"] = \
1433
        dict((key, outils.ContainerToDicts(value))
1434
             for (key, value) in disk_state.items())
1435

    
1436
    return data
1437

    
1438
  @classmethod
1439
  def FromDict(cls, val):
1440
    """Custom function for deserializing.
1441

1442
    """
1443
    obj = super(Node, cls).FromDict(val)
1444

    
1445
    if obj.hv_state is not None:
1446
      obj.hv_state = \
1447
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1448

    
1449
    if obj.disk_state is not None:
1450
      obj.disk_state = \
1451
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1452
             for (key, value) in obj.disk_state.items())
1453

    
1454
    return obj
1455

    
1456

    
1457
class NodeGroup(TaggableObject):
1458
  """Config object representing a node group."""
1459
  __slots__ = [
1460
    "name",
1461
    "members",
1462
    "ndparams",
1463
    "diskparams",
1464
    "ipolicy",
1465
    "serial_no",
1466
    "hv_state_static",
1467
    "disk_state_static",
1468
    "alloc_policy",
1469
    "networks",
1470
    ] + _TIMESTAMPS + _UUID
1471

    
1472
  def ToDict(self):
1473
    """Custom function for nodegroup.
1474

1475
    This discards the members object, which gets recalculated and is only kept
1476
    in memory.
1477

1478
    """
1479
    mydict = super(NodeGroup, self).ToDict()
1480
    del mydict["members"]
1481
    return mydict
1482

    
1483
  @classmethod
1484
  def FromDict(cls, val):
1485
    """Custom function for nodegroup.
1486

1487
    The members slot is initialized to an empty list, upon deserialization.
1488

1489
    """
1490
    obj = super(NodeGroup, cls).FromDict(val)
1491
    obj.members = []
1492
    return obj
1493

    
1494
  def UpgradeConfig(self):
1495
    """Fill defaults for missing configuration values.
1496

1497
    """
1498
    if self.ndparams is None:
1499
      self.ndparams = {}
1500

    
1501
    if self.serial_no is None:
1502
      self.serial_no = 1
1503

    
1504
    if self.alloc_policy is None:
1505
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1506

    
1507
    # We only update mtime, and not ctime, since we would not be able
1508
    # to provide a correct value for creation time.
1509
    if self.mtime is None:
1510
      self.mtime = time.time()
1511

    
1512
    if self.diskparams is None:
1513
      self.diskparams = {}
1514
    if self.ipolicy is None:
1515
      self.ipolicy = MakeEmptyIPolicy()
1516

    
1517
    if self.networks is None:
1518
      self.networks = {}
1519

    
1520
  def FillND(self, node):
1521
    """Return filled out ndparams for L{objects.Node}
1522

1523
    @type node: L{objects.Node}
1524
    @param node: A Node object to fill
1525
    @return: a copy of the node's ndparams with defaults filled
1526

1527
    """
1528
    return self.SimpleFillND(node.ndparams)
1529

    
1530
  def SimpleFillND(self, ndparams):
1531
    """Fill a given ndparams dict with defaults.
1532

1533
    @type ndparams: dict
1534
    @param ndparams: the dict to fill
1535
    @rtype: dict
1536
    @return: a copy of the passed in ndparams with missing keys filled
1537
        from the node group defaults
1538

1539
    """
1540
    return FillDict(self.ndparams, ndparams)
1541

    
1542

    
1543
class Cluster(TaggableObject):
1544
  """Config object representing the cluster."""
1545
  __slots__ = [
1546
    "serial_no",
1547
    "rsahostkeypub",
1548
    "dsahostkeypub",
1549
    "highest_used_port",
1550
    "tcpudp_port_pool",
1551
    "mac_prefix",
1552
    "volume_group_name",
1553
    "reserved_lvs",
1554
    "drbd_usermode_helper",
1555
    "default_bridge",
1556
    "default_hypervisor",
1557
    "master_node",
1558
    "master_ip",
1559
    "master_netdev",
1560
    "master_netmask",
1561
    "use_external_mip_script",
1562
    "cluster_name",
1563
    "file_storage_dir",
1564
    "shared_file_storage_dir",
1565
    "enabled_hypervisors",
1566
    "hvparams",
1567
    "ipolicy",
1568
    "os_hvp",
1569
    "beparams",
1570
    "osparams",
1571
    "nicparams",
1572
    "ndparams",
1573
    "diskparams",
1574
    "candidate_pool_size",
1575
    "modify_etc_hosts",
1576
    "modify_ssh_setup",
1577
    "maintain_node_health",
1578
    "uid_pool",
1579
    "default_iallocator",
1580
    "hidden_os",
1581
    "blacklisted_os",
1582
    "primary_ip_family",
1583
    "prealloc_wipe_disks",
1584
    "hv_state_static",
1585
    "disk_state_static",
1586
    "enabled_disk_templates",
1587
    ] + _TIMESTAMPS + _UUID
1588

    
1589
  def UpgradeConfig(self):
1590
    """Fill defaults for missing configuration values.
1591

1592
    """
1593
    # pylint: disable=E0203
1594
    # because these are "defined" via slots, not manually
1595
    if self.hvparams is None:
1596
      self.hvparams = constants.HVC_DEFAULTS
1597
    else:
1598
      for hypervisor in self.hvparams:
1599
        self.hvparams[hypervisor] = FillDict(
1600
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1601

    
1602
    if self.os_hvp is None:
1603
      self.os_hvp = {}
1604

    
1605
    # osparams added before 2.2
1606
    if self.osparams is None:
1607
      self.osparams = {}
1608

    
1609
    self.ndparams = UpgradeNDParams(self.ndparams)
1610

    
1611
    self.beparams = UpgradeGroupedParams(self.beparams,
1612
                                         constants.BEC_DEFAULTS)
1613
    for beparams_group in self.beparams:
1614
      UpgradeBeParams(self.beparams[beparams_group])
1615

    
1616
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
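    # tcpudp_port_pool is kept as a set at runtime but serialized as a plain
    # list in ToDict() above, since the JSON-based configuration format has
    # no set type; convert it back to a set here.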
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
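    # Layering example with hypothetical parameter names: if the cluster-wide
    # hvparams for "kvm" are {"kernel_path": "/vmlinuz", "acpi": True} and
    # os_hvp["debian"]["kvm"] is {"kernel_path": "/vmlinuz-deb"}, then
    # GetHVDefaults("kvm", os_name="debian") returns
    # {"kernel_path": "/vmlinuz-deb", "acpi": True}: per-OS overrides are
    # layered on top of the cluster-wide hypervisor defaults.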
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill with default values
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
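    # Precedence example with hypothetical OS parameter names: with cluster
    # osparams {"debian": {"mirror": "ftp.de", "dhcp": "yes"},
    # "debian+testing": {"mirror": "ftp.us"}}, calling
    # SimpleFillOS("debian+testing", {"dhcp": "no"}) returns
    # {"mirror": "ftp.us", "dhcp": "no"}: instance parameters win over the
    # variant, which in turn wins over the base OS.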
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
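    # The group parameters are applied on top of the cluster defaults and the
    # node parameters on top of those, so node > node group > cluster.  With
    # hypothetical values: cluster ndparams {"spindle_count": 1,
    # "oob_program": None}, group {"spindle_count": 2} and node
    # {"oob_program": "/bin/oob"}, the result is
    # {"spindle_count": 2, "oob_program": "/bin/oob"}.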
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
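    # The assertions below encode, per console kind, which fields must be
    # set: roughly, "message" consoles need a message, SSH consoles need
    # host, user and command, VNC consoles need host, port and display, and
    # SPICE consoles need host and port.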
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: string
    @param prefix: Prefix to prepend to the dict entries

    """
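    # For example, with prefix="NIC0_" (a hypothetical caller-supplied
    # prefix), a network with a subnet set would yield keys such as
    # "NIC0_NETWORK_NAME", "NIC0_NETWORK_UUID", "NIC0_NETWORK_TAGS" and
    # "NIC0_NETWORK_SUBNET"; the IPv6, gateway and MAC prefix entries are
    # only added when the corresponding fields are set.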
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
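  # Typical round-trip (hypothetical section and option names):
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("instance")
  #   cfg.set("instance", "name", "web1")
  #   data = cfg.Dumps()
  #   SerializableConfigParser.Loads(data).get("instance", "name") == "web1"
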
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
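    # The PV is considered empty when at most about 1 MiB appears to be in
    # use; the one-MiB slack absorbs LVM metadata and rounding of the
    # reported sizes (for instance, size=10240.0 with free=10239.5 still
    # counts as empty).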
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)