root / lib / objects.py @ 62fed51b

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding the customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
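# Illustrative sketch (made-up parameter names): how FillDict layers a custom
# dict over a defaults dict.
def _ExampleFillDict():
  defaults = {"vcpus": 1, "memory": 128, "auto_balance": True}
  custom = {"memory": 256}
  # custom values win, missing keys fall back to the defaults
  assert FillDict(defaults, custom) == \
    {"vcpus": 1, "memory": 256, "auto_balance": True}
  # skip_keys drops entries from the result entirely
  assert FillDict(defaults, custom, skip_keys=["auto_balance"]) == \
    {"vcpus": 1, "memory": 256}
  # neither input is modified, since a deep copy of the defaults is taken
  assert defaults["memory"] == 128 and custom == {"memory": 256}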
83

    
84

    
85
def _FillMinMaxISpecs(default_specs, custom_specs):
86
  assert frozenset(default_specs.keys()) == constants.ISPECS_MINMAX_KEYS
87
  ret_specs = {}
88
  for key in constants.ISPECS_MINMAX_KEYS:
89
    ret_specs[key] = FillDict(default_specs[key],
90
                              custom_specs.get(key, {}))
91
  return ret_specs
92

    
93

    
94
def FillIPolicy(default_ipolicy, custom_ipolicy):
95
  """Fills an instance policy with defaults.
96

97
  """
98
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
99
  ret_dict = {}
100
  # Instance specs
101
  new_mm = _FillMinMaxISpecs(default_ipolicy[constants.ISPECS_MINMAX],
102
                             custom_ipolicy.get(constants.ISPECS_MINMAX, {}))
103
  ret_dict[constants.ISPECS_MINMAX] = new_mm
104
  new_std = FillDict(default_ipolicy[constants.ISPECS_STD],
105
                     custom_ipolicy.get(constants.ISPECS_STD, {}))
106
  ret_dict[constants.ISPECS_STD] = new_std
107
  # list items
108
  for key in [constants.IPOLICY_DTS]:
109
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
110
  # other items which we know we can directly copy (immutables)
111
  for key in constants.IPOLICY_PARAMETERS:
112
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
113

    
114
  return ret_dict
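# Illustrative sketch: overriding only the allowed disk templates of an
# instance policy, while everything else falls back to the built-in defaults.
def _ExampleFillIPolicy():
  custom = MakeEmptyIPolicy()
  custom[constants.IPOLICY_DTS] = [constants.DT_PLAIN, constants.DT_DRBD8]
  filled = FillIPolicy(constants.IPOLICY_DEFAULTS, custom)
  # the list item is taken from the custom policy...
  assert filled[constants.IPOLICY_DTS] == [constants.DT_PLAIN,
                                           constants.DT_DRBD8]
  # ...while the empty std specs are completed from the defaults
  assert filled[constants.ISPECS_STD] == \
    constants.IPOLICY_DEFAULTS[constants.ISPECS_STD]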
115

    
116

    
117
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
118
  """Fills the disk parameter defaults.
119

120
  @see: L{FillDict} for parameters and return value
121

122
  """
123
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
124

    
125
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
126
                             skip_keys=skip_keys))
127
              for dt in constants.DISK_TEMPLATES)
128

    
129

    
130
def UpgradeGroupedParams(target, defaults):
131
  """Update all groups for the target parameter.
132

133
  @type target: dict of dicts
134
  @param target: {group: {parameter: value}}
135
  @type defaults: dict
136
  @param defaults: default parameter values
137

138
  """
139
  if target is None:
140
    target = {constants.PP_DEFAULT: defaults}
141
  else:
142
    for group in target:
143
      target[group] = FillDict(defaults, target[group])
144
  return target
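# Illustrative sketch (made-up parameter values): UpgradeGroupedParams either
# creates a single default group or fills every existing group with defaults.
def _ExampleUpgradeGroupedParams():
  defaults = {"link": "xen-br0", "mode": "bridged"}
  # no groups yet: a single default group holding the defaults is created
  assert UpgradeGroupedParams(None, defaults) == \
    {constants.PP_DEFAULT: defaults}
  # existing groups are completed with the missing default values
  target = {constants.PP_DEFAULT: {"link": "br100"}}
  upgraded = UpgradeGroupedParams(target, defaults)
  assert upgraded[constants.PP_DEFAULT] == {"link": "br100", "mode": "bridged"}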
145

    
146

    
147
def UpgradeBeParams(target):
148
  """Update the be parameters dict to the new format.
149

150
  @type target: dict
151
  @param target: "be" parameters dict
152

153
  """
154
  if constants.BE_MEMORY in target:
155
    memory = target[constants.BE_MEMORY]
156
    target[constants.BE_MAXMEM] = memory
157
    target[constants.BE_MINMEM] = memory
158
    del target[constants.BE_MEMORY]
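# Illustrative sketch: the legacy single "memory" backend parameter is split
# in place into the newer maxmem/minmem pair.
def _ExampleUpgradeBeParams():
  beparams = {constants.BE_MEMORY: 512}
  UpgradeBeParams(beparams)
  assert constants.BE_MEMORY not in beparams
  assert beparams[constants.BE_MAXMEM] == 512
  assert beparams[constants.BE_MINMEM] == 512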
159

    
160

    
161
def UpgradeDiskParams(diskparams):
162
  """Upgrade the disk parameters.
163

164
  @type diskparams: dict
165
  @param diskparams: disk parameters to upgrade
166
  @rtype: dict
167
  @return: the upgraded disk parameters dict
168

169
  """
170
  if not diskparams:
171
    result = {}
172
  else:
173
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
174

    
175
  return result
176

    
177

    
178
def UpgradeNDParams(ndparams):
179
  """Upgrade ndparams structure.
180

181
  @type ndparams: dict
182
  @param ndparams: node parameters to upgrade
183
  @rtype: dict
184
  @return: the upgraded node parameters dict
185

186
  """
187
  if ndparams is None:
188
    ndparams = {}
189

    
190
  if (constants.ND_OOB_PROGRAM in ndparams and
191
      ndparams[constants.ND_OOB_PROGRAM] is None):
192
    # will be reset by the line below
193
    del ndparams[constants.ND_OOB_PROGRAM]
194
  return FillDict(constants.NDC_DEFAULTS, ndparams)
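# Illustrative sketch: explicit None OOB programs are dropped and the result
# is always completed from the node parameter defaults.
def _ExampleUpgradeNDParams():
  filled = UpgradeNDParams({constants.ND_OOB_PROGRAM: None})
  assert filled[constants.ND_OOB_PROGRAM] == \
    constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
  # passing None simply yields a copy of the defaults
  assert UpgradeNDParams(None) == FillDict(constants.NDC_DEFAULTS, {})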
195

    
196

    
197
def MakeEmptyIPolicy():
198
  """Create empty IPolicy dictionary.
199

200
  """
201
  return {
202
    constants.ISPECS_MINMAX: {
203
      constants.ISPECS_MIN: {},
204
      constants.ISPECS_MAX: {},
205
      },
206
    constants.ISPECS_STD: {},
207
    }
208

    
209

    
210
class ConfigObject(outils.ValidatedSlots):
211
  """A generic config object.
212

213
  It has the following properties:
214

215
    - provides somewhat safe recursive unpickling and pickling for its classes
216
    - unset attributes which are defined in slots are always returned
217
      as None instead of raising an error
218

219
  Classes derived from this must always declare __slots__ (we use many
220
  config objects and the memory reduction is useful)
221

222
  """
223
  __slots__ = []
224

    
225
  def __getattr__(self, name):
226
    if name not in self.GetAllSlots():
227
      raise AttributeError("Invalid object attribute %s.%s" %
228
                           (type(self).__name__, name))
229
    return None
230

    
231
  def __setstate__(self, state):
232
    slots = self.GetAllSlots()
233
    for name in state:
234
      if name in slots:
235
        setattr(self, name, state[name])
236

    
237
  def Validate(self):
238
    """Validates the slots.
239

240
    """
241

    
242
  def ToDict(self):
243
    """Convert to a dict holding only standard python types.
244

245
    The generic routine just dumps all of this object's attributes in
246
    a dict. It does not work if the class has children who are
247
    ConfigObjects themselves (e.g. the nics list in an Instance), in
248
    which case the class should override this method in order to
249
    make sure all objects returned are only standard python types.
250

251
    """
252
    result = {}
253
    for name in self.GetAllSlots():
254
      value = getattr(self, name, None)
255
      if value is not None:
256
        result[name] = value
257
    return result
258

    
259
  __getstate__ = ToDict
260

    
261
  @classmethod
262
  def FromDict(cls, val):
263
    """Create an object from a dictionary.
264

265
    This generic routine takes a dict, instantiates a new instance of
266
    the given class, and sets attributes based on the dict content.
267

268
    As for `ToDict`, this does not work if the class has children
269
    who are ConfigObjects themselves (e.g. the nics list in an
270
    Instance), in which case the class should override this method
271
    and alter the objects.
272

273
    """
274
    if not isinstance(val, dict):
275
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
276
                                      " expected dict, got %s" % type(val))
277
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
278
    obj = cls(**val_str) # pylint: disable=W0142
279
    return obj
280

    
281
  def Copy(self):
282
    """Makes a deep copy of the current object and its children.
283

284
    """
285
    dict_form = self.ToDict()
286
    clone_obj = self.__class__.FromDict(dict_form)
287
    return clone_obj
288

    
289
  def __repr__(self):
290
    """Implement __repr__ for ConfigObjects."""
291
    return repr(self.ToDict())
292

    
293
  def UpgradeConfig(self):
294
    """Fill defaults for missing configuration values.
295

296
    This method will be called at configuration load time, and its
297
    implementation will be object dependent.
298

299
    """
300
    pass
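# Illustrative sketch (hypothetical subclass): the basic ConfigObject contract
# of declared __slots__, None for unset attributes and a ToDict/FromDict
# round trip.
class _ExampleParams(ConfigObject):
  __slots__ = ["alpha", "beta"]


def _ExampleConfigObjectUsage():
  obj = _ExampleParams(alpha=10)
  # unset slots read as None instead of raising AttributeError
  assert obj.beta is None
  # ToDict serialises only the attributes that are actually set
  assert obj.ToDict() == {"alpha": 10}
  # FromDict (and Copy) rebuild an equivalent object from plain python types
  clone = _ExampleParams.FromDict(obj.ToDict())
  assert clone.alpha == 10 and clone.beta is None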
301

    
302

    
303
class TaggableObject(ConfigObject):
304
  """An generic class supporting tags.
305

306
  """
307
  __slots__ = ["tags"]
308
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
309

    
310
  @classmethod
311
  def ValidateTag(cls, tag):
312
    """Check if a tag is valid.
313

314
    If the tag is invalid, an errors.TagError will be raised. The
315
    function has no return value.
316

317
    """
318
    if not isinstance(tag, basestring):
319
      raise errors.TagError("Invalid tag type (not a string)")
320
    if len(tag) > constants.MAX_TAG_LEN:
321
      raise errors.TagError("Tag too long (>%d characters)" %
322
                            constants.MAX_TAG_LEN)
323
    if not tag:
324
      raise errors.TagError("Tags cannot be empty")
325
    if not cls.VALID_TAG_RE.match(tag):
326
      raise errors.TagError("Tag contains invalid characters")
327

    
328
  def GetTags(self):
329
    """Return the tags list.
330

331
    """
332
    tags = getattr(self, "tags", None)
333
    if tags is None:
334
      tags = self.tags = set()
335
    return tags
336

    
337
  def AddTag(self, tag):
338
    """Add a new tag.
339

340
    """
341
    self.ValidateTag(tag)
342
    tags = self.GetTags()
343
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
344
      raise errors.TagError("Too many tags")
345
    self.GetTags().add(tag)
346

    
347
  def RemoveTag(self, tag):
348
    """Remove a tag.
349

350
    """
351
    self.ValidateTag(tag)
352
    tags = self.GetTags()
353
    try:
354
      tags.remove(tag)
355
    except KeyError:
356
      raise errors.TagError("Tag not found")
357

    
358
  def ToDict(self):
359
    """Taggable-object-specific conversion to standard python types.
360

361
    This replaces the tags set with a list.
362

363
    """
364
    bo = super(TaggableObject, self).ToDict()
365

    
366
    tags = bo.get("tags", None)
367
    if isinstance(tags, set):
368
      bo["tags"] = list(tags)
369
    return bo
370

    
371
  @classmethod
372
  def FromDict(cls, val):
373
    """Custom function for instances.
374

375
    """
376
    obj = super(TaggableObject, cls).FromDict(val)
377
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
378
      obj.tags = set(obj.tags)
379
    return obj
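# Illustrative sketch (hypothetical subclass and tag names): tag validation
# and the set-based tag storage provided by TaggableObject.
class _ExampleTaggable(TaggableObject):
  __slots__ = []


def _ExampleTagUsage():
  obj = _ExampleTaggable()
  obj.AddTag("web")
  obj.AddTag("env:prod")
  assert obj.GetTags() == set(["web", "env:prod"])
  obj.RemoveTag("web")
  assert obj.GetTags() == set(["env:prod"])
  # characters outside [\w.+*/:@-] are rejected
  try:
    _ExampleTaggable.ValidateTag("not a valid tag!")
  except errors.TagError:
    pass
  else:
    raise AssertionError("expected a TagError")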
380

    
381

    
382
class MasterNetworkParameters(ConfigObject):
383
  """Network configuration parameters for the master
384

385
  @ivar name: master name
386
  @ivar ip: master IP
387
  @ivar netmask: master netmask
388
  @ivar netdev: master network device
389
  @ivar ip_family: master IP family
390

391
  """
392
  __slots__ = [
393
    "name",
394
    "ip",
395
    "netmask",
396
    "netdev",
397
    "ip_family",
398
    ]
399

    
400

    
401
class ConfigData(ConfigObject):
402
  """Top-level config object."""
403
  __slots__ = [
404
    "version",
405
    "cluster",
406
    "nodes",
407
    "nodegroups",
408
    "instances",
409
    "networks",
410
    "serial_no",
411
    ] + _TIMESTAMPS
412

    
413
  def ToDict(self):
414
    """Custom function for top-level config data.
415

416
    This just replaces the list of instances, nodes and the cluster
417
    with standard python types.
418

419
    """
420
    mydict = super(ConfigData, self).ToDict()
421
    mydict["cluster"] = mydict["cluster"].ToDict()
422
    for key in "nodes", "instances", "nodegroups", "networks":
423
      mydict[key] = outils.ContainerToDicts(mydict[key])
424

    
425
    return mydict
426

    
427
  @classmethod
428
  def FromDict(cls, val):
429
    """Custom function for top-level config data
430

431
    """
432
    obj = super(ConfigData, cls).FromDict(val)
433
    obj.cluster = Cluster.FromDict(obj.cluster)
434
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
435
    obj.instances = \
436
      outils.ContainerFromDicts(obj.instances, dict, Instance)
437
    obj.nodegroups = \
438
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
439
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
440
    return obj
441

    
442
  def HasAnyDiskOfType(self, dev_type):
443
    """Check if in there is at disk of the given type in the configuration.
444

445
    @type dev_type: L{constants.LDS_BLOCK}
446
    @param dev_type: the type to look for
447
    @rtype: boolean
448
    @return: boolean indicating if a disk of the given type was found or not
449

450
    """
451
    for instance in self.instances.values():
452
      for disk in instance.disks:
453
        if disk.IsBasedOnDiskType(dev_type):
454
          return True
455
    return False
456

    
457
  def UpgradeConfig(self):
458
    """Fill defaults for missing configuration values.
459

460
    """
461
    self.cluster.UpgradeConfig()
462
    for node in self.nodes.values():
463
      node.UpgradeConfig()
464
    for instance in self.instances.values():
465
      instance.UpgradeConfig()
466
    if self.nodegroups is None:
467
      self.nodegroups = {}
468
    for nodegroup in self.nodegroups.values():
469
      nodegroup.UpgradeConfig()
470
    if self.cluster.drbd_usermode_helper is None:
471
      # To decide if we set a helper, let's check if at least one instance has
472
      # a DRBD disk. This does not cover all the possible scenarios but it
473
      # gives a good approximation.
474
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
475
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
476
    if self.networks is None:
477
      self.networks = {}
478
    for network in self.networks.values():
479
      network.UpgradeConfig()
480
    self._UpgradeEnabledDiskTemplates()
481

    
482
  def _UpgradeEnabledDiskTemplates(self):
483
    """Upgrade the cluster's enabled disk templates by inspecting the currently
484
       enabled and/or used disk templates.
485

486
    """
487
    # enabled_disk_templates in the cluster config were introduced in 2.8.
488
    # Remove this code once upgrading from earlier versions is deprecated.
489
    if not self.cluster.enabled_disk_templates:
490
      template_set = \
491
        set([inst.disk_template for inst in self.instances.values()])
492
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
493
      if self.cluster.volume_group_name:
494
        template_set.add(constants.DT_DRBD8)
495
        template_set.add(constants.DT_PLAIN)
496
      # FIXME: Adapt this when dis/enabling at configure time is removed.
497
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
498
      # might currently not be used.
499
      if constants.ENABLE_FILE_STORAGE:
500
        template_set.add(constants.DT_FILE)
501
      if constants.ENABLE_SHARED_FILE_STORAGE:
502
        template_set.add(constants.DT_SHARED_FILE)
503
      # Set enabled_disk_templates to the inferred disk templates. Order them
504
      # according to a preference list that is based on Ganeti's history of
505
      # supported disk templates.
506
      self.cluster.enabled_disk_templates = []
507
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
508
        if preferred_template in template_set:
509
          self.cluster.enabled_disk_templates.append(preferred_template)
510
          template_set.remove(preferred_template)
511
      self.cluster.enabled_disk_templates.extend(list(template_set))
512

    
513

    
514
class NIC(ConfigObject):
515
  """Config object representing a network card."""
516
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
517

    
518
  @classmethod
519
  def CheckParameterSyntax(cls, nicparams):
520
    """Check the given parameters for validity.
521

522
    @type nicparams:  dict
523
    @param nicparams: dictionary with parameter names/value
524
    @raise errors.ConfigurationError: when a parameter is not valid
525

526
    """
527
    mode = nicparams[constants.NIC_MODE]
528
    if (mode not in constants.NIC_VALID_MODES and
529
        mode != constants.VALUE_AUTO):
530
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
531

    
532
    if (mode == constants.NIC_MODE_BRIDGED and
533
        not nicparams[constants.NIC_LINK]):
534
      raise errors.ConfigurationError("Missing bridged NIC link")
535

    
536

    
537
class Disk(ConfigObject):
538
  """Config object representing a block device."""
539
  __slots__ = ["name", "dev_type", "logical_id", "physical_id",
540
               "children", "iv_name", "size", "mode", "params"] + _UUID
541

    
542
  def CreateOnSecondary(self):
543
    """Test if this device needs to be created on a secondary node."""
544
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
545

    
546
  def AssembleOnSecondary(self):
547
    """Test if this device needs to be assembled on a secondary node."""
548
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
549

    
550
  def OpenOnSecondary(self):
551
    """Test if this device needs to be opened on a secondary node."""
552
    return self.dev_type in (constants.LD_LV,)
553

    
554
  def StaticDevPath(self):
555
    """Return the device path if this device type has a static one.
556

557
    Some devices (LVM for example) always live at the same /dev/ path,
558
    irrespective of their status. For such devices, we return this
559
    path, for others we return None.
560

561
    @warning: The path returned is not a normalized pathname; callers
562
        should check that it is a valid path.
563

564
    """
565
    if self.dev_type == constants.LD_LV:
566
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
567
    elif self.dev_type == constants.LD_BLOCKDEV:
568
      return self.logical_id[1]
569
    elif self.dev_type == constants.LD_RBD:
570
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
571
    return None
572

    
573
  def ChildrenNeeded(self):
574
    """Compute the needed number of children for activation.
575

576
    This method will return either -1 (all children) or a positive
577
    number denoting the minimum number of children needed for
578
    activation (only mirrored devices will usually return >=0).
579

580
    Currently, only DRBD8 supports diskless activation (therefore we
581
    return 0), for all other we keep the previous semantics and return
582
    -1.
583

584
    """
585
    if self.dev_type == constants.LD_DRBD8:
586
      return 0
587
    return -1
588

    
589
  def IsBasedOnDiskType(self, dev_type):
590
    """Check if the disk or its children are based on the given type.
591

592
    @type dev_type: L{constants.LDS_BLOCK}
593
    @param dev_type: the type to look for
594
    @rtype: boolean
595
    @return: boolean indicating if a device of the given type was found or not
596

597
    """
598
    if self.children:
599
      for child in self.children:
600
        if child.IsBasedOnDiskType(dev_type):
601
          return True
602
    return self.dev_type == dev_type
603

    
604
  def GetNodes(self, node):
605
    """This function returns the nodes this device lives on.
606

607
    Given the node on which the parent of the device lives (or, in
608
    case of a top-level device, the primary node of the device's
609
    instance), this function will return a list of nodes on which this
610
    device needs to (or can) be assembled.
611

612
    """
613
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
614
                         constants.LD_BLOCKDEV, constants.LD_RBD,
615
                         constants.LD_EXT]:
616
      result = [node]
617
    elif self.dev_type in constants.LDS_DRBD:
618
      result = [self.logical_id[0], self.logical_id[1]]
619
      if node not in result:
620
        raise errors.ConfigurationError("DRBD device passed unknown node")
621
    else:
622
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
623
    return result
624

    
625
  def ComputeNodeTree(self, parent_node):
626
    """Compute the node/disk tree for this disk and its children.
627

628
    This method, given the node on which the parent disk lives, will
629
    return the list of all (node, disk) pairs which describe the disk
630
    tree in the most compact way. For example, a drbd/lvm stack
631
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
632
    which represents all the top-level devices on the nodes.
633

634
    """
635
    my_nodes = self.GetNodes(parent_node)
636
    result = [(node, self) for node in my_nodes]
637
    if not self.children:
638
      # leaf device
639
      return result
640
    for node in my_nodes:
641
      for child in self.children:
642
        child_result = child.ComputeNodeTree(node)
643
        if len(child_result) == 1:
644
          # child (and all its descendants) is simple, doesn't split
645
          # over multiple hosts, so we don't need to describe it, our
646
          # own entry for this node describes it completely
647
          continue
648
        else:
649
          # check if child nodes differ from my nodes; note that
650
          # subdisk can differ from the child itself, and be instead
651
          # one of its descendants
652
          for subnode, subdisk in child_result:
653
            if subnode not in my_nodes:
654
              result.append((subnode, subdisk))
655
            # otherwise child is under our own node, so we ignore this
656
            # entry (but probably the other results in the list will
657
            # be different)
658
    return result
659

    
660
  def ComputeGrowth(self, amount):
661
    """Compute the per-VG growth requirements.
662

663
    This only works for VG-based disks.
664

665
    @type amount: integer
666
    @param amount: the desired increase in (user-visible) disk space
667
    @rtype: dict
668
    @return: a dictionary of volume-groups and the required size
669

670
    """
671
    if self.dev_type == constants.LD_LV:
672
      return {self.logical_id[0]: amount}
673
    elif self.dev_type == constants.LD_DRBD8:
674
      if self.children:
675
        return self.children[0].ComputeGrowth(amount)
676
      else:
677
        return {}
678
    else:
679
      # Other disk types do not require VG space
680
      return {}
681

    
682
  def RecordGrow(self, amount):
683
    """Update the size of this disk after growth.
684

685
    This method recurses over the disk's children and updates their
686
    size correspondingly. The method needs to be kept in sync with the
687
    actual algorithms from bdev.
688

689
    """
690
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
691
                         constants.LD_RBD, constants.LD_EXT):
692
      self.size += amount
693
    elif self.dev_type == constants.LD_DRBD8:
694
      if self.children:
695
        self.children[0].RecordGrow(amount)
696
      self.size += amount
697
    else:
698
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
699
                                   " disk type %s" % self.dev_type)
700

    
701
  def Update(self, size=None, mode=None):
702
    """Apply changes to size and mode.
703

704
    """
705
    if self.dev_type == constants.LD_DRBD8:
706
      if self.children:
707
        self.children[0].Update(size=size, mode=mode)
708
    else:
709
      assert not self.children
710

    
711
    if size is not None:
712
      self.size = size
713
    if mode is not None:
714
      self.mode = mode
715

    
716
  def UnsetSize(self):
717
    """Sets recursively the size to zero for the disk and its children.
718

719
    """
720
    if self.children:
721
      for child in self.children:
722
        child.UnsetSize()
723
    self.size = 0
724

    
725
  def SetPhysicalID(self, target_node, nodes_ip):
726
    """Convert the logical ID to the physical ID.
727

728
    This is used only for drbd, which needs ip/port configuration.
729

730
    The routine descends down and updates its children also, because
731
    this helps when only the top device is passed to the remote
732
    node.
733

734
    Arguments:
735
      - target_node: the node we wish to configure for
736
      - nodes_ip: a mapping of node name to ip
737

738
    The target_node must exist in nodes_ip, and must be one of the
739
    nodes in the logical ID for each of the DRBD devices encountered
740
    in the disk tree.
741

742
    """
743
    if self.children:
744
      for child in self.children:
745
        child.SetPhysicalID(target_node, nodes_ip)
746

    
747
    if self.logical_id is None and self.physical_id is not None:
748
      return
749
    if self.dev_type in constants.LDS_DRBD:
750
      pnode, snode, port, pminor, sminor, secret = self.logical_id
751
      if target_node not in (pnode, snode):
752
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
753
                                        target_node)
754
      pnode_ip = nodes_ip.get(pnode, None)
755
      snode_ip = nodes_ip.get(snode, None)
756
      if pnode_ip is None or snode_ip is None:
757
        raise errors.ConfigurationError("Can't find primary or secondary node"
758
                                        " for %s" % str(self))
759
      p_data = (pnode_ip, port)
760
      s_data = (snode_ip, port)
761
      if pnode == target_node:
762
        self.physical_id = p_data + s_data + (pminor, secret)
763
      else: # it must be secondary, we tested above
764
        self.physical_id = s_data + p_data + (sminor, secret)
765
    else:
766
      self.physical_id = self.logical_id
767
    return
768

    
769
  def ToDict(self):
770
    """Disk-specific conversion to standard python types.
771

772
    This replaces the children lists of objects with lists of
773
    standard python types.
774

775
    """
776
    bo = super(Disk, self).ToDict()
777

    
778
    for attr in ("children",):
779
      alist = bo.get(attr, None)
780
      if alist:
781
        bo[attr] = outils.ContainerToDicts(alist)
782
    return bo
783

    
784
  @classmethod
785
  def FromDict(cls, val):
786
    """Custom function for Disks
787

788
    """
789
    obj = super(Disk, cls).FromDict(val)
790
    if obj.children:
791
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
792
    if obj.logical_id and isinstance(obj.logical_id, list):
793
      obj.logical_id = tuple(obj.logical_id)
794
    if obj.physical_id and isinstance(obj.physical_id, list):
795
      obj.physical_id = tuple(obj.physical_id)
796
    if obj.dev_type in constants.LDS_DRBD:
797
      # we need a tuple of length six here
798
      if len(obj.logical_id) < 6:
799
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
800
    return obj
801

    
802
  def __str__(self):
803
    """Custom str() formatter for disks.
804

805
    """
806
    if self.dev_type == constants.LD_LV:
807
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
808
    elif self.dev_type in constants.LDS_DRBD:
809
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
810
      val = "<DRBD8("
811
      if self.physical_id is None:
812
        phy = "unconfigured"
813
      else:
814
        phy = ("configured as %s:%s %s:%s" %
815
               (self.physical_id[0], self.physical_id[1],
816
                self.physical_id[2], self.physical_id[3]))
817

    
818
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
819
              (node_a, minor_a, node_b, minor_b, port, phy))
820
      if self.children and self.children.count(None) == 0:
821
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
822
      else:
823
        val += "no local storage"
824
    else:
825
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
826
             (self.dev_type, self.logical_id, self.physical_id, self.children))
827
    if self.iv_name is None:
828
      val += ", not visible"
829
    else:
830
      val += ", visible as /dev/%s" % self.iv_name
831
    if isinstance(self.size, int):
832
      val += ", size=%dm)>" % self.size
833
    else:
834
      val += ", size='%s')>" % (self.size,)
835
    return val
836

    
837
  def Verify(self):
838
    """Checks that this disk is correctly configured.
839

840
    """
841
    all_errors = []
842
    if self.mode not in constants.DISK_ACCESS_SET:
843
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
844
    return all_errors
845

    
846
  def UpgradeConfig(self):
847
    """Fill defaults for missing configuration values.
848

849
    """
850
    if self.children:
851
      for child in self.children:
852
        child.UpgradeConfig()
853

    
854
    # FIXME: Make this configurable in Ganeti 2.7
855
    self.params = {}
856
    # add here config upgrade for this disk
857

    
858
  @staticmethod
859
  def ComputeLDParams(disk_template, disk_params):
860
    """Computes Logical Disk parameters from Disk Template parameters.
861

862
    @type disk_template: string
863
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
864
    @type disk_params: dict
865
    @param disk_params: disk template parameters;
866
                        dict(template_name -> parameters)
867
    @rtype: list(dict)
868
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
869
      contains the LD parameters of the node. The tree is flattened in-order.
870

871
    """
872
    if disk_template not in constants.DISK_TEMPLATES:
873
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
874

    
875
    assert disk_template in disk_params
876

    
877
    result = list()
878
    dt_params = disk_params[disk_template]
879
    if disk_template == constants.DT_DRBD8:
880
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
881
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
882
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
883
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
884
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
885
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
886
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
887
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
888
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
889
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
890
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
891
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
892
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
893
        }))
894

    
895
      # data LV
896
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
897
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
898
        }))
899

    
900
      # metadata LV
901
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
902
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
903
        }))
904

    
905
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
906
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
907

    
908
    elif disk_template == constants.DT_PLAIN:
909
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
910
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
911
        }))
912

    
913
    elif disk_template == constants.DT_BLOCK:
914
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
915

    
916
    elif disk_template == constants.DT_RBD:
917
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
918
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
919
        }))
920

    
921
    elif disk_template == constants.DT_EXT:
922
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
923

    
924
    return result
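# Illustrative sketches for the Disk methods above (hypothetical node, volume
# and secret names): ComputeNodeTree collapses a DRBD/LVM stack to the two
# top-level entries, and SetPhysicalID turns the DRBD logical ID into an
# IP-based physical ID as seen from target_node.
def _ExampleDiskNodeTree():
  data_lv = Disk(dev_type=constants.LD_LV, size=1024,
                 logical_id=("xenvg", "data"))
  meta_lv = Disk(dev_type=constants.LD_LV, size=128,
                 logical_id=("xenvg", "meta"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              logical_id=("node1", "node2", 11000, 0, 0, "secret"),
              children=[data_lv, meta_lv])
  # the LV leaves live on the same nodes as their parent, so they add no
  # extra entries of their own
  assert drbd.ComputeNodeTree("node1") == [("node1", drbd), ("node2", drbd)]


def _ExampleDiskSetPhysicalID():
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              logical_id=("node1", "node2", 11000, 0, 1, "secret"))
  drbd.SetPhysicalID("node1", {"node1": "192.0.2.1", "node2": "192.0.2.2"})
  # local (ip, port) first, then the peer, then the local minor and secret
  assert drbd.physical_id == \
    ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")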
925

    
926

    
927
class InstancePolicy(ConfigObject):
928
  """Config object representing instance policy limits dictionary.
929

930
  Note that this object is not actually used in the config, it's just
931
  used as a placeholder for a few functions.
932

933
  """
934
  @classmethod
935
  def CheckParameterSyntax(cls, ipolicy, check_std):
936
    """ Check the instance policy for validity.
937

938
    @type ipolicy: dict
939
    @param ipolicy: dictionary with min/max/std specs and policies
940
    @type check_std: bool
941
    @param check_std: Whether to check std value or just assume compliance
942
    @raise errors.ConfigurationError: when the policy is not legal
943

944
    """
945
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
946
    if constants.IPOLICY_DTS in ipolicy:
947
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
948
    for key in constants.IPOLICY_PARAMETERS:
949
      if key in ipolicy:
950
        InstancePolicy.CheckParameter(key, ipolicy[key])
951
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
952
    if wrong_keys:
953
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
954
                                      utils.CommaJoin(wrong_keys))
955

    
956
  @classmethod
957
  def CheckISpecSyntax(cls, ipolicy, check_std):
958
    """Check the instance policy specs for validity.
959

960
    @type ipolicy: dict
961
    @param ipolicy: dictionary with min/max/std specs
962
    @type check_std: bool
963
    @param check_std: Whether to check std value or just assume compliance
964
    @raise errors.ConfigurationError: when specs are not valid
965

966
    """
967
    if constants.ISPECS_MINMAX not in ipolicy:
968
      # Nothing to check
969
      return
970

    
971
    if check_std and constants.ISPECS_STD not in ipolicy:
972
      msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
973
      raise errors.ConfigurationError(msg)
974
    minmaxspecs = ipolicy[constants.ISPECS_MINMAX]
975
    stdspec = ipolicy.get(constants.ISPECS_STD)
976
    missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
977
    if missing:
978
      msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
979
      raise errors.ConfigurationError(msg)
980
    for param in constants.ISPECS_PARAMETERS:
981
      InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec, param,
982
                                            check_std)
983

    
984
  @classmethod
985
  def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
986
    """Check the instance policy specs for validity on a given key.
987

988
    We check if the instance specs make sense for a given key, that is
989
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
990

991
    @type minmaxspecs: dict
992
    @param minmaxspecs: dictionary with min and max instance spec
993
    @type stdspec: dict
994
    @param stdspec: dictionary with standard instance spec
995
    @type name: string
996
    @param name: the name of the parameter the limits apply to
997
    @type check_std: bool
998
    @param check_std: Whether to check std value or just assume compliance
999
    @raise errors.ConfigurationError: when specs for the given name are not
1000
        valid
1001

1002
    """
1003
    minspec = minmaxspecs[constants.ISPECS_MIN]
1004
    maxspec = minmaxspecs[constants.ISPECS_MAX]
1005
    min_v = minspec.get(name, 0)
1006

    
1007
    if check_std:
1008
      std_v = stdspec.get(name, min_v)
1009
      std_msg = std_v
1010
    else:
1011
      std_v = min_v
1012
      std_msg = "-"
1013

    
1014
    max_v = maxspec.get(name, std_v)
1015
    if min_v > std_v or std_v > max_v:
1016
      err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
1017
             (name,
1018
              minspec.get(name, "-"),
1019
              maxspec.get(name, "-"),
1020
              std_msg))
1021
      raise errors.ConfigurationError(err)
1022

    
1023
  @classmethod
1024
  def CheckDiskTemplates(cls, disk_templates):
1025
    """Checks the disk templates for validity.
1026

1027
    """
1028
    if not disk_templates:
1029
      raise errors.ConfigurationError("Instance policy must contain" +
1030
                                      " at least one disk template")
1031
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1032
    if wrong:
1033
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1034
                                      utils.CommaJoin(wrong))
1035

    
1036
  @classmethod
1037
  def CheckParameter(cls, key, value):
1038
    """Checks a parameter.
1039

1040
    Currently we expect all parameters to be float values.
1041

1042
    """
1043
    try:
1044
      float(value)
1045
    except (TypeError, ValueError), err:
1046
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1047
                                      " '%s', error: %s" % (key, value, err))
1048

    
1049

    
1050
class Instance(TaggableObject):
1051
  """Config object representing an instance."""
1052
  __slots__ = [
1053
    "name",
1054
    "primary_node",
1055
    "os",
1056
    "hypervisor",
1057
    "hvparams",
1058
    "beparams",
1059
    "osparams",
1060
    "admin_state",
1061
    "nics",
1062
    "disks",
1063
    "disk_template",
1064
    "network_port",
1065
    "serial_no",
1066
    ] + _TIMESTAMPS + _UUID
1067

    
1068
  def _ComputeSecondaryNodes(self):
1069
    """Compute the list of secondary nodes.
1070

1071
    This is a simple wrapper over _ComputeAllNodes.
1072

1073
    """
1074
    all_nodes = set(self._ComputeAllNodes())
1075
    all_nodes.discard(self.primary_node)
1076
    return tuple(all_nodes)
1077

    
1078
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1079
                             "List of names of secondary nodes")
1080

    
1081
  def _ComputeAllNodes(self):
1082
    """Compute the list of all nodes.
1083

1084
    Since the data is already there (in the drbd disks), keeping it as
1085
    a separate normal attribute is redundant and if not properly
1086
    synchronised can cause problems. Thus it's better to compute it
1087
    dynamically.
1088

1089
    """
1090
    def _Helper(nodes, device):
1091
      """Recursively computes nodes given a top device."""
1092
      if device.dev_type in constants.LDS_DRBD:
1093
        nodea, nodeb = device.logical_id[:2]
1094
        nodes.add(nodea)
1095
        nodes.add(nodeb)
1096
      if device.children:
1097
        for child in device.children:
1098
          _Helper(nodes, child)
1099

    
1100
    all_nodes = set()
1101
    all_nodes.add(self.primary_node)
1102
    for device in self.disks:
1103
      _Helper(all_nodes, device)
1104
    return tuple(all_nodes)
1105

    
1106
  all_nodes = property(_ComputeAllNodes, None, None,
1107
                       "List of names of all the nodes of the instance")
1108

    
1109
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1110
    """Provide a mapping of nodes to LVs this instance owns.
1111

1112
    This function figures out what logical volumes should belong on
1113
    which nodes, recursing through a device tree.
1114

1115
    @param lvmap: optional dictionary to receive the
1116
        'node' : ['lv', ...] data.
1117

1118
    @return: None if lvmap arg is given, otherwise, a dictionary of
1119
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1120
        volumeN is of the form "vg_name/lv_name", compatible with
1121
        GetVolumeList()
1122

1123
    """
1124
    if node is None:
1125
      node = self.primary_node
1126

    
1127
    if lvmap is None:
1128
      lvmap = {
1129
        node: [],
1130
        }
1131
      ret = lvmap
1132
    else:
1133
      if not node in lvmap:
1134
        lvmap[node] = []
1135
      ret = None
1136

    
1137
    if not devs:
1138
      devs = self.disks
1139

    
1140
    for dev in devs:
1141
      if dev.dev_type == constants.LD_LV:
1142
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1143

    
1144
      elif dev.dev_type in constants.LDS_DRBD:
1145
        if dev.children:
1146
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1147
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1148

    
1149
      elif dev.children:
1150
        self.MapLVsByNode(lvmap, dev.children, node)
1151

    
1152
    return ret
1153

    
1154
  def FindDisk(self, idx):
1155
    """Find a disk given having a specified index.
1156

1157
    This is just a wrapper that does validation of the index.
1158

1159
    @type idx: int
1160
    @param idx: the disk index
1161
    @rtype: L{Disk}
1162
    @return: the corresponding disk
1163
    @raise errors.OpPrereqError: when the given index is not valid
1164

1165
    """
1166
    try:
1167
      idx = int(idx)
1168
      return self.disks[idx]
1169
    except (TypeError, ValueError), err:
1170
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1171
                                 errors.ECODE_INVAL)
1172
    except IndexError:
1173
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1174
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1175
                                 errors.ECODE_INVAL)
1176

    
1177
  def ToDict(self):
1178
    """Instance-specific conversion to standard python types.
1179

1180
    This replaces the children lists of objects with lists of standard
1181
    python types.
1182

1183
    """
1184
    bo = super(Instance, self).ToDict()
1185

    
1186
    for attr in "nics", "disks":
1187
      alist = bo.get(attr, None)
1188
      if alist:
1189
        nlist = outils.ContainerToDicts(alist)
1190
      else:
1191
        nlist = []
1192
      bo[attr] = nlist
1193
    return bo
1194

    
1195
  @classmethod
1196
  def FromDict(cls, val):
1197
    """Custom function for instances.
1198

1199
    """
1200
    if "admin_state" not in val:
1201
      if val.get("admin_up", False):
1202
        val["admin_state"] = constants.ADMINST_UP
1203
      else:
1204
        val["admin_state"] = constants.ADMINST_DOWN
1205
    if "admin_up" in val:
1206
      del val["admin_up"]
1207
    obj = super(Instance, cls).FromDict(val)
1208
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1209
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1210
    return obj
1211

    
1212
  def UpgradeConfig(self):
1213
    """Fill defaults for missing configuration values.
1214

1215
    """
1216
    for nic in self.nics:
1217
      nic.UpgradeConfig()
1218
    for disk in self.disks:
1219
      disk.UpgradeConfig()
1220
    if self.hvparams:
1221
      for key in constants.HVC_GLOBALS:
1222
        try:
1223
          del self.hvparams[key]
1224
        except KeyError:
1225
          pass
1226
    if self.osparams is None:
1227
      self.osparams = {}
1228
    UpgradeBeParams(self.beparams)
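# Illustrative sketch for Instance.MapLVsByNode above (hypothetical names):
# a plain LVM-backed instance maps its "vg/lv" strings under the primary node.
def _ExampleMapLVsByNode():
  inst = Instance(name="inst1.example.com", primary_node="node1",
                  disks=[Disk(dev_type=constants.LD_LV, size=1024,
                              logical_id=("xenvg", "inst1-disk0"))])
  assert inst.MapLVsByNode() == {"node1": ["xenvg/inst1-disk0"]}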
1229

    
1230

    
1231
class OS(ConfigObject):
1232
  """Config object representing an operating system.
1233

1234
  @type supported_parameters: list
1235
  @ivar supported_parameters: a list of tuples, name and description,
1236
      containing the supported parameters by this OS
1237

1238
  @type VARIANT_DELIM: string
1239
  @cvar VARIANT_DELIM: the variant delimiter
1240

1241
  """
1242
  __slots__ = [
1243
    "name",
1244
    "path",
1245
    "api_versions",
1246
    "create_script",
1247
    "export_script",
1248
    "import_script",
1249
    "rename_script",
1250
    "verify_script",
1251
    "supported_variants",
1252
    "supported_parameters",
1253
    ]
1254

    
1255
  VARIANT_DELIM = "+"
1256

    
1257
  @classmethod
1258
  def SplitNameVariant(cls, name):
1259
    """Splits the name into the proper name and variant.
1260

1261
    @param name: the OS (unprocessed) name
1262
    @rtype: list
1263
    @return: a list of two elements; if the original name didn't
1264
        contain a variant, the variant is returned as an empty string
1265

1266
    """
1267
    nv = name.split(cls.VARIANT_DELIM, 1)
1268
    if len(nv) == 1:
1269
      nv.append("")
1270
    return nv
1271

    
1272
  @classmethod
1273
  def GetName(cls, name):
1274
    """Returns the proper name of the os (without the variant).
1275

1276
    @param name: the OS (unprocessed) name
1277

1278
    """
1279
    return cls.SplitNameVariant(name)[0]
1280

    
1281
  @classmethod
1282
  def GetVariant(cls, name):
1283
    """Returns the variant the os (without the base name).
1284

1285
    @param name: the OS (unprocessed) name
1286

1287
    """
1288
    return cls.SplitNameVariant(name)[1]
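# Illustrative sketch (hypothetical OS names): the name/variant helpers split
# on the first "+" only.
def _ExampleOsNameVariant():
  assert OS.SplitNameVariant("debootstrap+default") == \
    ["debootstrap", "default"]
  # no variant present: the second element is the empty string
  assert OS.SplitNameVariant("debootstrap") == ["debootstrap", ""]
  assert OS.GetName("debootstrap+default") == "debootstrap"
  assert OS.GetVariant("debootstrap+default") == "default"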
1289

    
1290

    
1291
class ExtStorage(ConfigObject):
1292
  """Config object representing an External Storage Provider.
1293

1294
  """
1295
  __slots__ = [
1296
    "name",
1297
    "path",
1298
    "create_script",
1299
    "remove_script",
1300
    "grow_script",
1301
    "attach_script",
1302
    "detach_script",
1303
    "setinfo_script",
1304
    "verify_script",
1305
    "supported_parameters",
1306
    ]
1307

    
1308

    
1309
class NodeHvState(ConfigObject):
1310
  """Hypvervisor state on a node.
1311

1312
  @ivar mem_total: Total amount of memory
1313
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1314
    available)
1315
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1316
    rounding
1317
  @ivar mem_inst: Memory used by instances living on node
1318
  @ivar cpu_total: Total node CPU core count
1319
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1320

1321
  """
1322
  __slots__ = [
1323
    "mem_total",
1324
    "mem_node",
1325
    "mem_hv",
1326
    "mem_inst",
1327
    "cpu_total",
1328
    "cpu_node",
1329
    ] + _TIMESTAMPS
1330

    
1331

    
1332
class NodeDiskState(ConfigObject):
1333
  """Disk state on a node.
1334

1335
  """
1336
  __slots__ = [
1337
    "total",
1338
    "reserved",
1339
    "overhead",
1340
    ] + _TIMESTAMPS
1341

    
1342

    
1343
class Node(TaggableObject):
1344
  """Config object representing a node.
1345

1346
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1347
  @ivar hv_state_static: Hypervisor state overridden by user
1348
  @ivar disk_state: Disk state (e.g. free space)
1349
  @ivar disk_state_static: Disk state overridden by user
1350

1351
  """
1352
  __slots__ = [
1353
    "name",
1354
    "primary_ip",
1355
    "secondary_ip",
1356
    "serial_no",
1357
    "master_candidate",
1358
    "offline",
1359
    "drained",
1360
    "group",
1361
    "master_capable",
1362
    "vm_capable",
1363
    "ndparams",
1364
    "powered",
1365
    "hv_state",
1366
    "hv_state_static",
1367
    "disk_state",
1368
    "disk_state_static",
1369
    ] + _TIMESTAMPS + _UUID
1370

    
1371
  def UpgradeConfig(self):
1372
    """Fill defaults for missing configuration values.
1373

1374
    """
1375
    # pylint: disable=E0203
1376
    # because these are "defined" via slots, not manually
1377
    if self.master_capable is None:
1378
      self.master_capable = True
1379

    
1380
    if self.vm_capable is None:
1381
      self.vm_capable = True
1382

    
1383
    if self.ndparams is None:
1384
      self.ndparams = {}
1385
    # And remove any global parameter
1386
    for key in constants.NDC_GLOBALS:
1387
      if key in self.ndparams:
1388
        logging.warning("Ignoring %s node parameter for node %s",
1389
                        key, self.name)
1390
        del self.ndparams[key]
1391

    
1392
    if self.powered is None:
1393
      self.powered = True
1394

    
1395
  def ToDict(self):
1396
    """Custom function for serializing.
1397

1398
    """
1399
    data = super(Node, self).ToDict()
1400

    
1401
    hv_state = data.get("hv_state", None)
1402
    if hv_state is not None:
1403
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1404

    
1405
    disk_state = data.get("disk_state", None)
1406
    if disk_state is not None:
1407
      data["disk_state"] = \
1408
        dict((key, outils.ContainerToDicts(value))
1409
             for (key, value) in disk_state.items())
1410

    
1411
    return data
1412

    
1413
  @classmethod
1414
  def FromDict(cls, val):
1415
    """Custom function for deserializing.
1416

1417
    """
1418
    obj = super(Node, cls).FromDict(val)
1419

    
1420
    if obj.hv_state is not None:
1421
      obj.hv_state = \
1422
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1423

    
1424
    if obj.disk_state is not None:
1425
      obj.disk_state = \
1426
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1427
             for (key, value) in obj.disk_state.items())
1428

    
1429
    return obj
1430

    
1431

    
1432
class NodeGroup(TaggableObject):
1433
  """Config object representing a node group."""
1434
  __slots__ = [
1435
    "name",
1436
    "members",
1437
    "ndparams",
1438
    "diskparams",
1439
    "ipolicy",
1440
    "serial_no",
1441
    "hv_state_static",
1442
    "disk_state_static",
1443
    "alloc_policy",
1444
    "networks",
1445
    ] + _TIMESTAMPS + _UUID
1446

    
1447
  def ToDict(self):
1448
    """Custom function for nodegroup.
1449

1450
    This discards the members object, which gets recalculated and is only kept
1451
    in memory.
1452

1453
    """
1454
    mydict = super(NodeGroup, self).ToDict()
1455
    del mydict["members"]
1456
    return mydict
1457

    
1458
  @classmethod
1459
  def FromDict(cls, val):
1460
    """Custom function for nodegroup.
1461

1462
    The members slot is initialized to an empty list, upon deserialization.
1463

1464
    """
1465
    obj = super(NodeGroup, cls).FromDict(val)
1466
    obj.members = []
1467
    return obj
1468

    
1469
  def UpgradeConfig(self):
1470
    """Fill defaults for missing configuration values.
1471

1472
    """
1473
    if self.ndparams is None:
1474
      self.ndparams = {}
1475

    
1476
    if self.serial_no is None:
1477
      self.serial_no = 1
1478

    
1479
    if self.alloc_policy is None:
1480
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1481

    
1482
    # We only update mtime, and not ctime, since we would not be able
1483
    # to provide a correct value for creation time.
1484
    if self.mtime is None:
1485
      self.mtime = time.time()
1486

    
1487
    if self.diskparams is None:
1488
      self.diskparams = {}
1489
    if self.ipolicy is None:
1490
      self.ipolicy = MakeEmptyIPolicy()
1491

    
1492
    if self.networks is None:
1493
      self.networks = {}
1494

    
1495
  def FillND(self, node):
1496
    """Return filled out ndparams for L{objects.Node}
1497

1498
    @type node: L{objects.Node}
1499
    @param node: A Node object to fill
1500
    @return: a copy of the node's ndparams with defaults filled
1501

1502
    """
1503
    return self.SimpleFillND(node.ndparams)
1504

    
1505
  def SimpleFillND(self, ndparams):
1506
    """Fill a given ndparams dict with defaults.
1507

1508
    @type ndparams: dict
1509
    @param ndparams: the dict to fill
1510
    @rtype: dict
1511
    @return: a copy of the passed in ndparams with missing keys filled
1512
        from the node group defaults
1513

1514
    """
1515
    return FillDict(self.ndparams, ndparams)
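# Illustrative sketch (hypothetical group and parameter names): node
# parameters are completed from the owning group's ndparams.
def _ExampleSimpleFillND():
  group = NodeGroup(name="default", ndparams={"oob_program": "/bin/true",
                                              "spindle_count": 1})
  assert group.SimpleFillND({"spindle_count": 4}) == \
    {"oob_program": "/bin/true", "spindle_count": 4}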
1516

    
1517

    
1518
class Cluster(TaggableObject):
1519
  """Config object representing the cluster."""
1520
  __slots__ = [
1521
    "serial_no",
1522
    "rsahostkeypub",
1523
    "highest_used_port",
1524
    "tcpudp_port_pool",
1525
    "mac_prefix",
1526
    "volume_group_name",
1527
    "reserved_lvs",
1528
    "drbd_usermode_helper",
1529
    "default_bridge",
1530
    "default_hypervisor",
1531
    "master_node",
1532
    "master_ip",
1533
    "master_netdev",
1534
    "master_netmask",
1535
    "use_external_mip_script",
1536
    "cluster_name",
1537
    "file_storage_dir",
1538
    "shared_file_storage_dir",
1539
    "enabled_hypervisors",
1540
    "hvparams",
1541
    "ipolicy",
1542
    "os_hvp",
1543
    "beparams",
1544
    "osparams",
1545
    "nicparams",
1546
    "ndparams",
1547
    "diskparams",
1548
    "candidate_pool_size",
1549
    "modify_etc_hosts",
1550
    "modify_ssh_setup",
1551
    "maintain_node_health",
1552
    "uid_pool",
1553
    "default_iallocator",
1554
    "hidden_os",
1555
    "blacklisted_os",
1556
    "primary_ip_family",
1557
    "prealloc_wipe_disks",
1558
    "hv_state_static",
1559
    "disk_state_static",
1560
    "enabled_disk_templates",
1561
    ] + _TIMESTAMPS + _UUID
1562

    
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

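  # Note: the upgrade routine above is effectively idempotent: each branch
  # only fills in a value that is still missing (None), or re-applies the
  # defaults via FillDict/FillIPolicy, which leaves an already up-to-date
  # configuration unchanged.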
  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

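  # Note: tcpudp_port_pool is kept as a set in memory, but ToDict() above
  # serializes it as a list because the configuration is written out as
  # JSON, which has no set type; FromDict() converts it back into a set.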
  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill with the hypervisor parameters
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

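  # Illustration (hypothetical values): with cluster hvparams of
  # {"kernel_path": "/vmlinuz", "root_path": "/dev/xvda1"}, an os_hvp
  # override of {"root_path": "/dev/xvda2"} and instance hvparams of
  # {"kernel_path": "/boot/vmlinuz"}, SimpleFillHV() returns
  # {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda2"}: instance
  # values win over per-OS overrides, which in turn win over the cluster
  # defaults assembled by GetHVDefaults().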
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose parameters should be filled
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance whose parameters should be filled
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

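  # Illustration (hypothetical names): for os_name "debootstrap+minimal",
  # SimpleFillOS() starts from the cluster osparams of the base OS
  # ("debootstrap"), overlays the osparams of the exact variant
  # ("debootstrap+minimal") and finally overlays the explicitly passed
  # os_params, so the most specific value always wins.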
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

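  # The ndparams lookup is layered node -> node group -> cluster:
  # nodegroup.FillND(node) fills the node's own ndparams from the group
  # defaults, and SimpleFillND() below then fills any remaining gaps from
  # the cluster-wide ndparams.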
  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of the passed ipolicy with missing keys filled from
        the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


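# Illustrative sketch, not part of the original module: the Cluster "fill"
# helpers above all reduce to layering dictionaries with FillDict(), with
# later layers overriding earlier ones.  The parameter names and values
# below are hypothetical.
def _ExampleFillLayering():
  cluster_defaults = {"memory": 128, "vcpus": 1}
  group_overrides = {"memory": 256}
  instance_params = {"vcpus": 4}
  # First apply the group overrides on top of the cluster defaults, then the
  # per-instance parameters on top of that.
  filled = FillDict(FillDict(cluster_defaults, group_overrides),
                    instance_params)
  assert filled == {"memory": 256, "vcpus": 4}
  return filled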


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


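# Illustrative sketch, not part of the original module: Validate() only
# requires the fields that make sense for the console kind; for an SSH
# console, host, user and command must be set, while message, port and
# display may stay unset.  All field values below are hypothetical.
def _ExampleSshConsole():
  console = InstanceConsole(instance="instance1.example.com",
                            kind=constants.CONS_SSH,
                            host="node1.example.com",
                            user="root",
                            command=["ssh", "-t", "node1.example.com"])
  assert console.Validate()
  return console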


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

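  # Illustration (hypothetical values): for a network named "net1" with
  # network "10.0.0.0/24" and gateway "10.0.0.1", HooksDict("NIC0_") returns
  # {"NIC0_NETWORK_NAME": "net1", "NIC0_NETWORK_SUBNET": "10.0.0.0/24",
  # "NIC0_NETWORK_GATEWAY": "10.0.0.1", ...} plus the UUID and tags entries;
  # keys for attributes that are unset are simply omitted.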
  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp


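# Illustrative round-trip sketch, not part of the original module: Dumps()
# serializes the parser to a string and Loads() reconstructs an equivalent
# parser from it.  The section and option names are hypothetical.
def _ExampleConfigRoundTrip():
  cfp = SerializableConfigParser()
  cfp.add_section("example")
  cfp.set("example", "key", "value")
  data = cfp.Dumps()
  restored = SerializableConfigParser.Loads(data)
  assert restored.get("example", "key") == "value"
  return data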


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)