Statistics
| Branch: | Tag: | Revision:

root / lib / objects.py @ da5f09ef

History | View | Annotate | Download (62.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # Deep-copy the defaults so callers can mutate the result freely
  # without affecting the shared defaults dictionary.
  filled = copy.deepcopy(defaults_dict)
  filled.update(custom_dict)
  # Drop any keys the caller explicitly asked to leave unfilled;
  # missing keys are silently ignored.
  for skip_key in (skip_keys or []):
    filled.pop(skip_key, None)
  return filled
83

    
84

    
85
def _FillMinMaxISpecs(default_specs, custom_specs):
86
  assert frozenset(default_specs.keys()) == constants.ISPECS_MINMAX_KEYS
87
  ret_specs = {}
88
  for key in constants.ISPECS_MINMAX_KEYS:
89
    ret_specs[key] = FillDict(default_specs[key],
90
                              custom_specs.get(key, {}))
91
  return ret_specs
92

    
93

    
94
def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  @type default_ipolicy: dict
  @param default_ipolicy: complete policy holding the default values
  @type custom_ipolicy: dict
  @param custom_ipolicy: partial policy with customized values
  @rtype: dict
  @return: the filled instance policy

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  full_policy = {
    # Instance specs: min/max pairs and the standard spec.
    constants.ISPECS_MINMAX:
      _FillMinMaxISpecs(default_ipolicy[constants.ISPECS_MINMAX],
                        custom_ipolicy.get(constants.ISPECS_MINMAX, {})),
    constants.ISPECS_STD:
      FillDict(default_ipolicy[constants.ISPECS_STD],
               custom_ipolicy.get(constants.ISPECS_STD, {})),
    }
  # List-valued items are copied so the result does not alias either input.
  for key in [constants.IPOLICY_DTS]:
    full_policy[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
  # The remaining parameters are immutable and can be taken directly.
  for key in constants.IPOLICY_PARAMETERS:
    full_policy[key] = custom_ipolicy.get(key, default_ipolicy[key])

  return full_policy
115

    
116

    
117
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  # Fill each disk template independently; templates missing from the
  # custom parameters just get the full defaults.
  filled = {}
  for template in constants.DISK_TEMPLATES:
    filled[template] = FillDict(default_dparams[template],
                                custom_dparams.get(template, {}),
                                skip_keys=skip_keys)
  return filled
128

    
129

    
130
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the upgraded {group: {parameter: value}} mapping

  """
  if target is None:
    # No grouped parameters at all: create the default group.
    return {constants.PP_DEFAULT: defaults}
  # Fill every existing group's parameters on top of the defaults.
  for group_name in target:
    target[group_name] = FillDict(defaults, target[group_name])
  return target
145

    
146

    
147
def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  The legacy single "memory" setting is split into maximum and minimum
  memory, both initialised to the old value.

  @type target: dict
  @param target: "be" parameters dict, modified in place

  """
  if constants.BE_MEMORY in target:
    memory_value = target.pop(constants.BE_MEMORY)
    target[constants.BE_MAXMEM] = memory_value
    target[constants.BE_MINMEM] = memory_value
159

    
160

    
161
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  # An empty/None input yields an empty dict; otherwise fill the given
  # parameters on top of the per-template defaults.
  if not diskparams:
    return {}
  return FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
176

    
177

    
178
def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  # An explicit None for the OOB program would shadow the default, so
  # drop it and let FillDict restore the proper default value below.
  if ndparams.get(constants.ND_OOB_PROGRAM, False) is None:
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)
195

    
196

    
197
def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  @rtype: dict
  @return: a policy skeleton with empty min/max and std spec dicts

  """
  empty_minmax = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    }
  return {
    constants.ISPECS_MINMAX: empty_minmax,
    constants.ISPECS_STD: {},
    }
208

    
209

    
210
class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    # Only invoked when normal attribute lookup fails: a declared but
    # unset slot reads as None, anything else is a programming error.
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    # Unpickling support: only restore keys that are declared slots,
    # silently dropping anything unknown from older/newer state dicts.
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    No-op by default; subclasses override to enforce invariants.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    @rtype: dict
    @return: mapping of slot name to value, omitting unset (None) slots

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  # Pickling reuses the dict form as the object's state.
  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    @type val: dict
    @param val: source dictionary
    @raise errors.ConfigurationError: if val is not a dict

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # Keys must be str (not unicode) to be usable as keyword arguments.
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    Implemented by round-tripping through the dict representation.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
301

    
302

    
303
class TaggableObject(ConfigObject):
  """An generic class supporting tags.

  Tags are stored as a set on the object but serialized as a list in
  the dict form (sets are not JSON-representable).

  """
  __slots__ = ["tags"]
  # Allowed tag characters: word chars plus ".+*/:@-".
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    @type tag: string
    @param tag: the tag to check
    @raise errors.TagError: if the tag is not a string, empty, too long
        or contains invalid characters

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    Lazily initialises the tags slot to an empty set on first access.

    @rtype: set
    @return: the (live) set of tags for this object

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    @type tag: string
    @param tag: the tag to add
    @raise errors.TagError: if the tag is invalid or the per-object
        tag limit would be exceeded

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    # Enforce the limit before adding; adding an already-present tag
    # when at the limit also fails here.
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    @type tag: string
    @param tag: the tag to remove
    @raise errors.TagError: if the tag is invalid or not present

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    Converts the serialized tags list back into a set.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
380

    
381

    
382
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  Plain data container (slots only, no behaviour).

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]
399

    
400

    
401
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    # Each of these is a {name: ConfigObject} container.
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    Rebuilds the typed child objects from their dict forms.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if in there is at disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Recursively upgrades the cluster, nodes, instances, node groups
    and networks, creating empty containers where missing.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set an helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
    self._UpgradeStorageTypes()

  def _UpgradeStorageTypes(self):
    """Upgrade the cluster's enabled storage types by inspecting the currently
       enabled and/or used storage types.

    """
    # enabled_storage_types in the cluster config were introduced in 2.8. Remove
    # this code once upgrading from earlier versions is deprecated.
    if not self.cluster.enabled_storage_types:
      # Infer storage types from the disk templates actually in use.
      storage_type_set = \
        set([constants.DISK_TEMPLATES_STORAGE_TYPE[inst.disk_template]
               for inst in self.instances.values()])
      # Add lvm, file and shared file storage, if they are enabled, even though
      # they might currently not be used.
      if self.cluster.volume_group_name:
        storage_type_set.add(constants.ST_LVM_VG)
      # FIXME: Adapt this when dis/enabling at configure time is removed.
      if constants.ENABLE_FILE_STORAGE:
        storage_type_set.add(constants.ST_FILE)
      if constants.ENABLE_SHARED_FILE_STORAGE:
        storage_type_set.add(constants.ST_SHARED_FILE)
      # Set enabled_storage_types to the inferred storage types. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported storage types.
      self.cluster.enabled_storage_types = []
      for preferred_type in constants.STORAGE_TYPES_PREFERENCE:
        if preferred_type in storage_type_set:
          self.cluster.enabled_storage_types.append(preferred_type)
          storage_type_set.remove(preferred_type)
      # Any remaining (non-preferred) types go at the end, in set order.
      self.cluster.enabled_storage_types.extend(list(storage_type_set))
511

    
512

    
513
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "network", "nicparams", "netinfo"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    # "auto" is accepted in addition to the statically valid modes.
    mode_is_valid = (mode in constants.NIC_VALID_MODES or
                     mode == constants.VALUE_AUTO)
    if not mode_is_valid:
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    # Bridged NICs must name the bridge they attach to.
    if mode == constants.NIC_MODE_BRIDGED:
      if not nicparams[constants.NIC_LINK]:
        raise errors.ConfigurationError("Missing bridged NIC link")
534

    
535

    
536
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # logical_id is (vg_name, lv_name)
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      # logical_id is (pool, image)
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    # Recurse into children first, then check this device itself.
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD,
                         constants.LD_EXT]:
      # Local (non-replicated) devices live on the parent's node only.
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      # DRBD devices live on both nodes stored in the logical_id.
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      # DRBD delegates to its data child (the backing LV).
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD, constants.LD_EXT):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      # Grow the data child first, then this device's own size.
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None):
    """Apply changes to size and mode.

    @type size: int or None
    @param size: new size, if not None
    @type mode: str or None
    @param mode: new access mode, if not None

    """
    if self.dev_type == constants.LD_DRBD8:
      # Propagate the update to the data child as well.
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    # Nothing to derive from if there is no logical id but a physical
    # id is already set.
    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      # The physical id is ordered from the point of view of target_node:
      # (local_ip, local_port, remote_ip, remote_port, local_minor, secret)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      # Non-DRBD devices: physical id equals the logical id.
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = outils.ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    Rebuilds children and converts list-serialized ids back to tuples.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    @rtype: list
    @return: list of error message strings; empty if the disk is valid

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk

  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]
    if disk_template == constants.DT_DRBD8:
      # DRBD device itself (first in the flattened in-order tree).
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

    elif disk_template == constants.DT_PLAIN:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
        }))

    elif disk_template == constants.DT_BLOCK:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

    elif disk_template == constants.DT_RBD:
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
        }))

    elif disk_template == constants.DT_EXT:
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])

    return result
924

    
925

    
926
class InstancePolicy(ConfigObject):
927
  """Config object representing instance policy limits dictionary.
928

929
  Note that this object is not actually used in the config, it's just
930
  used as a placeholder for a few functions.
931

932
  """
933
  @classmethod
934
  def CheckParameterSyntax(cls, ipolicy, check_std):
935
    """ Check the instance policy for validity.
936

937
    @type ipolicy: dict
938
    @param ipolicy: dictionary with min/max/std specs and policies
939
    @type check_std: bool
940
    @param check_std: Whether to check std value or just assume compliance
941
    @raise errors.ConfigurationError: when the policy is not legal
942

943
    """
944
    if constants.ISPECS_MINMAX in ipolicy:
945
      if check_std and constants.ISPECS_STD not in ipolicy:
946
        msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
947
        raise errors.ConfigurationError(msg)
948
      minmaxspecs = ipolicy[constants.ISPECS_MINMAX]
949
      stdspec = ipolicy.get(constants.ISPECS_STD)
950
      for param in constants.ISPECS_PARAMETERS:
951
        InstancePolicy.CheckISpecSyntax(minmaxspecs, stdspec, param, check_std)
952
    if constants.IPOLICY_DTS in ipolicy:
953
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
954
    for key in constants.IPOLICY_PARAMETERS:
955
      if key in ipolicy:
956
        InstancePolicy.CheckParameter(key, ipolicy[key])
957
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
958
    if wrong_keys:
959
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
960
                                      utils.CommaJoin(wrong_keys))
961

    
962
  @classmethod
963
  def CheckISpecSyntax(cls, minmaxspecs, stdspec, name, check_std):
964
    """Check the instance policy specs for validity on a given key.
965

966
    We check if the instance specs makes sense for a given key, that is
967
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
968

969
    @type minmaxspecs: dict
970
    @param minmaxspecs: dictionary with min and max instance spec
971
    @type stdspec: dict
972
    @param stdspec: dictionary with standard instance spec
973
    @type name: string
974
    @param name: what are the limits for
975
    @type check_std: bool
976
    @param check_std: Whether to check std value or just assume compliance
977
    @raise errors.ConfigurationError: when specs for the given name are not
978
        valid
979

980
    """
981
    missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
982
    if missing:
983
      msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
984
      raise errors.ConfigurationError(msg)
985

    
986
    minspec = minmaxspecs[constants.ISPECS_MIN]
987
    maxspec = minmaxspecs[constants.ISPECS_MAX]
988
    min_v = minspec.get(name, 0)
989

    
990
    if check_std:
991
      std_v = stdspec.get(name, min_v)
992
      std_msg = std_v
993
    else:
994
      std_v = min_v
995
      std_msg = "-"
996

    
997
    max_v = maxspec.get(name, std_v)
998
    if min_v > std_v or std_v > max_v:
999
      err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
1000
             (name,
1001
              minspec.get(name, "-"),
1002
              maxspec.get(name, "-"),
1003
              std_msg))
1004
      raise errors.ConfigurationError(err)
1005

    
1006
  @classmethod
1007
  def CheckDiskTemplates(cls, disk_templates):
1008
    """Checks the disk templates for validity.
1009

1010
    """
1011
    if not disk_templates:
1012
      raise errors.ConfigurationError("Instance policy must contain" +
1013
                                      " at least one disk template")
1014
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1015
    if wrong:
1016
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1017
                                      utils.CommaJoin(wrong))
1018

    
1019
  @classmethod
1020
  def CheckParameter(cls, key, value):
1021
    """Checks a parameter.
1022

1023
    Currently we expect all parameters to be float values.
1024

1025
    """
1026
    try:
1027
      float(value)
1028
    except (TypeError, ValueError), err:
1029
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1030
                                      " '%s', error: %s" % (key, value, err))
1031

    
1032

    
1033
class Instance(TaggableObject):
1034
  """Config object representing an instance."""
1035
  __slots__ = [
1036
    "name",
1037
    "primary_node",
1038
    "os",
1039
    "hypervisor",
1040
    "hvparams",
1041
    "beparams",
1042
    "osparams",
1043
    "admin_state",
1044
    "nics",
1045
    "disks",
1046
    "disk_template",
1047
    "network_port",
1048
    "serial_no",
1049
    ] + _TIMESTAMPS + _UUID
1050

    
1051
  def _ComputeSecondaryNodes(self):
1052
    """Compute the list of secondary nodes.
1053

1054
    This is a simple wrapper over _ComputeAllNodes.
1055

1056
    """
1057
    all_nodes = set(self._ComputeAllNodes())
1058
    all_nodes.discard(self.primary_node)
1059
    return tuple(all_nodes)
1060

    
1061
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1062
                             "List of names of secondary nodes")
1063

    
1064
  def _ComputeAllNodes(self):
1065
    """Compute the list of all nodes.
1066

1067
    Since the data is already there (in the drbd disks), keeping it as
1068
    a separate normal attribute is redundant and if not properly
1069
    synchronised can cause problems. Thus it's better to compute it
1070
    dynamically.
1071

1072
    """
1073
    def _Helper(nodes, device):
1074
      """Recursively computes nodes given a top device."""
1075
      if device.dev_type in constants.LDS_DRBD:
1076
        nodea, nodeb = device.logical_id[:2]
1077
        nodes.add(nodea)
1078
        nodes.add(nodeb)
1079
      if device.children:
1080
        for child in device.children:
1081
          _Helper(nodes, child)
1082

    
1083
    all_nodes = set()
1084
    all_nodes.add(self.primary_node)
1085
    for device in self.disks:
1086
      _Helper(all_nodes, device)
1087
    return tuple(all_nodes)
1088

    
1089
  all_nodes = property(_ComputeAllNodes, None, None,
1090
                       "List of names of all the nodes of the instance")
1091

    
1092
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1093
    """Provide a mapping of nodes to LVs this instance owns.
1094

1095
    This function figures out what logical volumes should belong on
1096
    which nodes, recursing through a device tree.
1097

1098
    @param lvmap: optional dictionary to receive the
1099
        'node' : ['lv', ...] data.
1100

1101
    @return: None if lvmap arg is given, otherwise, a dictionary of
1102
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1103
        volumeN is of the form "vg_name/lv_name", compatible with
1104
        GetVolumeList()
1105

1106
    """
1107
    if node is None:
1108
      node = self.primary_node
1109

    
1110
    if lvmap is None:
1111
      lvmap = {
1112
        node: [],
1113
        }
1114
      ret = lvmap
1115
    else:
1116
      if not node in lvmap:
1117
        lvmap[node] = []
1118
      ret = None
1119

    
1120
    if not devs:
1121
      devs = self.disks
1122

    
1123
    for dev in devs:
1124
      if dev.dev_type == constants.LD_LV:
1125
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1126

    
1127
      elif dev.dev_type in constants.LDS_DRBD:
1128
        if dev.children:
1129
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1130
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1131

    
1132
      elif dev.children:
1133
        self.MapLVsByNode(lvmap, dev.children, node)
1134

    
1135
    return ret
1136

    
1137
  def FindDisk(self, idx):
1138
    """Find a disk given having a specified index.
1139

1140
    This is just a wrapper that does validation of the index.
1141

1142
    @type idx: int
1143
    @param idx: the disk index
1144
    @rtype: L{Disk}
1145
    @return: the corresponding disk
1146
    @raise errors.OpPrereqError: when the given index is not valid
1147

1148
    """
1149
    try:
1150
      idx = int(idx)
1151
      return self.disks[idx]
1152
    except (TypeError, ValueError), err:
1153
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1154
                                 errors.ECODE_INVAL)
1155
    except IndexError:
1156
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1157
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1158
                                 errors.ECODE_INVAL)
1159

    
1160
  def ToDict(self):
1161
    """Instance-specific conversion to standard python types.
1162

1163
    This replaces the children lists of objects with lists of standard
1164
    python types.
1165

1166
    """
1167
    bo = super(Instance, self).ToDict()
1168

    
1169
    for attr in "nics", "disks":
1170
      alist = bo.get(attr, None)
1171
      if alist:
1172
        nlist = outils.ContainerToDicts(alist)
1173
      else:
1174
        nlist = []
1175
      bo[attr] = nlist
1176
    return bo
1177

    
1178
  @classmethod
1179
  def FromDict(cls, val):
1180
    """Custom function for instances.
1181

1182
    """
1183
    if "admin_state" not in val:
1184
      if val.get("admin_up", False):
1185
        val["admin_state"] = constants.ADMINST_UP
1186
      else:
1187
        val["admin_state"] = constants.ADMINST_DOWN
1188
    if "admin_up" in val:
1189
      del val["admin_up"]
1190
    obj = super(Instance, cls).FromDict(val)
1191
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1192
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1193
    return obj
1194

    
1195
  def UpgradeConfig(self):
1196
    """Fill defaults for missing configuration values.
1197

1198
    """
1199
    for nic in self.nics:
1200
      nic.UpgradeConfig()
1201
    for disk in self.disks:
1202
      disk.UpgradeConfig()
1203
    if self.hvparams:
1204
      for key in constants.HVC_GLOBALS:
1205
        try:
1206
          del self.hvparams[key]
1207
        except KeyError:
1208
          pass
1209
    if self.osparams is None:
1210
      self.osparams = {}
1211
    UpgradeBeParams(self.beparams)
1212

    
1213

    
1214
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    parts = name.split(cls.VARIANT_DELIM, 1)
    if len(parts) < 2:
      # No delimiter present, so the variant part is empty
      parts.append("")
    return parts

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    (base, _) = cls.SplitNameVariant(name)
    return base

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    (_, variant) = cls.SplitNameVariant(name)
    return variant
class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  Pure data holder: stores the provider name, its installation path and
  the scripts implementing each storage operation.

  """
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]
class NodeHvState(ConfigObject):
  """Hypvervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS
class NodeDiskState(ConfigObject):
  """Disk state on a node.

  Tracks total, reserved and overhead space figures per storage unit.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS
class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overriden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overriden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    # These boolean capabilities all default to True when unset
    for flag in ("master_capable", "vm_capable", "powered"):
      if getattr(self, flag) is None:
        setattr(self, flag, True)

    if self.ndparams is None:
      self.ndparams = {}
    # Cluster-global parameters must not be stored at the node level
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    # hv_state/disk_state hold ConfigObjects, which must become plain dicts
    if data.get("hv_state", None) is not None:
      data["hv_state"] = outils.ContainerToDicts(data["hv_state"])

    if data.get("disk_state", None) is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in data["disk_state"].items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj
class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    data = super(NodeGroup, self).ToDict()
    del data["members"]
    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    group = super(NodeGroup, cls).FromDict(val)
    group.members = []
    return group

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # Dict-valued slots simply default to empty mappings
    for attr in ("ndparams", "diskparams", "networks"):
      if getattr(self, attr) is None:
        setattr(self, attr, {})

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_storage_types",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    # If nicparams were empty, the pre-2.1 default_bridge value must be
    # migrated into the default NIC parameter group
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        # FIX: message typo "spourious" -> "spurious"
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    # The port pool is a set in memory, which JSON cannot represent
    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    # OS-specific overrides are layered on top of the hypervisor defaults
    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill an disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A Node object to fill
    @return a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """ Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device.

  @ivar dev_path: path of the device node
  @ivar major: device major number
  @ivar minor: device minor number
  @ivar sync_percent: synchronisation progress, in percent (presumably
      only meaningful for mirrored devices -- confirm with callers)
  @ivar estimated_time: estimated time until synchronisation completes
  @ivar is_degraded: whether the device is currently degraded
  @ivar ldisk_status: status of the local disk

  """
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
1886

    
1887

    
1888
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export.

  Creation/modification timestamps (ctime, mtime) are added via
  C{_TIMESTAMPS}.

  @ivar recent_output: recent output of the operation
  @ivar listen_port: port being listened on (presumably by the
      import/export daemon -- confirm against users of this object)
  @ivar connected: whether the remote peer has connected
  @ivar progress_mbytes: amount of data transferred so far, in mebibytes
  @ivar progress_throughput: current transfer throughput
  @ivar progress_eta: estimated time until completion
  @ivar progress_percent: progress in percent
  @ivar exit_status: exit status of the transfer
  @ivar error_message: error message, if any

  """
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
1901

    
1902

    
1903
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]
1922

    
1923

    
1924
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
1939

    
1940

    
1941
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
1956

    
1957

    
1958
class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]
1973

    
1974

    
1975
class _QueryResponseBase(ConfigObject):
  """Base class for query responses carrying field definitions.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    Converts the contained field definition objects into plain dicts.

    """
    data = super(_QueryResponseBase, self).ToDict()
    data["fields"] = outils.ContainerToDicts(data["fields"])
    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    Rebuilds the field definition objects from their dict form.

    """
    instance = super(_QueryResponseBase, cls).FromDict(val)
    instance.fields = outils.ContainerFromDicts(instance.fields, list,
                                                QueryFieldDefinition)
    return instance
1997

    
1998

    
1999
class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]
2009

    
2010

    
2011
class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  @ivar what: kind of resource the fields are requested for
  @ivar fields: requested field names (presumably None means all
      fields -- confirm against the query implementation)

  """
  __slots__ = [
    "what",
    "fields",
    ]
2019

    
2020

    
2021
class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []
2028

    
2029

    
2030
class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  @ivar status: current migration status
  @ivar transferred_ram: amount of RAM transferred so far (unit not
      visible here -- presumably MiB, confirm with the hypervisor code)
  @ivar total_ram: total amount of RAM to transfer (same unit as
      transferred_ram)

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]
2039

    
2040

    
2041
class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    Checks that every attribute required for the console kind is set;
    raises AssertionError otherwise.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    # Each attribute below must be set unless the console kind is one
    # for which the attribute is meaningless; the table maps an
    # attribute value to the kinds exempt from requiring it.
    requirements = [
      (self.message,
       (constants.CONS_SSH, constants.CONS_SPICE, constants.CONS_VNC)),
      (self.host,
       (constants.CONS_MESSAGE,)),
      (self.port,
       (constants.CONS_MESSAGE, constants.CONS_SSH)),
      (self.user,
       (constants.CONS_MESSAGE, constants.CONS_SPICE, constants.CONS_VNC)),
      (self.command,
       (constants.CONS_MESSAGE, constants.CONS_SPICE, constants.CONS_VNC)),
      (self.display,
       (constants.CONS_MESSAGE, constants.CONS_SPICE, constants.CONS_SSH)),
      ]
    for value, exempt_kinds in requirements:
      assert value or self.kind in exempt_kinds
    return True
2078

    
2079

    
2080
class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    # Name, UUID and tags are always present
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    # The remaining attributes are exported only when set
    optional = [
      ("%sNETWORK_SUBNET" % prefix, self.network),
      ("%sNETWORK_GATEWAY" % prefix, self.gateway),
      ("%sNETWORK_SUBNET6" % prefix, self.network6),
      ("%sNETWORK_GATEWAY6" % prefix, self.gateway6),
      ("%sNETWORK_MAC_PREFIX" % prefix, self.mac_prefix),
      ]
    for key, value in optional:
      if value:
        result[key] = value

    return result

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    for deprecated_key in ("network_type", "family"):
      val.pop(deprecated_key, None)
    return super(Network, cls).FromDict(val)
2134

    
2135

    
2136
class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParse that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    output = StringIO()
    self.write(output)
    return output.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    parser = cls()
    parser.readfp(StringIO(data))
    return parser
2157

    
2158

    
2159
class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    @rtype: bool
    @return: True if all of the PV's space (within a 1 MiB slack) is
        still free

    """
    # NOTE(review): the +1 MiB tolerance presumably absorbs LVM metadata
    # overhead so a freshly created PV counts as empty -- confirm
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    @rtype: bool
    @return: True if the attribute string contains the "a"
        (allocatable) flag

    """
    return ("a" in self.attributes)