root / lib / objects.py @ 966e1580


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import logging
42
import time
43
from cStringIO import StringIO
44

    
45
from ganeti import errors
46
from ganeti import constants
47
from ganeti import netutils
48
from ganeti import outils
49
from ganeti import utils
50

    
51
from socket import AF_INET
52

    
53

    
54
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56

    
57
_TIMESTAMPS = ["ctime", "mtime"]
58
_UUID = ["uuid"]
59

    
60

    
61
def FillDict(defaults_dict, custom_dict, skip_keys=None):
62
  """Basic function to apply settings on top a default dict.
63

64
  @type defaults_dict: dict
65
  @param defaults_dict: dictionary holding the default values
66
  @type custom_dict: dict
67
  @param custom_dict: dictionary holding customized values
68
  @type skip_keys: list
69
  @param skip_keys: which keys not to fill
70
  @rtype: dict
71
  @return: dict with the 'full' values
72

73
  """
74
  ret_dict = copy.deepcopy(defaults_dict)
75
  ret_dict.update(custom_dict)
76
  if skip_keys:
77
    for k in skip_keys:
78
      try:
79
        del ret_dict[k]
80
      except KeyError:
81
        pass
82
  return ret_dict
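# A minimal usage sketch for FillDict (editorial illustration, not part of the
# original module): custom values override the defaults and skip_keys prunes
# entries from the merged result.
#
#   >>> FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   {'b': 3}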
83

    
84

    
85
def _FillMinMaxISpecs(default_specs, custom_specs):
86
  assert frozenset(default_specs.keys()) == constants.ISPECS_MINMAX_KEYS
87
  ret_specs = {}
88
  for key in constants.ISPECS_MINMAX_KEYS:
89
    ret_specs[key] = FillDict(default_specs[key],
90
                              custom_specs.get(key, {}))
91
  return ret_specs
92

    
93

    
94
def FillIPolicy(default_ipolicy, custom_ipolicy):
95
  """Fills an instance policy with defaults.
96

97
  """
98
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
99
  ret_dict = {}
100
  # Instance specs
101
  new_mm = _FillMinMaxISpecs(default_ipolicy[constants.ISPECS_MINMAX],
102
                             custom_ipolicy.get(constants.ISPECS_MINMAX, {}))
103
  ret_dict[constants.ISPECS_MINMAX] = new_mm
104
  new_std = FillDict(default_ipolicy[constants.ISPECS_STD],
105
                     custom_ipolicy.get(constants.ISPECS_STD, {}))
106
  ret_dict[constants.ISPECS_STD] = new_std
107
  # list items
108
  for key in [constants.IPOLICY_DTS]:
109
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
110
  # other items which we know we can directly copy (immutables)
111
  for key in constants.IPOLICY_PARAMETERS:
112
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
113

    
114
  return ret_dict
115

    
116

    
117
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
118
  """Fills the disk parameter defaults.
119

120
  @see: L{FillDict} for parameters and return value
121

122
  """
123
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
124

    
125
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
126
                             skip_keys=skip_keys))
127
              for dt in constants.DISK_TEMPLATES)
128

    
129

    
130
def UpgradeGroupedParams(target, defaults):
131
  """Update all groups for the target parameter.
132

133
  @type target: dict of dicts
134
  @param target: {group: {parameter: value}}
135
  @type defaults: dict
136
  @param defaults: default parameter values
137

138
  """
139
  if target is None:
140
    target = {constants.PP_DEFAULT: defaults}
141
  else:
142
    for group in target:
143
      target[group] = FillDict(defaults, target[group])
144
  return target
145

    
146

    
147
def UpgradeBeParams(target):
148
  """Update the be parameters dict to the new format.
149

150
  @type target: dict
151
  @param target: "be" parameters dict
152

153
  """
154
  if constants.BE_MEMORY in target:
155
    memory = target[constants.BE_MEMORY]
156
    target[constants.BE_MAXMEM] = memory
157
    target[constants.BE_MINMEM] = memory
158
    del target[constants.BE_MEMORY]
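# Illustrative sketch of the upgrade above (editorial; assumes the usual
# constant values BE_MEMORY == "memory", BE_MAXMEM == "maxmem" and
# BE_MINMEM == "minmem"): the legacy single "memory" setting is split into
# explicit maximum and minimum memory.
#
#   >>> be = {"memory": 128}
#   >>> UpgradeBeParams(be)
#   >>> sorted(be.items())
#   [('maxmem', 128), ('minmem', 128)]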
159

    
160

    
161
def UpgradeDiskParams(diskparams):
162
  """Upgrade the disk parameters.
163

164
  @type diskparams: dict
165
  @param diskparams: disk parameters to upgrade
166
  @rtype: dict
167
  @return: the upgraded disk parameters dict
168

169
  """
170
  if not diskparams:
171
    result = {}
172
  else:
173
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
174

    
175
  return result
176

    
177

    
178
def UpgradeNDParams(ndparams):
179
  """Upgrade ndparams structure.
180

181
  @type ndparams: dict
182
  @param ndparams: disk parameters to upgrade
183
  @rtype: dict
184
  @return: the upgraded node parameters dict
185

186
  """
187
  if ndparams is None:
188
    ndparams = {}
189

    
190
  if (constants.ND_OOB_PROGRAM in ndparams and
191
      ndparams[constants.ND_OOB_PROGRAM] is None):
192
    # will be reset by the line below
193
    del ndparams[constants.ND_OOB_PROGRAM]
194
  return FillDict(constants.NDC_DEFAULTS, ndparams)
195

    
196

    
197
def MakeEmptyIPolicy():
198
  """Create empty IPolicy dictionary.
199

200
  """
201
  return {
202
    constants.ISPECS_MINMAX: {
203
      constants.ISPECS_MIN: {},
204
      constants.ISPECS_MAX: {},
205
      },
206
    constants.ISPECS_STD: {},
207
    }
208

    
209

    
210
class ConfigObject(outils.ValidatedSlots):
211
  """A generic config object.
212

213
  It has the following properties:
214

215
    - provides somewhat safe recursive unpickling and pickling for its classes
216
    - unset attributes which are defined in slots are always returned
217
      as None instead of raising an error
218

219
  Classes derived from this must always declare __slots__ (we use many
220
  config objects and the memory reduction is useful)
221

222
  """
223
  __slots__ = []
224

    
225
  def __getattr__(self, name):
226
    if name not in self.GetAllSlots():
227
      raise AttributeError("Invalid object attribute %s.%s" %
228
                           (type(self).__name__, name))
229
    return None
230

    
231
  def __setstate__(self, state):
232
    slots = self.GetAllSlots()
233
    for name in state:
234
      if name in slots:
235
        setattr(self, name, state[name])
236

    
237
  def Validate(self):
238
    """Validates the slots.
239

240
    """
241

    
242
  def ToDict(self):
243
    """Convert to a dict holding only standard python types.
244

245
    The generic routine just dumps all of this object's attributes in
246
    a dict. It does not work if the class has children who are
247
    ConfigObjects themselves (e.g. the nics list in an Instance), in
248
    which case the class should override this function in order to
249
    make sure all objects returned are only standard python types.
250

251
    """
252
    result = {}
253
    for name in self.GetAllSlots():
254
      value = getattr(self, name, None)
255
      if value is not None:
256
        result[name] = value
257
    return result
258

    
259
  __getstate__ = ToDict
260

    
261
  @classmethod
262
  def FromDict(cls, val):
263
    """Create an object from a dictionary.
264

265
    This generic routine takes a dict, instantiates a new instance of
266
    the given class, and sets attributes based on the dict content.
267

268
    As for `ToDict`, this does not work if the class has children
269
    who are ConfigObjects themselves (e.g. the nics list in an
270
    Instance), in which case the class should override this function
271
    and alter the objects.
272

273
    """
274
    if not isinstance(val, dict):
275
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
276
                                      " expected dict, got %s" % type(val))
277
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
278
    obj = cls(**val_str) # pylint: disable=W0142
279
    return obj
280

    
281
  def Copy(self):
282
    """Makes a deep copy of the current object and its children.
283

284
    """
285
    dict_form = self.ToDict()
286
    clone_obj = self.__class__.FromDict(dict_form)
287
    return clone_obj
288

    
289
  def __repr__(self):
290
    """Implement __repr__ for ConfigObjects."""
291
    return repr(self.ToDict())
292

    
293
  def UpgradeConfig(self):
294
    """Fill defaults for missing configuration values.
295

296
    This method will be called at configuration load time, and its
297
    implementation will be object dependent.
298

299
    """
300
    pass
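# Editorial sketch of the ToDict/FromDict round trip provided by ConfigObject,
# using a hypothetical subclass (not part of Ganeti):
#
#   >>> class _Example(ConfigObject):
#   ...   __slots__ = ["alpha", "beta"]
#   >>> obj = _Example(alpha=1)
#   >>> obj.ToDict()
#   {'alpha': 1}
#   >>> _Example.FromDict({"alpha": 1}).beta is None  # unset slots read as None
#   True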
301

    
302

    
303
class TaggableObject(ConfigObject):
304
  """An generic class supporting tags.
305

306
  """
307
  __slots__ = ["tags"]
308
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
309

    
310
  @classmethod
311
  def ValidateTag(cls, tag):
312
    """Check if a tag is valid.
313

314
    If the tag is invalid, an errors.TagError will be raised. The
315
    function has no return value.
316

317
    """
318
    if not isinstance(tag, basestring):
319
      raise errors.TagError("Invalid tag type (not a string)")
320
    if len(tag) > constants.MAX_TAG_LEN:
321
      raise errors.TagError("Tag too long (>%d characters)" %
322
                            constants.MAX_TAG_LEN)
323
    if not tag:
324
      raise errors.TagError("Tags cannot be empty")
325
    if not cls.VALID_TAG_RE.match(tag):
326
      raise errors.TagError("Tag contains invalid characters")
327

    
328
  def GetTags(self):
329
    """Return the tags list.
330

331
    """
332
    tags = getattr(self, "tags", None)
333
    if tags is None:
334
      tags = self.tags = set()
335
    return tags
336

    
337
  def AddTag(self, tag):
338
    """Add a new tag.
339

340
    """
341
    self.ValidateTag(tag)
342
    tags = self.GetTags()
343
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
344
      raise errors.TagError("Too many tags")
345
    self.GetTags().add(tag)
346

    
347
  def RemoveTag(self, tag):
348
    """Remove a tag.
349

350
    """
351
    self.ValidateTag(tag)
352
    tags = self.GetTags()
353
    try:
354
      tags.remove(tag)
355
    except KeyError:
356
      raise errors.TagError("Tag not found")
357

    
358
  def ToDict(self):
359
    """Taggable-object-specific conversion to standard python types.
360

361
    This replaces the tags set with a list.
362

363
    """
364
    bo = super(TaggableObject, self).ToDict()
365

    
366
    tags = bo.get("tags", None)
367
    if isinstance(tags, set):
368
      bo["tags"] = list(tags)
369
    return bo
370

    
371
  @classmethod
372
  def FromDict(cls, val):
373
    """Custom function for instances.
374

375
    """
376
    obj = super(TaggableObject, cls).FromDict(val)
377
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
378
      obj.tags = set(obj.tags)
379
    return obj
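# A short illustrative sketch of the tag interface (editorial, not part of the
# original file); tags must match VALID_TAG_RE and stay within the length and
# count limits, otherwise errors.TagError is raised:
#
#   >>> node = Node(name="node1.example.com")
#   >>> node.AddTag("web")
#   >>> node.GetTags()
#   set(['web'])
#   >>> node.AddTag("spaces are invalid")  # raises errors.TagError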
380

    
381

    
382
class MasterNetworkParameters(ConfigObject):
383
  """Network configuration parameters for the master
384

385
  @ivar name: master name
386
  @ivar ip: master IP
387
  @ivar netmask: master netmask
388
  @ivar netdev: master network device
389
  @ivar ip_family: master IP family
390

391
  """
392
  __slots__ = [
393
    "name",
394
    "ip",
395
    "netmask",
396
    "netdev",
397
    "ip_family",
398
    ]
399

    
400

    
401
class ConfigData(ConfigObject):
402
  """Top-level config object."""
403
  __slots__ = [
404
    "version",
405
    "cluster",
406
    "nodes",
407
    "nodegroups",
408
    "instances",
409
    "networks",
410
    "serial_no",
411
    ] + _TIMESTAMPS
412

    
413
  def ToDict(self):
414
    """Custom function for top-level config data.
415

416
    This just replaces the list of instances, nodes and the cluster
417
    with standard python types.
418

419
    """
420
    mydict = super(ConfigData, self).ToDict()
421
    mydict["cluster"] = mydict["cluster"].ToDict()
422
    for key in "nodes", "instances", "nodegroups", "networks":
423
      mydict[key] = outils.ContainerToDicts(mydict[key])
424

    
425
    return mydict
426

    
427
  @classmethod
428
  def FromDict(cls, val):
429
    """Custom function for top-level config data
430

431
    """
432
    obj = super(ConfigData, cls).FromDict(val)
433
    obj.cluster = Cluster.FromDict(obj.cluster)
434
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
435
    obj.instances = \
436
      outils.ContainerFromDicts(obj.instances, dict, Instance)
437
    obj.nodegroups = \
438
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
439
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
440
    return obj
441

    
442
  def HasAnyDiskOfType(self, dev_type):
443
    """Check if in there is at disk of the given type in the configuration.
444

445
    @type dev_type: L{constants.LDS_BLOCK}
446
    @param dev_type: the type to look for
447
    @rtype: boolean
448
    @return: boolean indicating if a disk of the given type was found or not
449

450
    """
451
    for instance in self.instances.values():
452
      for disk in instance.disks:
453
        if disk.IsBasedOnDiskType(dev_type):
454
          return True
455
    return False
456

    
457
  def UpgradeConfig(self):
458
    """Fill defaults for missing configuration values.
459

460
    """
461
    self.cluster.UpgradeConfig()
462
    for node in self.nodes.values():
463
      node.UpgradeConfig()
464
    for instance in self.instances.values():
465
      instance.UpgradeConfig()
466
    if self.nodegroups is None:
467
      self.nodegroups = {}
468
    for nodegroup in self.nodegroups.values():
469
      nodegroup.UpgradeConfig()
470
    if self.cluster.drbd_usermode_helper is None:
471
      # To decide if we set a helper, let's check if at least one instance has
472
      # a DRBD disk. This does not cover all the possible scenarios but it
473
      # gives a good approximation.
474
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
475
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
476
    if self.networks is None:
477
      self.networks = {}
478
    for network in self.networks.values():
479
      network.UpgradeConfig()
480
    self._UpgradeEnabledDiskTemplates()
481

    
482
  def _UpgradeEnabledDiskTemplates(self):
483
    """Upgrade the cluster's enabled disk templates by inspecting the currently
484
       enabled and/or used disk templates.
485

486
    """
487
    # enabled_disk_templates in the cluster config were introduced in 2.8.
488
    # Remove this code once upgrading from earlier versions is deprecated.
489
    if not self.cluster.enabled_disk_templates:
490
      template_set = \
491
        set([inst.disk_template for inst in self.instances.values()])
492
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
493
      if self.cluster.volume_group_name:
494
        template_set.add(constants.DT_DRBD8)
495
        template_set.add(constants.DT_PLAIN)
496
      # FIXME: Adapt this when dis/enabling at configure time is removed.
497
      # Enable 'file' and 'sharedfile', if they are enabled, even though they
498
      # might currently not be used.
499
      if constants.ENABLE_FILE_STORAGE:
500
        template_set.add(constants.DT_FILE)
501
      if constants.ENABLE_SHARED_FILE_STORAGE:
502
        template_set.add(constants.DT_SHARED_FILE)
503
      # Set enabled_disk_templates to the inferred disk templates. Order them
504
      # according to a preference list that is based on Ganeti's history of
505
      # supported disk templates.
506
      self.cluster.enabled_disk_templates = []
507
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
508
        if preferred_template in template_set:
509
          self.cluster.enabled_disk_templates.append(preferred_template)
510
          template_set.remove(preferred_template)
511
      self.cluster.enabled_disk_templates.extend(list(template_set))
512

    
513

    
514
class NIC(ConfigObject):
515
  """Config object representing a network card."""
516
  __slots__ = ["mac", "ip", "network", "nicparams", "netinfo"]
517

    
518
  @classmethod
519
  def CheckParameterSyntax(cls, nicparams):
520
    """Check the given parameters for validity.
521

522
    @type nicparams:  dict
523
    @param nicparams: dictionary with parameter names/value
524
    @raise errors.ConfigurationError: when a parameter is not valid
525

526
    """
527
    mode = nicparams[constants.NIC_MODE]
528
    if (mode not in constants.NIC_VALID_MODES and
529
        mode != constants.VALUE_AUTO):
530
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
531

    
532
    if (mode == constants.NIC_MODE_BRIDGED and
533
        not nicparams[constants.NIC_LINK]):
534
      raise errors.ConfigurationError("Missing bridged NIC link")
535

    
536

    
537
class Disk(ConfigObject):
538
  """Config object representing a block device."""
539
  __slots__ = ["dev_type", "logical_id", "physical_id",
540
               "children", "iv_name", "size", "mode", "params"]
541

    
542
  def CreateOnSecondary(self):
543
    """Test if this device needs to be created on a secondary node."""
544
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
545

    
546
  def AssembleOnSecondary(self):
547
    """Test if this device needs to be assembled on a secondary node."""
548
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
549

    
550
  def OpenOnSecondary(self):
551
    """Test if this device needs to be opened on a secondary node."""
552
    return self.dev_type in (constants.LD_LV,)
553

    
554
  def StaticDevPath(self):
555
    """Return the device path if this device type has a static one.
556

557
    Some devices (LVM for example) live always at the same /dev/ path,
558
    irrespective of their status. For such devices, we return this
559
    path, for others we return None.
560

561
    @warning: The path returned is not a normalized pathname; callers
562
        should check that it is a valid path.
563

564
    """
565
    if self.dev_type == constants.LD_LV:
566
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
567
    elif self.dev_type == constants.LD_BLOCKDEV:
568
      return self.logical_id[1]
569
    elif self.dev_type == constants.LD_RBD:
570
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
571
    return None
572

    
573
  def ChildrenNeeded(self):
574
    """Compute the needed number of children for activation.
575

576
    This method will return either -1 (all children) or a positive
577
    number denoting the minimum number of children needed for
578
    activation (only mirrored devices will usually return >=0).
579

580
    Currently, only DRBD8 supports diskless activation (therefore we
581
    return 0), for all other we keep the previous semantics and return
582
    -1.
583

584
    """
585
    if self.dev_type == constants.LD_DRBD8:
586
      return 0
587
    return -1
588

    
589
  def IsBasedOnDiskType(self, dev_type):
590
    """Check if the disk or its children are based on the given type.
591

592
    @type dev_type: L{constants.LDS_BLOCK}
593
    @param dev_type: the type to look for
594
    @rtype: boolean
595
    @return: boolean indicating if a device of the given type was found or not
596

597
    """
598
    if self.children:
599
      for child in self.children:
600
        if child.IsBasedOnDiskType(dev_type):
601
          return True
602
    return self.dev_type == dev_type
603

    
604
  def GetNodes(self, node):
605
    """This function returns the nodes this device lives on.
606

607
    Given the node on which the parent of the device lives on (or, in
608
    the case of a top-level device, the primary node of the device's
609
    instance), this function will return a list of nodes on which this
610
    device needs to (or can) be assembled.
611

612
    """
613
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
614
                         constants.LD_BLOCKDEV, constants.LD_RBD,
615
                         constants.LD_EXT]:
616
      result = [node]
617
    elif self.dev_type in constants.LDS_DRBD:
618
      result = [self.logical_id[0], self.logical_id[1]]
619
      if node not in result:
620
        raise errors.ConfigurationError("DRBD device passed unknown node")
621
    else:
622
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
623
    return result
624

    
625
  def ComputeNodeTree(self, parent_node):
626
    """Compute the node/disk tree for this disk and its children.
627

628
    This method, given the node on which the parent disk lives, will
629
    return the list of all (node, disk) pairs which describe the disk
630
    tree in the most compact way. For example, a drbd/lvm stack
631
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
632
    which represents all the top-level devices on the nodes.
633

634
    """
635
    my_nodes = self.GetNodes(parent_node)
636
    result = [(node, self) for node in my_nodes]
637
    if not self.children:
638
      # leaf device
639
      return result
640
    for node in my_nodes:
641
      for child in self.children:
642
        child_result = child.ComputeNodeTree(node)
643
        if len(child_result) == 1:
644
          # child (and all its descendants) is simple, doesn't split
645
          # over multiple hosts, so we don't need to describe it, our
646
          # own entry for this node describes it completely
647
          continue
648
        else:
649
          # check if child nodes differ from my nodes; note that
650
          # subdisk can differ from the child itself, and be instead
651
          # one of its descendants
652
          for subnode, subdisk in child_result:
653
            if subnode not in my_nodes:
654
              result.append((subnode, subdisk))
655
            # otherwise child is under our own node, so we ignore this
656
            # entry (but probably the other results in the list will
657
            # be different)
658
    return result
659

    
660
  def ComputeGrowth(self, amount):
661
    """Compute the per-VG growth requirements.
662

663
    This only works for VG-based disks.
664

665
    @type amount: integer
666
    @param amount: the desired increase in (user-visible) disk space
667
    @rtype: dict
668
    @return: a dictionary of volume-groups and the required size
669

670
    """
671
    if self.dev_type == constants.LD_LV:
672
      return {self.logical_id[0]: amount}
673
    elif self.dev_type == constants.LD_DRBD8:
674
      if self.children:
675
        return self.children[0].ComputeGrowth(amount)
676
      else:
677
        return {}
678
    else:
679
      # Other disk types do not require VG space
680
      return {}
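  # Editorial example (illustrative values only): for a plain LV-backed disk
  # the growth requirement is charged to the volume group named in
  # logical_id[0].
  #
  #   >>> d = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "disk0"),
  #   ...          size=1024)
  #   >>> d.ComputeGrowth(512)
  #   {'xenvg': 512}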
681

    
682
  def RecordGrow(self, amount):
683
    """Update the size of this disk after growth.
684

685
    This method recurses over the disks's children and updates their
686
    size correspondigly. The method needs to be kept in sync with the
687
    actual algorithms from bdev.
688

689
    """
690
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
691
                         constants.LD_RBD, constants.LD_EXT):
692
      self.size += amount
693
    elif self.dev_type == constants.LD_DRBD8:
694
      if self.children:
695
        self.children[0].RecordGrow(amount)
696
      self.size += amount
697
    else:
698
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
699
                                   " disk type %s" % self.dev_type)
700

    
701
  def Update(self, size=None, mode=None):
702
    """Apply changes to size and mode.
703

704
    """
705
    if self.dev_type == constants.LD_DRBD8:
706
      if self.children:
707
        self.children[0].Update(size=size, mode=mode)
708
    else:
709
      assert not self.children
710

    
711
    if size is not None:
712
      self.size = size
713
    if mode is not None:
714
      self.mode = mode
715

    
716
  def UnsetSize(self):
717
    """Sets recursively the size to zero for the disk and its children.
718

719
    """
720
    if self.children:
721
      for child in self.children:
722
        child.UnsetSize()
723
    self.size = 0
724

    
725
  def SetPhysicalID(self, target_node, nodes_ip):
726
    """Convert the logical ID to the physical ID.
727

728
    This is used only for drbd, which needs ip/port configuration.
729

730
    The routine descends down and updates its children also, because
731
    this helps when the only the top device is passed to the remote
732
    node.
733

734
    Arguments:
735
      - target_node: the node we wish to configure for
736
      - nodes_ip: a mapping of node name to ip
737

738
    The target_node must exist in in nodes_ip, and must be one of the
739
    nodes in the logical ID for each of the DRBD devices encountered
740
    in the disk tree.
741

742
    """
743
    if self.children:
744
      for child in self.children:
745
        child.SetPhysicalID(target_node, nodes_ip)
746

    
747
    if self.logical_id is None and self.physical_id is not None:
748
      return
749
    if self.dev_type in constants.LDS_DRBD:
750
      pnode, snode, port, pminor, sminor, secret = self.logical_id
751
      if target_node not in (pnode, snode):
752
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
753
                                        target_node)
754
      pnode_ip = nodes_ip.get(pnode, None)
755
      snode_ip = nodes_ip.get(snode, None)
756
      if pnode_ip is None or snode_ip is None:
757
        raise errors.ConfigurationError("Can't find primary or secondary node"
758
                                        " for %s" % str(self))
759
      p_data = (pnode_ip, port)
760
      s_data = (snode_ip, port)
761
      if pnode == target_node:
762
        self.physical_id = p_data + s_data + (pminor, secret)
763
      else: # it must be secondary, we tested above
764
        self.physical_id = s_data + p_data + (sminor, secret)
765
    else:
766
      self.physical_id = self.logical_id
767
    return
768

    
769
  def ToDict(self):
770
    """Disk-specific conversion to standard python types.
771

772
    This replaces the children lists of objects with lists of
773
    standard python types.
774

775
    """
776
    bo = super(Disk, self).ToDict()
777

    
778
    for attr in ("children",):
779
      alist = bo.get(attr, None)
780
      if alist:
781
        bo[attr] = outils.ContainerToDicts(alist)
782
    return bo
783

    
784
  @classmethod
785
  def FromDict(cls, val):
786
    """Custom function for Disks
787

788
    """
789
    obj = super(Disk, cls).FromDict(val)
790
    if obj.children:
791
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
792
    if obj.logical_id and isinstance(obj.logical_id, list):
793
      obj.logical_id = tuple(obj.logical_id)
794
    if obj.physical_id and isinstance(obj.physical_id, list):
795
      obj.physical_id = tuple(obj.physical_id)
796
    if obj.dev_type in constants.LDS_DRBD:
797
      # we need a tuple of length six here
798
      if len(obj.logical_id) < 6:
799
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
800
    return obj
801

    
802
  def __str__(self):
803
    """Custom str() formatter for disks.
804

805
    """
806
    if self.dev_type == constants.LD_LV:
807
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
808
    elif self.dev_type in constants.LDS_DRBD:
809
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
810
      val = "<DRBD8("
811
      if self.physical_id is None:
812
        phy = "unconfigured"
813
      else:
814
        phy = ("configured as %s:%s %s:%s" %
815
               (self.physical_id[0], self.physical_id[1],
816
                self.physical_id[2], self.physical_id[3]))
817

    
818
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
819
              (node_a, minor_a, node_b, minor_b, port, phy))
820
      if self.children and self.children.count(None) == 0:
821
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
822
      else:
823
        val += "no local storage"
824
    else:
825
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
826
             (self.dev_type, self.logical_id, self.physical_id, self.children))
827
    if self.iv_name is None:
828
      val += ", not visible"
829
    else:
830
      val += ", visible as /dev/%s" % self.iv_name
831
    if isinstance(self.size, int):
832
      val += ", size=%dm)>" % self.size
833
    else:
834
      val += ", size='%s')>" % (self.size,)
835
    return val
836

    
837
  def Verify(self):
838
    """Checks that this disk is correctly configured.
839

840
    """
841
    all_errors = []
842
    if self.mode not in constants.DISK_ACCESS_SET:
843
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
844
    return all_errors
845

    
846
  def UpgradeConfig(self):
847
    """Fill defaults for missing configuration values.
848

849
    """
850
    if self.children:
851
      for child in self.children:
852
        child.UpgradeConfig()
853

    
854
    # FIXME: Make this configurable in Ganeti 2.7
855
    self.params = {}
856
    # add here config upgrade for this disk
857

    
858
  @staticmethod
859
  def ComputeLDParams(disk_template, disk_params):
860
    """Computes Logical Disk parameters from Disk Template parameters.
861

862
    @type disk_template: string
863
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
864
    @type disk_params: dict
865
    @param disk_params: disk template parameters;
866
                        dict(template_name -> parameters)
867
    @rtype: list(dict)
868
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
869
      contains the LD parameters of the node. The tree is flattened in-order.
870

871
    """
872
    if disk_template not in constants.DISK_TEMPLATES:
873
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
874

    
875
    assert disk_template in disk_params
876

    
877
    result = list()
878
    dt_params = disk_params[disk_template]
879
    if disk_template == constants.DT_DRBD8:
880
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
881
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
882
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
883
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
884
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
885
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
886
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
887
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
888
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
889
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
890
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
891
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
892
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
893
        }))
894

    
895
      # data LV
896
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
897
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
898
        }))
899

    
900
      # metadata LV
901
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
902
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
903
        }))
904

    
905
    elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
906
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
907

    
908
    elif disk_template == constants.DT_PLAIN:
909
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
910
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
911
        }))
912

    
913
    elif disk_template == constants.DT_BLOCK:
914
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
915

    
916
    elif disk_template == constants.DT_RBD:
917
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
918
        constants.LDP_POOL: dt_params[constants.RBD_POOL],
919
        }))
920

    
921
    elif disk_template == constants.DT_EXT:
922
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
923

    
924
    return result
925

    
926

    
927
class InstancePolicy(ConfigObject):
928
  """Config object representing instance policy limits dictionary.
929

930
  Note that this object is not actually used in the config, it's just
931
  used as a placeholder for a few functions.
932

933
  """
934
  @classmethod
935
  def CheckParameterSyntax(cls, ipolicy, check_std):
936
    """ Check the instance policy for validity.
937

938
    @type ipolicy: dict
939
    @param ipolicy: dictionary with min/max/std specs and policies
940
    @type check_std: bool
941
    @param check_std: Whether to check std value or just assume compliance
942
    @raise errors.ConfigurationError: when the policy is not legal
943

944
    """
945
    if constants.ISPECS_MINMAX in ipolicy:
946
      if check_std and constants.ISPECS_STD not in ipolicy:
947
        msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
948
        raise errors.ConfigurationError(msg)
949
      minmaxspecs = ipolicy[constants.ISPECS_MINMAX]
950
      stdspec = ipolicy.get(constants.ISPECS_STD)
951
      for param in constants.ISPECS_PARAMETERS:
952
        InstancePolicy.CheckISpecSyntax(minmaxspecs, stdspec, param, check_std)
953
    if constants.IPOLICY_DTS in ipolicy:
954
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
955
    for key in constants.IPOLICY_PARAMETERS:
956
      if key in ipolicy:
957
        InstancePolicy.CheckParameter(key, ipolicy[key])
958
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
959
    if wrong_keys:
960
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
961
                                      utils.CommaJoin(wrong_keys))
962

    
963
  @classmethod
964
  def CheckISpecSyntax(cls, minmaxspecs, stdspec, name, check_std):
965
    """Check the instance policy specs for validity on a given key.
966

967
    We check if the instance specs make sense for a given key, that is
968
    if minmaxspecs[min][name] <= stdspec[name] <= minmaxspecs[max][name].
969

970
    @type minmaxspecs: dict
971
    @param minmaxspecs: dictionary with min and max instance spec
972
    @type stdspec: dict
973
    @param stdspec: dictionary with standard instance spec
974
    @type name: string
975
    @param name: what are the limits for
976
    @type check_std: bool
977
    @param check_std: Whether to check std value or just assume compliance
978
    @raise errors.ConfigurationError: when specs for the given name are not
979
        valid
980

981
    """
982
    missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
983
    if missing:
984
      msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
985
      raise errors.ConfigurationError(msg)
986

    
987
    minspec = minmaxspecs[constants.ISPECS_MIN]
988
    maxspec = minmaxspecs[constants.ISPECS_MAX]
989
    min_v = minspec.get(name, 0)
990

    
991
    if check_std:
992
      std_v = stdspec.get(name, min_v)
993
      std_msg = std_v
994
    else:
995
      std_v = min_v
996
      std_msg = "-"
997

    
998
    max_v = maxspec.get(name, std_v)
999
    if min_v > std_v or std_v > max_v:
1000
      err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
1001
             (name,
1002
              minspec.get(name, "-"),
1003
              maxspec.get(name, "-"),
1004
              std_msg))
1005
      raise errors.ConfigurationError(err)
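  # Editorial illustration of the rule checked above (min <= std <= max),
  # assuming "min"/"max" are the literal values of ISPECS_MIN/ISPECS_MAX:
  #
  #   >>> minmax = {"min": {"disk-size": 1024}, "max": {"disk-size": 4096}}
  #   >>> InstancePolicy.CheckISpecSyntax(minmax, {"disk-size": 2048},
  #   ...                                 "disk-size", True)  # passes
  #   >>> InstancePolicy.CheckISpecSyntax(minmax, {"disk-size": 8192},
  #   ...                                 "disk-size", True)
  #   # raises errors.ConfigurationError: std exceeds max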
1006

    
1007
  @classmethod
1008
  def CheckDiskTemplates(cls, disk_templates):
1009
    """Checks the disk templates for validity.
1010

1011
    """
1012
    if not disk_templates:
1013
      raise errors.ConfigurationError("Instance policy must contain" +
1014
                                      " at least one disk template")
1015
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1016
    if wrong:
1017
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1018
                                      utils.CommaJoin(wrong))
1019

    
1020
  @classmethod
1021
  def CheckParameter(cls, key, value):
1022
    """Checks a parameter.
1023

1024
    Currently we expect all parameters to be float values.
1025

1026
    """
1027
    try:
1028
      float(value)
1029
    except (TypeError, ValueError), err:
1030
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1031
                                      " '%s', error: %s" % (key, value, err))
1032

    
1033

    
1034
class Instance(TaggableObject):
1035
  """Config object representing an instance."""
1036
  __slots__ = [
1037
    "name",
1038
    "primary_node",
1039
    "os",
1040
    "hypervisor",
1041
    "hvparams",
1042
    "beparams",
1043
    "osparams",
1044
    "admin_state",
1045
    "nics",
1046
    "disks",
1047
    "disk_template",
1048
    "network_port",
1049
    "serial_no",
1050
    ] + _TIMESTAMPS + _UUID
1051

    
1052
  def _ComputeSecondaryNodes(self):
1053
    """Compute the list of secondary nodes.
1054

1055
    This is a simple wrapper over _ComputeAllNodes.
1056

1057
    """
1058
    all_nodes = set(self._ComputeAllNodes())
1059
    all_nodes.discard(self.primary_node)
1060
    return tuple(all_nodes)
1061

    
1062
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1063
                             "List of names of secondary nodes")
1064

    
1065
  def _ComputeAllNodes(self):
1066
    """Compute the list of all nodes.
1067

1068
    Since the data is already there (in the drbd disks), keeping it as
1069
    a separate normal attribute is redundant and if not properly
1070
    synchronised can cause problems. Thus it's better to compute it
1071
    dynamically.
1072

1073
    """
1074
    def _Helper(nodes, device):
1075
      """Recursively computes nodes given a top device."""
1076
      if device.dev_type in constants.LDS_DRBD:
1077
        nodea, nodeb = device.logical_id[:2]
1078
        nodes.add(nodea)
1079
        nodes.add(nodeb)
1080
      if device.children:
1081
        for child in device.children:
1082
          _Helper(nodes, child)
1083

    
1084
    all_nodes = set()
1085
    all_nodes.add(self.primary_node)
1086
    for device in self.disks:
1087
      _Helper(all_nodes, device)
1088
    return tuple(all_nodes)
1089

    
1090
  all_nodes = property(_ComputeAllNodes, None, None,
1091
                       "List of names of all the nodes of the instance")
1092

    
1093
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1094
    """Provide a mapping of nodes to LVs this instance owns.
1095

1096
    This function figures out what logical volumes should belong on
1097
    which nodes, recursing through a device tree.
1098

1099
    @param lvmap: optional dictionary to receive the
1100
        'node' : ['lv', ...] data.
1101

1102
    @return: None if lvmap arg is given, otherwise, a dictionary of
1103
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1104
        volumeN is of the form "vg_name/lv_name", compatible with
1105
        GetVolumeList()
1106

1107
    """
1108
    if node is None:
1109
      node = self.primary_node
1110

    
1111
    if lvmap is None:
1112
      lvmap = {
1113
        node: [],
1114
        }
1115
      ret = lvmap
1116
    else:
1117
      if node not in lvmap:
1118
        lvmap[node] = []
1119
      ret = None
1120

    
1121
    if not devs:
1122
      devs = self.disks
1123

    
1124
    for dev in devs:
1125
      if dev.dev_type == constants.LD_LV:
1126
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1127

    
1128
      elif dev.dev_type in constants.LDS_DRBD:
1129
        if dev.children:
1130
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1131
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1132

    
1133
      elif dev.children:
1134
        self.MapLVsByNode(lvmap, dev.children, node)
1135

    
1136
    return ret
1137

    
1138
  def FindDisk(self, idx):
1139
    """Find a disk given having a specified index.
1140

1141
    This is just a wrapper that does validation of the index.
1142

1143
    @type idx: int
1144
    @param idx: the disk index
1145
    @rtype: L{Disk}
1146
    @return: the corresponding disk
1147
    @raise errors.OpPrereqError: when the given index is not valid
1148

1149
    """
1150
    try:
1151
      idx = int(idx)
1152
      return self.disks[idx]
1153
    except (TypeError, ValueError), err:
1154
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1155
                                 errors.ECODE_INVAL)
1156
    except IndexError:
1157
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1158
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1159
                                 errors.ECODE_INVAL)
1160

    
1161
  def ToDict(self):
1162
    """Instance-specific conversion to standard python types.
1163

1164
    This replaces the children lists of objects with lists of standard
1165
    python types.
1166

1167
    """
1168
    bo = super(Instance, self).ToDict()
1169

    
1170
    for attr in "nics", "disks":
1171
      alist = bo.get(attr, None)
1172
      if alist:
1173
        nlist = outils.ContainerToDicts(alist)
1174
      else:
1175
        nlist = []
1176
      bo[attr] = nlist
1177
    return bo
1178

    
1179
  @classmethod
1180
  def FromDict(cls, val):
1181
    """Custom function for instances.
1182

1183
    """
1184
    if "admin_state" not in val:
1185
      if val.get("admin_up", False):
1186
        val["admin_state"] = constants.ADMINST_UP
1187
      else:
1188
        val["admin_state"] = constants.ADMINST_DOWN
1189
    if "admin_up" in val:
1190
      del val["admin_up"]
1191
    obj = super(Instance, cls).FromDict(val)
1192
    obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1193
    obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1194
    return obj
1195

    
1196
  def UpgradeConfig(self):
1197
    """Fill defaults for missing configuration values.
1198

1199
    """
1200
    for nic in self.nics:
1201
      nic.UpgradeConfig()
1202
    for disk in self.disks:
1203
      disk.UpgradeConfig()
1204
    if self.hvparams:
1205
      for key in constants.HVC_GLOBALS:
1206
        try:
1207
          del self.hvparams[key]
1208
        except KeyError:
1209
          pass
1210
    if self.osparams is None:
1211
      self.osparams = {}
1212
    UpgradeBeParams(self.beparams)
1213

    
1214

    
1215
class OS(ConfigObject):
1216
  """Config object representing an operating system.
1217

1218
  @type supported_parameters: list
1219
  @ivar supported_parameters: a list of tuples, name and description,
1220
      containing the parameters supported by this OS
1221

1222
  @type VARIANT_DELIM: string
1223
  @cvar VARIANT_DELIM: the variant delimiter
1224

1225
  """
1226
  __slots__ = [
1227
    "name",
1228
    "path",
1229
    "api_versions",
1230
    "create_script",
1231
    "export_script",
1232
    "import_script",
1233
    "rename_script",
1234
    "verify_script",
1235
    "supported_variants",
1236
    "supported_parameters",
1237
    ]
1238

    
1239
  VARIANT_DELIM = "+"
1240

    
1241
  @classmethod
1242
  def SplitNameVariant(cls, name):
1243
    """Splits the name into the proper name and variant.
1244

1245
    @param name: the OS (unprocessed) name
1246
    @rtype: list
1247
    @return: a list of two elements; if the original name didn't
1248
        contain a variant, it's returned as an empty string
1249

1250
    """
1251
    nv = name.split(cls.VARIANT_DELIM, 1)
1252
    if len(nv) == 1:
1253
      nv.append("")
1254
    return nv
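  # Editorial example: with VARIANT_DELIM == "+" the OS name is split into the
  # base name and the variant, the variant defaulting to the empty string.
  #
  #   >>> OS.SplitNameVariant("debian+squeeze")
  #   ['debian', 'squeeze']
  #   >>> OS.SplitNameVariant("debian")
  #   ['debian', '']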
1255

    
1256
  @classmethod
1257
  def GetName(cls, name):
1258
    """Returns the proper name of the os (without the variant).
1259

1260
    @param name: the OS (unprocessed) name
1261

1262
    """
1263
    return cls.SplitNameVariant(name)[0]
1264

    
1265
  @classmethod
1266
  def GetVariant(cls, name):
1267
    """Returns the variant the os (without the base name).
1268

1269
    @param name: the OS (unprocessed) name
1270

1271
    """
1272
    return cls.SplitNameVariant(name)[1]
1273

    
1274

    
1275
class ExtStorage(ConfigObject):
1276
  """Config object representing an External Storage Provider.
1277

1278
  """
1279
  __slots__ = [
1280
    "name",
1281
    "path",
1282
    "create_script",
1283
    "remove_script",
1284
    "grow_script",
1285
    "attach_script",
1286
    "detach_script",
1287
    "setinfo_script",
1288
    "verify_script",
1289
    "supported_parameters",
1290
    ]
1291

    
1292

    
1293
class NodeHvState(ConfigObject):
1294
  """Hypvervisor state on a node.
1295

1296
  @ivar mem_total: Total amount of memory
1297
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1298
    available)
1299
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1300
    rounding
1301
  @ivar mem_inst: Memory used by instances living on node
1302
  @ivar cpu_total: Total node CPU core count
1303
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1304

1305
  """
1306
  __slots__ = [
1307
    "mem_total",
1308
    "mem_node",
1309
    "mem_hv",
1310
    "mem_inst",
1311
    "cpu_total",
1312
    "cpu_node",
1313
    ] + _TIMESTAMPS
1314

    
1315

    
1316
class NodeDiskState(ConfigObject):
1317
  """Disk state on a node.
1318

1319
  """
1320
  __slots__ = [
1321
    "total",
1322
    "reserved",
1323
    "overhead",
1324
    ] + _TIMESTAMPS
1325

    
1326

    
1327
class Node(TaggableObject):
1328
  """Config object representing a node.
1329

1330
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1331
  @ivar hv_state_static: Hypervisor state overridden by user
1332
  @ivar disk_state: Disk state (e.g. free space)
1333
  @ivar disk_state_static: Disk state overridden by user
1334

1335
  """
1336
  __slots__ = [
1337
    "name",
1338
    "primary_ip",
1339
    "secondary_ip",
1340
    "serial_no",
1341
    "master_candidate",
1342
    "offline",
1343
    "drained",
1344
    "group",
1345
    "master_capable",
1346
    "vm_capable",
1347
    "ndparams",
1348
    "powered",
1349
    "hv_state",
1350
    "hv_state_static",
1351
    "disk_state",
1352
    "disk_state_static",
1353
    ] + _TIMESTAMPS + _UUID
1354

    
1355
  def UpgradeConfig(self):
1356
    """Fill defaults for missing configuration values.
1357

1358
    """
1359
    # pylint: disable=E0203
1360
    # because these are "defined" via slots, not manually
1361
    if self.master_capable is None:
1362
      self.master_capable = True
1363

    
1364
    if self.vm_capable is None:
1365
      self.vm_capable = True
1366

    
1367
    if self.ndparams is None:
1368
      self.ndparams = {}
1369
    # And remove any global parameter
1370
    for key in constants.NDC_GLOBALS:
1371
      if key in self.ndparams:
1372
        logging.warning("Ignoring %s node parameter for node %s",
1373
                        key, self.name)
1374
        del self.ndparams[key]
1375

    
1376
    if self.powered is None:
1377
      self.powered = True
1378

    
1379
  def ToDict(self):
1380
    """Custom function for serializing.
1381

1382
    """
1383
    data = super(Node, self).ToDict()
1384

    
1385
    hv_state = data.get("hv_state", None)
1386
    if hv_state is not None:
1387
      data["hv_state"] = outils.ContainerToDicts(hv_state)
1388

    
1389
    disk_state = data.get("disk_state", None)
1390
    if disk_state is not None:
1391
      data["disk_state"] = \
1392
        dict((key, outils.ContainerToDicts(value))
1393
             for (key, value) in disk_state.items())
1394

    
1395
    return data
1396

    
1397
  @classmethod
1398
  def FromDict(cls, val):
1399
    """Custom function for deserializing.
1400

1401
    """
1402
    obj = super(Node, cls).FromDict(val)
1403

    
1404
    if obj.hv_state is not None:
1405
      obj.hv_state = \
1406
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1407

    
1408
    if obj.disk_state is not None:
1409
      obj.disk_state = \
1410
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1411
             for (key, value) in obj.disk_state.items())
1412

    
1413
    return obj
1414

    
1415

    
1416
class NodeGroup(TaggableObject):
1417
  """Config object representing a node group."""
1418
  __slots__ = [
1419
    "name",
1420
    "members",
1421
    "ndparams",
1422
    "diskparams",
1423
    "ipolicy",
1424
    "serial_no",
1425
    "hv_state_static",
1426
    "disk_state_static",
1427
    "alloc_policy",
1428
    "networks",
1429
    ] + _TIMESTAMPS + _UUID
1430

    
1431
  def ToDict(self):
1432
    """Custom function for nodegroup.
1433

1434
    This discards the members object, which gets recalculated and is only kept
1435
    in memory.
1436

1437
    """
1438
    mydict = super(NodeGroup, self).ToDict()
1439
    del mydict["members"]
1440
    return mydict
1441

    
1442
  @classmethod
1443
  def FromDict(cls, val):
1444
    """Custom function for nodegroup.
1445

1446
    The members slot is initialized to an empty list, upon deserialization.
1447

1448
    """
1449
    obj = super(NodeGroup, cls).FromDict(val)
1450
    obj.members = []
1451
    return obj
1452

    
1453
  def UpgradeConfig(self):
1454
    """Fill defaults for missing configuration values.
1455

1456
    """
1457
    if self.ndparams is None:
1458
      self.ndparams = {}
1459

    
1460
    if self.serial_no is None:
1461
      self.serial_no = 1
1462

    
1463
    if self.alloc_policy is None:
1464
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1465

    
1466
    # We only update mtime, and not ctime, since we would not be able
1467
    # to provide a correct value for creation time.
1468
    if self.mtime is None:
1469
      self.mtime = time.time()
1470

    
1471
    if self.diskparams is None:
1472
      self.diskparams = {}
1473
    if self.ipolicy is None:
1474
      self.ipolicy = MakeEmptyIPolicy()
1475

    
1476
    if self.networks is None:
1477
      self.networks = {}
1478

    
1479
  def FillND(self, node):
1480
    """Return filled out ndparams for L{objects.Node}
1481

1482
    @type node: L{objects.Node}
1483
    @param node: A Node object to fill
1484
    @return: a copy of the node's ndparams with defaults filled
1485

1486
    """
1487
    return self.SimpleFillND(node.ndparams)
1488

    
1489
  def SimpleFillND(self, ndparams):
1490
    """Fill a given ndparams dict with defaults.
1491

1492
    @type ndparams: dict
1493
    @param ndparams: the dict to fill
1494
    @rtype: dict
1495
    @return: a copy of the passed in ndparams with missing keys filled
1496
        from the node group defaults
1497

1498
    """
1499
    return FillDict(self.ndparams, ndparams)
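# Editorial sketch of node parameter filling (hypothetical values): keys
# missing from the passed dict are taken from the group's own ndparams.
#
#   >>> group = NodeGroup(name="default", ndparams={"oob_program": "/bin/true"})
#   >>> group.SimpleFillND({})
#   {'oob_program': '/bin/true'}
#   >>> group.SimpleFillND({"oob_program": "/bin/false"})
#   {'oob_program': '/bin/false'}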
1500

    
1501

    
1502
class Cluster(TaggableObject):
1503
  """Config object representing the cluster."""
1504
  __slots__ = [
1505
    "serial_no",
1506
    "rsahostkeypub",
1507
    "highest_used_port",
1508
    "tcpudp_port_pool",
1509
    "mac_prefix",
1510
    "volume_group_name",
1511
    "reserved_lvs",
1512
    "drbd_usermode_helper",
1513
    "default_bridge",
1514
    "default_hypervisor",
1515
    "master_node",
1516
    "master_ip",
1517
    "master_netdev",
1518
    "master_netmask",
1519
    "use_external_mip_script",
1520
    "cluster_name",
1521
    "file_storage_dir",
1522
    "shared_file_storage_dir",
1523
    "enabled_hypervisors",
1524
    "hvparams",
1525
    "ipolicy",
1526
    "os_hvp",
1527
    "beparams",
1528
    "osparams",
1529
    "nicparams",
1530
    "ndparams",
1531
    "diskparams",
1532
    "candidate_pool_size",
1533
    "modify_etc_hosts",
1534
    "modify_ssh_setup",
1535
    "maintain_node_health",
1536
    "uid_pool",
1537
    "default_iallocator",
1538
    "hidden_os",
1539
    "blacklisted_os",
1540
    "primary_ip_family",
1541
    "prealloc_wipe_disks",
1542
    "hv_state_static",
1543
    "disk_state_static",
1544
    # Keeping this in temporarily to not break the build between patches of
1545
    # this series. Remove after 'enabled_disk_templates' is fully implemented.
1546
    "enabled_storage_types",
1547
    "enabled_disk_templates",
1548
    ] + _TIMESTAMPS + _UUID
1549

    
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj
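
  # Illustrative note (editor's addition, not part of the upstream module):
  # ToDict()/FromDict() exist mainly because tcpudp_port_pool is kept as a
  # set in memory but must be a plain list in the serialized (JSON) form.
  # Assuming a cluster with ports 11000 and 11001 reserved:
  #
  #   data = cluster.ToDict()        # data["tcpudp_port_pool"] is a list,
  #                                  # e.g. [11000, 11001] (order not fixed)
  #   copy = Cluster.FromDict(data)  # copy.tcpudp_port_pool == set([11000, 11001])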
  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the hypervisor parameters to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
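
  # Illustrative usage (editor's sketch; the hypervisor choice and parameter
  # values are assumptions, not taken from a real configuration):
  #
  #   cluster.SimpleFillHV(constants.HT_KVM, "debian-image",
  #                        {"kernel_args": "ro single"})
  #
  # returns the cluster-level KVM defaults, overlaid with any
  # os_hvp["debian-image"] overrides for KVM, overlaid with the explicit
  # {"kernel_args": ...} dict, which is the precedence GetHVDefaults()
  # builds via its fill_stack.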
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
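
  # Illustrative usage (editor's sketch; the OS and parameter names are made
  # up): for a variant such as "debootstrap+secure", name_only is
  # "debootstrap", so
  #
  #   cluster.SimpleFillOS("debootstrap+secure", {"dhcp": "no"})
  #
  # layers osparams["debootstrap"], then osparams["debootstrap+secure"],
  # then the explicit {"dhcp": "no"} on top.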
  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
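
  # Illustrative note (editor's addition): FillND() chains three levels, so
  #
  #   cluster.FillND(node, nodegroup)
  #
  # takes nodegroup.FillND(node) (node ndparams on top of group ndparams)
  # and fills the remaining keys from the cluster-wide self.ndparams via
  # SimpleFillND().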
  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True


class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
    }
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result
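
  # Illustrative result (editor's sketch; the network values below are
  # assumptions): for a network named "vlan100" with network "10.0.0.0/24"
  # and gateway "10.0.0.1", HooksDict(prefix="NEW_") would contain, among
  # other keys:
  #
  #   {"NEW_NETWORK_NAME": "vlan100",
  #    "NEW_NETWORK_SUBNET": "10.0.0.0/24",
  #    "NEW_NETWORK_GATEWAY": "10.0.0.1"}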
  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/deserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
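
  # Illustrative round trip (editor's sketch, not part of the upstream
  # module):
  #
  #   cfp = SerializableConfigParser.Loads("[section]\nkey = value\n")
  #   cfp.get("section", "key")   # returns "value"
  #   text = cfp.Dumps()          # back to an ini-style string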


class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    return self.size <= (self.free + 1)
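
  # Editor's note (assumption about intent, not stated upstream): the "+ 1"
  # leaves one MiB of slack, so a PV whose free space is within 1 MiB of its
  # size (e.g. size=10240.0, free=10239.61) still counts as empty.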
  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return ("a" in self.attributes)