Statistics
| Branch: | Tag: | Revision:

root / lib / objects.py @ c60abd62

History | View | Annotate | Download (36.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable-msg=E0203,W0201
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitely initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
import ConfigParser
37
import re
38
import copy
39
from cStringIO import StringIO
40

    
41
from ganeti import errors
42
from ganeti import constants
43

    
44
from socket import AF_INET
45

    
46

    
47
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
48
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
49

    
50
_TIMESTAMPS = ["ctime", "mtime"]
51
_UUID = ["uuid"]
52

    
53

    
54
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep-copy the defaults so callers' dicts are never mutated through
  # the returned value; customized values override the defaults
  filled = copy.deepcopy(defaults_dict)
  filled.update(custom_dict)
  for key in (skip_keys or []):
    if key in filled:
      del filled[key]
  return filled
76

    
77

    
78
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  # no parameters at all: create the default group from scratch
  if target is None:
    return {constants.PP_DEFAULT: defaults}
  # otherwise fill every existing group on top of the defaults
  for group in target:
    target[group] = FillDict(defaults, target[group])
  return target
93

    
94

    
95
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    """Initialise the object from keyword arguments.

    Each keyword becomes an attribute; because of __slots__, setting a
    name that is not a declared slot raises AttributeError.

    """
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    """Return None for declared-but-unset slots.

    This is only invoked when normal attribute lookup fails, i.e. for
    slots that were never assigned; undeclared names still raise.

    """
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    """Unpickling support: restore only keys that are declared slots.

    Unknown keys (e.g. from an older/newer config format) are silently
    dropped instead of raising.

    """
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    Walks the whole MRO so slots declared on parent classes are
    included as well.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    Note that slots whose value is None are omitted from the result.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  # pickling serialises via ToDict, so only set, non-None slots are stored
  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # force keys to str so they are usable as keyword argument names
    # (e.g. unicode keys coming from a JSON load)
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      # all sequence-like containers serialise to a plain list
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    @param source: the serialised container (or None for an empty one)
    @param c_type: the container type to build (dict, list, ...)
    @param e_type: the element class; must provide a FromDict classmethod

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      # a missing container deserialises to an empty one
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    Implemented as a ToDict/FromDict round-trip, so only declared
    slots are carried over.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
236

    
237

    
238
class TaggableObject(ConfigObject):
  """An generic class supporting tags.

  """
  __slots__ = ["tags"]
  # allowed characters for a single tag (value identical to the
  # original non-raw string, since \w is not a string escape)
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    # the checks are ordered: type, length, emptiness, content
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    Lazily creates the backing set on first access.

    """
    current = getattr(self, "tags", None)
    if current is None:
      current = set()
      self.tags = current
    return current

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    current = self.GetTags()
    if len(current) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    current.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    current = self.GetTags()
    if tag not in current:
      raise errors.TagError("Tag not found")
    current.remove(tag)

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    data = super(TaggableObject, self).ToDict()
    tag_set = data.get("tags", None)
    if isinstance(tag_set, set):
      data["tags"] = list(tag_set)
    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    Converts the serialised tags list back into a set.

    """
    instance = super(TaggableObject, cls).FromDict(val)
    if hasattr(instance, "tags") and isinstance(instance.tags, list):
      instance.tags = set(instance.tags)
    return instance
315

    
316

    
317
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    # these three slots hold dicts of ConfigObjects and need recursion
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Delegates to the per-object UpgradeConfig methods, then applies
    the cross-object defaults that need the whole config.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    # nodegroups did not exist in older configs; default to empty
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set an helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
388

    
389

    
390
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if mode not in constants.NIC_VALID_MODES:
      raise errors.ConfigurationError("Invalid nic mode: %s" % mode)

    # bridged NICs must have a link (the bridge name)
    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged nic link")

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      params = {}
      if self.bridge is not None:
        # migrate the pre-2.1 bridge setting into nicparams
        params[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        params[constants.NIC_LINK] = self.bridge
      self.nicparams = params
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but can be removed once upgrades to the current version
    # straight from 2.0 are deprecated.
    if self.bridge is not None:
      self.bridge = None
426

    
427

    
428
class Disk(ConfigObject):
  """Config object representing a block device."""
  # children is a list of Disk objects (e.g. the LVs backing a DRBD8
  # device); logical_id/physical_id formats depend on dev_type
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # for LVs the logical_id is (vg_name, lv_name)
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    # depth-first search through the device tree
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      # local-only devices live on the parent's node
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      # for DRBD the logical_id starts with (primary, secondary, ...)
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    @param amount: the growth amount, in the same unit as C{size}

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      # only the data child (index 0) grows, not the metadata device
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    # already converted and nothing to convert from: nothing to do
    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      # the physical_id is ordered local-endpoint first, so it depends
      # on which side target_node is: (local_ip, port, remote_ip, port,
      # local_minor, secret)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      # non-DRBD devices use the logical id unchanged
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    Rebuilds the children recursively and restores the tuple types of
    the id fields (JSON round-trips tuples as lists).

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val =  "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      # a DRBD device has either both children (data + meta) or none
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      # size can be a non-int placeholder during some operations
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    @rtype: list
    @return: a list of error messages; empty if the disk is valid

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
701

    
702

    
703
class Instance(TaggableObject):
704
  """Config object representing an instance."""
705
  __slots__ = [
706
    "name",
707
    "primary_node",
708
    "os",
709
    "hypervisor",
710
    "hvparams",
711
    "beparams",
712
    "osparams",
713
    "admin_up",
714
    "nics",
715
    "disks",
716
    "disk_template",
717
    "network_port",
718
    "serial_no",
719
    ] + _TIMESTAMPS + _UUID
720

    
721
  def _ComputeSecondaryNodes(self):
722
    """Compute the list of secondary nodes.
723

724
    This is a simple wrapper over _ComputeAllNodes.
725

726
    """
727
    all_nodes = set(self._ComputeAllNodes())
728
    all_nodes.discard(self.primary_node)
729
    return tuple(all_nodes)
730

    
731
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
732
                             "List of secondary nodes")
733

    
734
  def _ComputeAllNodes(self):
735
    """Compute the list of all nodes.
736

737
    Since the data is already there (in the drbd disks), keeping it as
738
    a separate normal attribute is redundant and if not properly
739
    synchronised can cause problems. Thus it's better to compute it
740
    dynamically.
741

742
    """
743
    def _Helper(nodes, device):
744
      """Recursively computes nodes given a top device."""
745
      if device.dev_type in constants.LDS_DRBD:
746
        nodea, nodeb = device.logical_id[:2]
747
        nodes.add(nodea)
748
        nodes.add(nodeb)
749
      if device.children:
750
        for child in device.children:
751
          _Helper(nodes, child)
752

    
753
    all_nodes = set()
754
    all_nodes.add(self.primary_node)
755
    for device in self.disks:
756
      _Helper(all_nodes, device)
757
    return tuple(all_nodes)
758

    
759
  all_nodes = property(_ComputeAllNodes, None, None,
760
                       "List of all nodes of the instance")
761

    
762
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
763
    """Provide a mapping of nodes to LVs this instance owns.
764

765
    This function figures out what logical volumes should belong on
766
    which nodes, recursing through a device tree.
767

768
    @param lvmap: optional dictionary to receive the
769
        'node' : ['lv', ...] data.
770

771
    @return: None if lvmap arg is given, otherwise, a dictionary
772
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
773

774
    """
775
    if node == None:
776
      node = self.primary_node
777

    
778
    if lvmap is None:
779
      lvmap = { node : [] }
780
      ret = lvmap
781
    else:
782
      if not node in lvmap:
783
        lvmap[node] = []
784
      ret = None
785

    
786
    if not devs:
787
      devs = self.disks
788

    
789
    for dev in devs:
790
      if dev.dev_type == constants.LD_LV:
791
        lvmap[node].append(dev.logical_id[1])
792

    
793
      elif dev.dev_type in constants.LDS_DRBD:
794
        if dev.children:
795
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
796
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
797

    
798
      elif dev.children:
799
        self.MapLVsByNode(lvmap, dev.children, node)
800

    
801
    return ret
802

    
803
  def FindDisk(self, idx):
804
    """Find a disk given having a specified index.
805

806
    This is just a wrapper that does validation of the index.
807

808
    @type idx: int
809
    @param idx: the disk index
810
    @rtype: L{Disk}
811
    @return: the corresponding disk
812
    @raise errors.OpPrereqError: when the given index is not valid
813

814
    """
815
    try:
816
      idx = int(idx)
817
      return self.disks[idx]
818
    except (TypeError, ValueError), err:
819
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
820
                                 errors.ECODE_INVAL)
821
    except IndexError:
822
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
823
                                 " 0 to %d" % (idx, len(self.disks)),
824
                                 errors.ECODE_INVAL)
825

    
826
  def ToDict(self):
827
    """Instance-specific conversion to standard python types.
828

829
    This replaces the children lists of objects with lists of standard
830
    python types.
831

832
    """
833
    bo = super(Instance, self).ToDict()
834

    
835
    for attr in "nics", "disks":
836
      alist = bo.get(attr, None)
837
      if alist:
838
        nlist = self._ContainerToDicts(alist)
839
      else:
840
        nlist = []
841
      bo[attr] = nlist
842
    return bo
843

    
844
  @classmethod
845
  def FromDict(cls, val):
846
    """Custom function for instances.
847

848
    """
849
    obj = super(Instance, cls).FromDict(val)
850
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
851
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
852
    return obj
853

    
854
  def UpgradeConfig(self):
855
    """Fill defaults for missing configuration values.
856

857
    """
858
    for nic in self.nics:
859
      nic.UpgradeConfig()
860
    for disk in self.disks:
861
      disk.UpgradeConfig()
862
    if self.hvparams:
863
      for key in constants.HVC_GLOBALS:
864
        try:
865
          del self.hvparams[key]
866
        except KeyError:
867
          pass
868
    if self.osparams is None:
869
      self.osparams = {}
870

    
871

    
872
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    # scripts implementing the OS definition interface
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    # declared variants and parameters of this OS definition
    "supported_variants",
    "supported_parameters",
    ]
892

    
893

    
894
class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    # node role/state flags
    "master_candidate",
    "offline",
    "drained",
    # name of the node group this node belongs to
    "nodegroup",
    ] + _TIMESTAMPS + _UUID
906

    
907

    
908
class NodeGroup(ConfigObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom serialization for node groups.

    The C{members} slot is a runtime-only cache that gets recalculated
    on load, so it is dropped from the serialized form.

    """
    data = super(NodeGroup, self).ToDict()
    del data["members"]
    return data

  @classmethod
  def FromDict(cls, val):
    """Custom deserialization for node groups.

    Since C{members} is never serialized (see L{ToDict}), it is
    re-initialized to an empty list here; the configuration layer
    repopulates it afterwards.

    """
    group = super(NodeGroup, cls).FromDict(val)
    group.members = []
    return group
936

    
937

    
938
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "primary_ip_family",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Each step below migrates one value that older configuration
    versions may lack; the steps are order-dependent (see the
    default_bridge/nicparams interaction below), so do not reorder
    them casually.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      # NOTE(review): this aliases the shared constants.HVC_DEFAULTS dict
      # rather than copying it; later in-place modification of hvparams
      # would mutate the defaults — presumably callers never do, but
      # worth confirming
      self.hvparams = constants.HVC_DEFAULTS
    else:
      # fill each present hypervisor's dict with the missing defaults
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    # os_hvp (per-OS hypervisor parameter overrides) added later
    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    # remember whether nicparams was empty BEFORE upgrading it: an empty
    # nicparams means we are upgrading from a version that still used
    # default_bridge, whose value must be migrated into the default
    # NIC link below
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0. (Must stay after the nicparams
    # migration above, which still reads it.)
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      # move the old default to the front of the enabled list, dropping
      # any duplicate occurrence of it
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

  def ToDict(self):
    """Custom function for cluster.

    tcpudp_port_pool is kept in memory as a set (see L{FromDict}),
    which is not serializable; convert it to a list here.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    Restores tcpudp_port_pool to a set (its in-memory representation)
    after generic deserialization.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    # cluster-level defaults first, then (optionally) per-OS overrides
    # on top of them
    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    # an OS name may be "base+variant"; defaults are layered from the
    # least to the most specific source
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
1173

    
1174

    
1175
class BlockDevStatus(ConfigObject):
  """Config object describing the status of a block device."""
  __slots__ = [
    # device identification
    "dev_path",
    "major",
    "minor",
    # synchronization progress
    "sync_percent",
    "estimated_time",
    # health flags
    "is_degraded",
    "ldisk_status",
    ]
1186

    
1187

    
1188
class ImportExportStatus(ConfigObject):
  """Config object describing the status of an import or export run."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    # transfer progress counters
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    # final outcome
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
1201

    
1202

    
1203
class ImportExportOptions(ConfigObject):
  """Options for the import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]
1218

    
1219

    
1220
class ConfdRequest(ConfigObject):
  """Object holding a single confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
1235

    
1236

    
1237
class ConfdReply(ConfigObject):
  """Object holding a single confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
1252

    
1253

    
1254
class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """ConfigParser variant that can round-trip through a string.

  This is ConfigParser.SafeConfigParser extended with two helper
  methods for serializing to and deserializing from an in-memory
  buffer.

  """
  def Dumps(self):
    """Return the ini-style text representation of this instance."""
    sio = StringIO()
    self.write(sio)
    return sio.getvalue()

  @classmethod
  def Loads(cls, data):
    """Build a parser instance populated from a string."""
    parser = cls()
    parser.readfp(StringIO(data))
    return parser