#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict


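# A minimal usage sketch (not part of the upstream module); the dictionary
# contents below are invented for illustration only.
def _ExampleFillDict():
  """Show how FillDict layers custom values over defaults."""
  defaults = {"memory": 128, "vcpus": 1, "auto_balance": True}
  custom = {"memory": 512}
  # "auto_balance" is dropped because it is listed in skip_keys; "vcpus"
  # keeps its default; the caller's "memory" wins over the default
  return FillDict(defaults, custom, skip_keys=["auto_balance"])

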
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


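# Illustrative sketch (not part of the upstream module): the partial group
# below is made up; constants.NICC_DEFAULTS provides the missing keys.
def _ExampleUpgradeGroupedParams():
  """Show how a grouped-parameter dict is upgraded."""
  partial = {constants.PP_DEFAULT: {constants.NIC_LINK: "br100"}}
  # every group keeps its explicit values and gains the missing defaults;
  # a None target would instead become {constants.PP_DEFAULT: defaults}
  return UpgradeGroupedParams(partial, constants.NICC_DEFAULTS)

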
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children which are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the class should override the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    which are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the class should override the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass


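# Illustrative sketch (the _Point class is hypothetical, defined only to
# demonstrate the generic ConfigObject behaviour described above).
def _ExampleConfigObjectRoundTrip():
  """Show the ToDict/FromDict round-trip and the None default for slots."""
  class _Point(ConfigObject):
    __slots__ = ["x", "y"]

  point = _Point(x=1)
  assert point.y is None       # unset slot attributes read as None
  data = point.ToDict()        # {"x": 1}; None-valued slots are omitted
  return _Point.FromDict(data)

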
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj


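# Illustrative sketch: basic tag handling on any TaggableObject subclass
# instance; the tag values are made up and must match VALID_TAG_RE.
def _ExampleTags(obj):
  """Add, remove and return tags on the given taggable object."""
  obj.AddTag("env:production")
  obj.AddTag("owner:alice")
  # RemoveTag of a tag that is not present would raise errors.TagError
  obj.RemoveTag("owner:alice")
  return obj.GetTags()         # the returned set now holds "env:production"

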
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but can be removed once upgrades to the current version
    # straight from 2.0 are deprecated.
    if self.bridge is not None:
      self.bridge = None


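# Illustrative sketch (the MAC address and link name are made up): build a
# bridged NIC after validating its parameter dict.
def _ExampleBridgedNic():
  """Validate nicparams and return a NIC configured in bridged mode."""
  nicparams = {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
               constants.NIC_LINK: "br100"}
  # raises errors.ConfigurationError on an unknown mode or a missing link
  NIC.CheckParameterSyntax(nicparams)
  return NIC(mac="aa:00:00:00:00:01", ip=None, nicparams=nicparams)

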
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device does not know node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


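# Illustrative sketch (the volume group and LV names are made up): a plain
# LVM-backed disk, its static device path and the effect of RecordGrow.
def _ExampleLvDisk():
  """Return the static path and grown size of a sample LV-backed disk."""
  disk = Disk(dev_type=constants.LD_LV, size=1024, mode="rw",
              logical_id=("xenvg", "disk0"), iv_name="sda")
  path = disk.StaticDevPath()          # -> "/dev/xenvg/disk0"
  disk.RecordGrow(512)                 # size is now 1536
  return path, disk.size

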
class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}


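# Illustrative sketch (node, volume and instance names are made up): a
# minimal instance with a single LV disk, queried via the helpers above.
def _ExampleInstanceDisks():
  """Return the node-to-LV mapping and first disk of a sample instance."""
  disk = Disk(dev_type=constants.LD_LV, size=1024, mode="rw",
              logical_id=("xenvg", "disk0"), iv_name="sda")
  inst = Instance(name="instance1.example.com", primary_node="node1",
                  disks=[disk], nics=[])
  lvmap = inst.MapLVsByNode()          # -> {"node1": ["disk0"]}
  return lvmap, inst.FindDisk(0)

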
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of (name, description) tuples
      describing the parameters supported by this OS

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)


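# Illustrative sketch (the OS names and parameter values are made up): shows
# the three layers SimpleFillOS merges for a given Cluster object.
def _ExampleFillOsParams(cluster):
  """Return filled OS parameters for a hypothetical "debootstrap+secure"."""
  # assuming cluster.osparams looks like:
  #   {"debootstrap": {"arch": "amd64"}, "debootstrap+secure": {"ssh": "yes"}}
  # the result merges the base OS defaults, then the variant overrides, then
  # the explicitly passed parameters, which always win
  return cluster.SimpleFillOS("debootstrap+secure", {"arch": "i386"})

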
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for the import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
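

# Illustrative sketch (section and option names are made up): serialize a
# parser to a string with Dumps() and rebuild an equivalent one with Loads().
def _ExampleSerializableConfigParser():
  """Round-trip a small config through Dumps() and Loads()."""
  scp = SerializableConfigParser()
  scp.add_section("instance")
  scp.set("instance", "os", "debootstrap")
  data = scp.Dumps()                   # plain INI-style text
  return SerializableConfigParser.Loads(data).get("instance", "os")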