#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
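
# Editor's sketch (illustrative only, not part of the original module):
# FillDict layers the custom values over a deep copy of the defaults and then
# drops any skipped keys:
#
#   FillDict({"mode": "bridged", "link": "xen-br0"}, {"link": "br1"},
#            skip_keys=["mode"])
#   # -> {"link": "br1"}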


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
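
# Editor's sketch (illustrative only): each parameter group is completed from
# the defaults, and a missing target becomes the default group itself:
#
#   UpgradeGroupedParams({"grp1": {"a": 1}}, {"a": 0, "b": 2})
#   # -> {"grp1": {"a": 1, "b": 2}}
#   UpgradeGroupedParams(None, {"a": 0})
#   # -> {constants.PP_DEFAULT: {"a": 0}}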


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children that are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    that are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
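
# Editor's sketch (hypothetical subclass, not part of Ganeti): a subclass only
# has to declare __slots__; dict serialization then comes for free:
#
#   class _Example(ConfigObject):
#     __slots__ = ["name", "size"]
#
#   obj = _Example(name="disk0", size=128)
#   obj.ToDict()                               # -> {"name": "disk0", "size": 128}
#   _Example.FromDict({"name": "disk0"}).size  # -> None (unset slot)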


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
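
# Editor's sketch (illustrative): tags are validated against VALID_TAG_RE and
# kept in a set, but serialized as a list so the result stays JSON-friendly:
#
#   node = Node(name="node1.example.com")  # Node derives from TaggableObject
#   node.AddTag("rack:r1")
#   sorted(node.ToDict()["tags"])          # -> ["rack:r1"]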


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
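
# Editor's sketch (illustrative): upgrading a pre-2.1 NIC that still carries a
# "bridge" attribute moves that value into nicparams and clears the old slot:
#
#   nic = NIC(mac="aa:00:00:11:22:33", bridge="xen-br0")
#   nic.UpgradeConfig()
#   nic.nicparams[constants.NIC_LINK], nic.bridge   # -> ("xen-br0", None)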


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a non-negative
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
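
  # Editor's sketch (illustrative, hypothetical values): for a DRBD8 disk whose
  # LVM children live on the primary node, the compact tree contains only the
  # top-level device, once per node:
  #
  #   lv_data = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"))
  #   lv_meta = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "meta"))
  #   drbd = Disk(dev_type=constants.LD_DRBD8, children=[lv_data, lv_meta],
  #               logical_id=("node1", "node2", 11000, 0, 0, "secret"))
  #   drbd.ComputeNodeTree("node1")
  #   # -> [("node1", drbd), ("node2", drbd)]  (the LVs stay under "node1")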

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

    
564
  def ToDict(self):
565
    """Disk-specific conversion to standard python types.
566

567
    This replaces the children lists of objects with lists of
568
    standard python types.
569

570
    """
571
    bo = super(Disk, self).ToDict()
572

    
573
    for attr in ("children",):
574
      alist = bo.get(attr, None)
575
      if alist:
576
        bo[attr] = self._ContainerToDicts(alist)
577
    return bo
578

    
579
  @classmethod
580
  def FromDict(cls, val):
581
    """Custom function for Disks
582

583
    """
584
    obj = super(Disk, cls).FromDict(val)
585
    if obj.children:
586
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
587
    if obj.logical_id and isinstance(obj.logical_id, list):
588
      obj.logical_id = tuple(obj.logical_id)
589
    if obj.physical_id and isinstance(obj.physical_id, list):
590
      obj.physical_id = tuple(obj.physical_id)
591
    if obj.dev_type in constants.LDS_DRBD:
592
      # we need a tuple of length six here
593
      if len(obj.logical_id) < 6:
594
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
595
    return obj
596

    
597
  def __str__(self):
598
    """Custom str() formatter for disks.
599

600
    """
601
    if self.dev_type == constants.LD_LV:
602
      val =  "<LogicalVolume(/dev/%s/%s" % self.logical_id
603
    elif self.dev_type in constants.LDS_DRBD:
604
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
605
      val = "<DRBD8("
606
      if self.physical_id is None:
607
        phy = "unconfigured"
608
      else:
609
        phy = ("configured as %s:%s %s:%s" %
610
               (self.physical_id[0], self.physical_id[1],
611
                self.physical_id[2], self.physical_id[3]))
612

    
613
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
614
              (node_a, minor_a, node_b, minor_b, port, phy))
615
      if self.children and self.children.count(None) == 0:
616
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
617
      else:
618
        val += "no local storage"
619
    else:
620
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
621
             (self.dev_type, self.logical_id, self.physical_id, self.children))
622
    if self.iv_name is None:
623
      val += ", not visible"
624
    else:
625
      val += ", visible as /dev/%s" % self.iv_name
626
    if isinstance(self.size, int):
627
      val += ", size=%dm)>" % self.size
628
    else:
629
      val += ", size='%s')>" % (self.size,)
630
    return val
631

    
632
  def Verify(self):
633
    """Checks that this disk is correctly configured.
634

635
    """
636
    all_errors = []
637
    if self.mode not in constants.DISK_ACCESS_SET:
638
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
639
    return all_errors
640

    
641
  def UpgradeConfig(self):
642
    """Fill defaults for missing configuration values.
643

644
    """
645
    if self.children:
646
      for child in self.children:
647
        child.UpgradeConfig()
648
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
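
  # Editor's sketch (illustrative, hypothetical instance and volume names): for
  # a DRBD-backed instance the mapping lists the backing LVs on both the
  # primary and the secondary node:
  #
  #   inst.MapLVsByNode()
  #   # -> {"node1": ["disk0_data", "disk0_meta"],
  #   #     "node2": ["disk0_data", "disk0_meta"]}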

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass


class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "supported_variants",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    # TODO: Figure out if it's better to put this into OS rather than Cluster
    if self.os_hvp is None:
      self.os_hvp = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
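
  # Editor's sketch (illustrative, hypothetical values): precedence is
  # cluster-wide hvparams, then per-OS overrides (os_hvp), then the values
  # passed in, i.e. the caller's hvparams always win:
  #
  #   cluster.hvparams = {"xen-pvm": {"kernel_path": "/boot/vmlinuz",
  #                                   "root_path": "/dev/sda1"}}
  #   cluster.os_hvp = {"debootstrap": {"xen-pvm": {"root_path": "/dev/xvda1"}}}
  #   cluster.SimpleFillHV("xen-pvm", "debootstrap", {"kernel_path": "/boot/k"})
  #   # -> {"kernel_path": "/boot/k", "root_path": "/dev/xvda1"}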

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
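
# Editor's sketch (illustrative): Dumps() and Loads() round-trip the parser
# contents through a plain string on top of the standard ConfigParser
# interface:
#
#   scp = SerializableConfigParser()
#   scp.add_section("node")
#   scp.set("node", "name", "node1.example.com")
#   SerializableConfigParser.Loads(scp.Dumps()).get("node", "name")
#   # -> "node1.example.com"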