#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
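
# Illustrative example (not part of the original module): FillDict layers the
# custom values over a deep copy of the defaults and can drop selected keys:
#
#   >>> FillDict({"memory": 128, "vcpus": 1}, {"memory": 256},
#   ...          skip_keys=["vcpus"])
#   {'memory': 256}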


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
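
# Illustrative example (not part of the original module): a None target gets a
# single default group, while keys missing from existing groups are filled in:
#
#   >>> UpgradeGroupedParams({"grp1": {}}, {"mode": "bridged"})
#   {'grp1': {'mode': 'bridged'}}
#   >>> d = UpgradeGroupedParams(None, {"mode": "bridged"})
#   >>> d == {constants.PP_DEFAULT: {"mode": "bridged"}}
#   True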


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots
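
  # Illustrative sketch (not part of the original module; example values are
  # made up): attributes come from keyword arguments, unset slots read as
  # None, and _all_slots() walks the whole class hierarchy:
  #
  #   >>> nic = NIC(mac="aa:00:00:35:21:04")
  #   >>> nic.mac
  #   'aa:00:00:35:21:04'
  #   >>> nic.ip is None
  #   True
  #   >>> "tags" in Instance._all_slots()   # inherited from TaggableObject
  #   True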

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj
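
  # Illustrative sketch (not part of the original module; example values are
  # made up): ToDict omits unset (None) slots and FromDict reverses the
  # conversion for simple objects:
  #
  #   >>> node = Node(name="node1.example.com")
  #   >>> node.ToDict()
  #   {'name': 'node1.example.com'}
  #   >>> Node.FromDict(node.ToDict()).name
  #   'node1.example.com'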

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret
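
  # Illustrative sketch (not part of the original module): the container
  # helpers convert every element while preserving the container shape:
  #
  #   >>> ConfigObject._ContainerToDicts([NIC(mac="aa:00:00:35:21:04")])
  #   [{'mac': 'aa:00:00:35:21:04'}]
  #   >>> nics = ConfigObject._ContainerFromDicts(
  #   ...     [{"mac": "aa:00:00:35:21:04"}], list, NIC)
  #   >>> nics[0].mac
  #   'aa:00:00:35:21:04'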

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
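
  # Illustrative sketch (not part of the original module; example values are
  # made up): tags live in a set at runtime but serialize as a list:
  #
  #   >>> inst = Instance(name="inst1.example.com")
  #   >>> inst.AddTag("web")
  #   >>> inst.GetTags()
  #   set(['web'])
  #   >>> inst.ToDict()["tags"]
  #   ['web']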


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
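
  # Illustrative sketch (not part of the original module; example values are
  # made up): a pre-2.1 NIC carrying only a bridge is migrated to nicparams
  # and the legacy slot is cleared:
  #
  #   >>> nic = NIC(mac="aa:00:00:35:21:04", bridge="xen-br0")
  #   >>> nic.UpgradeConfig()
  #   >>> nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED
  #   True
  #   >>> nic.nicparams[constants.NIC_LINK]
  #   'xen-br0'
  #   >>> nic.bridge is None
  #   True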


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a non-negative
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
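
  # Illustrative sketch (not part of the original module; node and volume
  # names are made up): for a DRBD8 disk mirrored between node1 and node2
  # with two LV children, the tree collapses to one (node, disk) pair per
  # node, because the LVs never leave the node their parent lives on:
  #
  #   lv_data = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"))
  #   lv_meta = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "meta"))
  #   drbd = Disk(dev_type=constants.LD_DRBD8, children=[lv_data, lv_meta],
  #               logical_id=("node1", "node2", 11000, 0, 0, "secret"))
  #   drbd.ComputeNodeTree("node1")
  #   # => [("node1", drbd), ("node2", drbd)]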

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return
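
  # Illustrative sketch (not part of the original module; names and addresses
  # are made up): on the primary node the DRBD logical ID
  # (pnode, snode, port, pminor, sminor, secret) becomes
  # (pnode_ip, port, snode_ip, port, pminor, secret):
  #
  #   disk = Disk(dev_type=constants.LD_DRBD8,
  #               logical_id=("node1", "node2", 11000, 0, 1, "secret"))
  #   disk.SetPhysicalID("node1", {"node1": "192.0.2.1", "node2": "192.0.2.2"})
  #   # disk.physical_id == ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")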

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
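
  # Illustrative sketch (not part of the original module; names are made up):
  # for a plain LVM instance the result maps the primary node to the names of
  # its logical volumes:
  #
  #   inst = Instance(name="inst1.example.com", primary_node="node1",
  #                   disks=[Disk(dev_type=constants.LD_LV,
  #                               logical_id=("xenvg", "inst1-disk0"))])
  #   inst.MapLVsByNode()
  #   # => {"node1": ["inst1-disk0"]}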

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks)),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
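
  # Illustrative sketch (not part of the original module; hypervisor and
  # parameter values are made up): explicitly given hvparams win over the
  # per-OS overrides, which in turn win over the cluster-wide defaults:
  #
  #   cluster = Cluster(
  #     hvparams={"xen-pvm": {"kernel_path": "/boot/vmlinuz",
  #                           "root_path": "/dev/sda1"}},
  #     os_hvp={"debian": {"xen-pvm": {"root_path": "/dev/xvda1"}}})
  #   cluster.SimpleFillHV("xen-pvm", "debian",
  #                        {"kernel_path": "/boot/vmlinuz-custom"})
  #   # => {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/xvda1"}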

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
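
  # Illustrative sketch (not part of the original module; OS and parameter
  # names are made up): parameters for an OS variant (e.g. "debian+testing")
  # are layered over the base OS parameters, and explicitly passed parameters
  # win:
  #
  #   cluster = Cluster(
  #     osparams={"debian": {"mirror": "http://deb.example.com"},
  #               "debian+testing": {"suite": "testing"}})
  #   cluster.SimpleFillOS("debian+testing",
  #                        {"mirror": "http://local.example.com"})
  #   # => {"mirror": "http://local.example.com", "suite": "testing"}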


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for the import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
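
# Illustrative sketch (not part of the original module; section and option
# names are made up): Dumps/Loads round-trip the parser contents through a
# plain string:
#
#   >>> scp = SerializableConfigParser()
#   >>> scp.add_section("node")
#   >>> scp.set("node", "name", "node1.example.com")
#   >>> data = scp.Dumps()
#   >>> SerializableConfigParser.Loads(data).get("node", "name")
#   'node1.example.com'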