# lib/objects.py @ revision 7736a5f2

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]

def FillDict(defaults_dict, custom_dict, skip_keys=[]):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  for k in skip_keys:
    try:
      del ret_dict[k]
    except KeyError:
      pass
  return ret_dict


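# Illustrative usage sketch (not part of the original module): FillDict
# layers the customized values over a deep copy of the defaults and can
# drop selected keys afterwards, e.g.:
#
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
#   # -> {'b': 3}

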
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target


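# Illustrative sketch (not part of the original module): each existing
# group is filled from the defaults, while a missing target gets a single
# default group, e.g.:
#
#   UpgradeGroupedParams({"group1": {"x": 1}}, {"x": 0, "y": 2})
#   # -> {'group1': {'x': 1, 'y': 2}}
#   UpgradeGroupedParams(None, {"x": 0})
#   # -> {constants.PP_DEFAULT: {'x': 0}}

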
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children that are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    that are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass


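# Illustrative sketch (hypothetical subclass, not part of the original
# module): any class derived from ConfigObject that only stores standard
# python types round-trips through ToDict/FromDict, and unset slots read
# as None, e.g.:
#
#   class _Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   obj = _Example(alpha=1)
#   obj.beta               # -> None (unset slot attribute)
#   obj.ToDict()           # -> {'alpha': 1} (unset slots are omitted)
#   _Example.FromDict({"alpha": 1}).alpha   # -> 1

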
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj


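# Illustrative sketch (not part of the original module): tag handling on
# any TaggableObject subclass (Node, Instance, Cluster), e.g.:
#
#   node = Node(name="node1.example.com")
#   node.AddTag("web")
#   node.GetTags()          # -> set(['web'])
#   node.ToDict()["tags"]   # -> ['web'] (the set becomes a list)
#   node.RemoveTag("db")    # raises errors.TagError("Tag not found")

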
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()


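# Illustrative sketch (not part of the original module, `config_data` is a
# hypothetical ConfigData instance): the top-level object serializes its
# children explicitly, so a full round trip looks like:
#
#   data_dict = config_data.ToDict()    # cluster/nodes/instances as dicts
#   restored = ConfigData.FromDict(data_dict)
#   restored.UpgradeConfig()            # fill defaults after loading

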
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] is constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None


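# Illustrative sketch (not part of the original module, values are made up):
# a 2.0-style NIC that still carries a bridge is migrated to nicparams on
# upgrade, e.g.:
#
#   nic = NIC(mac="aa:00:00:11:22:33", bridge="xen-br0", nicparams=None)
#   nic.UpgradeConfig()
#   nic.nicparams   # -> {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#                   #     constants.NIC_LINK: "xen-br0"}
#   nic.bridge      # -> None (the legacy slot is cleared)

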
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


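# Illustrative sketch (not part of the original module, names are made up):
# a plain LVM disk and the helpers it answers to, e.g.:
#
#   disk = Disk(dev_type=constants.LD_LV, size=1024,
#               logical_id=("xenvg", "disk0"), iv_name="sda")
#   disk.StaticDevPath()     # -> "/dev/xenvg/disk0"
#   disk.GetNodes("node1")   # -> ["node1"] (LVs live on a single node)
#   disk.RecordGrow(256)     # disk.size becomes 1280
#   str(disk)                # -> "<LogicalVolume(/dev/xenvg/disk0, ..."

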
class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass


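# Illustrative sketch (not part of the original module, `inst` is a
# hypothetical Instance): node membership is always derived from the disk
# logical_ids, so it cannot go stale, e.g.:
#
#   inst.all_nodes         # primary node plus every node used by DRBD disks
#   inst.secondary_nodes   # all_nodes minus the primary node
#   inst.MapLVsByNode()    # -> {'node1': ['volume1', ...], ...}
#   inst.FindDisk("1")     # index is validated and converted to int

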
class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "supported_variants",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams, skip_keys=skip_keys)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)


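# Illustrative sketch (not part of the original module, `cluster` and
# `instance` are hypothetical objects): FillHV/FillBE are thin wrappers
# around FillDict, layering the instance's own parameters over the
# cluster-level defaults, e.g.:
#
#   hv = cluster.FillHV(instance, skip_globals=True)  # drop global hv keys
#   be = cluster.FillBE(instance)                      # full backend params

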
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
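

# Illustrative sketch (not part of the original module, section/option names
# are made up): round-tripping a parser through a string buffer, e.g.:
#
#   scp = SerializableConfigParser()
#   scp.add_section("node")
#   scp.set("node", "name", "node1.example.com")
#   text = scp.Dumps()
#   copy = SerializableConfigParser.Loads(text)
#   copy.get("node", "name")   # -> "node1.example.com"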