#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]

def FillDict(defaults_dict, custom_dict):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  return ret_dict
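
# Illustrative usage sketch for FillDict (the literal dicts below are
# examples only, not values used by Ganeti itself):
#
#   FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
#   # -> {"memory": 512, "vcpus": 1}
#   # The defaults dict itself is left untouched, since a deep copy is
#   # made before applying the customized values.
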
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
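
# Illustrative sketch for UpgradeGroupedParams (example values only): every
# existing group is completed from the defaults, and a constants.PP_DEFAULT
# group is created when no groups exist at all:
#
#   UpgradeGroupedParams({"grp1": {"b": 7}}, {"a": 1, "b": 2})
#   # -> {"grp1": {"a": 1, "b": 7}}
#   UpgradeGroupedParams(None, {"a": 1})
#   # -> {constants.PP_DEFAULT: {"a": 1}}
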
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children that are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    that are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigObject to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigObject instances. If the container is a dict, we don't touch
    the keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
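
# Illustrative round-trip sketch for ConfigObject subclasses (the class and
# values below are assumptions made up for this example, not part of Ganeti):
#
#   class _Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   obj = _Example(alpha=1)
#   obj.beta                   # -> None (unset slots read as None)
#   data = obj.ToDict()        # -> {"alpha": 1} (None values are skipped)
#   _Example.FromDict(data)    # -> a new, equivalent _Example instance
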
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]

  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not re.match(r"^[\w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
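
# Illustrative tag-handling sketch (node name and tag values are examples
# only; Node is defined further down in this module):
#
#   node = Node(name="node1.example.com")
#   node.AddTag("role:web")       # validated against length/charset rules
#   node.GetTags()                # -> set(["role:web"])
#   node.RemoveTag("role:web")
#   node.AddTag("bad tag!")       # raises errors.TagError (invalid chars)
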
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
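
# Illustrative sketch of the 2.0 to 2.1 upgrade done by NIC.UpgradeConfig
# above (the MAC address and bridge name are examples only):
#
#   nic = NIC(mac="aa:00:00:11:22:33", bridge="xen-br0")
#   nic.UpgradeConfig()
#   # nic.nicparams == {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#   #                   constants.NIC_LINK: "xen-br0"}
#   # nic.bridge is now None; the slot remains only for upgrade support.
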
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a non-negative
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
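
  # Illustrative sketch of ComputeNodeTree for a DRBD8 disk backed by two
  # local LVs (node names and values are examples only): for a tree such as
  #   drbd = Disk(dev_type=constants.LD_DRBD8,
  #               logical_id=("node1", "node2", 11000, 0, 0, "secret"),
  #               children=[lv_data, lv_meta])
  # drbd.ComputeNodeTree("node1") returns
  #   [("node1", drbd), ("node2", drbd)]
  # because the LV children live on the same nodes as the DRBD device and
  # therefore need no entries of their own.
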
  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else:  # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return
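
  # Illustrative sketch of the DRBD id translation done by SetPhysicalID
  # (node names, IPs, port and minors are examples only):
  #
  #   logical_id  = ("node1", "node2", 11000, 0, 0, "secret")
  #   nodes_ip    = {"node1": "192.0.2.1", "node2": "192.0.2.2"}
  #   # after SetPhysicalID("node1", nodes_ip):
  #   physical_id = ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")
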
  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
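
  # Illustrative sketch of a MapLVsByNode result for a DRBD instance with a
  # data and a metadata LV (node and volume names are examples only):
  #
  #   instance.MapLVsByNode()
  #   # -> {"node1.example.com": ["disk0_data", "disk0_meta"],
  #   #     "node2.example.com": ["disk0_data", "disk0_meta"]}
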
  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1))

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()


class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    ] + _TIMESTAMPS

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance):
    """Fill an instance's hvparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                          instance.beparams)
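
# Illustrative parameter-filling sketch (example values only): with
# cluster.beparams == {constants.PP_DEFAULT: {"memory": 128, "vcpus": 1}}
# and instance.beparams == {"memory": 512}, FillBE(instance) returns
# {"memory": 512, "vcpus": 1}. FillHV works the same way, keyed on the
# instance's hypervisor instead of constants.PP_DEFAULT.
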
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
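
# Illustrative serialization round-trip sketch (section and option names are
# examples only):
#
#   cfg = SerializableConfigParser()
#   cfg.add_section("node")
#   cfg.set("node", "name", "node1.example.com")
#   text = cfg.Dumps()                       # INI-style string
#   SerializableConfigParser.Loads(text)     # -> an equivalent parser object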