#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]

def FillDict(defaults_dict, custom_dict):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  return ret_dict
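
# Illustrative note (not from the original source): FillDict layers the
# custom values over a deep copy of the defaults, so unspecified keys keep
# their default value. The keys below are made-up examples, not real
# Ganeti constants:
#   FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
#   => {"memory": 512, "vcpus": 1}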


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
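
# Illustrative note (not from the original source): UpgradeGroupedParams
# fills every parameter group with the given defaults. With made-up values:
#   UpgradeGroupedParams({"grp": {"mem": 512}}, {"mem": 128, "cpu": 1})
#   => {"grp": {"mem": 512, "cpu": 1}}
# while a None target becomes {constants.PP_DEFAULT: defaults}.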


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.UpgradeConfig()

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children which are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    which are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj
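
  # Illustrative note (not from the original source): ToDict/FromDict give a
  # simple round-trip through plain python types for flat objects. With a
  # hypothetical subclass:
  #   class Example(ConfigObject):
  #     __slots__ = ["name", "port"]
  #   Example(name="x", port=11000).ToDict()  => {"name": "x", "port": 11000}
  #   Example.FromDict({"name": "x", "port": 11000})  => an equivalent object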

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret
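
  # Illustrative note (not from the original source): these helpers back the
  # container-aware ToDict/FromDict overrides below, e.g. (with the real Node
  # class but a made-up input dict):
  #   ConfigObject._ContainerFromDicts({"node1": {"name": "node1"}}, dict, Node)
  # returns {"node1": <Node ...>}, and _ContainerToDicts reverses that.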

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at object init time, and its implementation will
    be object dependent.

    """
    pass


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]

  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not re.match("^[\w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
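
  # Illustrative note (not from the original source): tag handling sketch,
  # using a hypothetical instance of a TaggableObject subclass such as Node:
  #   node.AddTag("env:prod")     # validated against MAX_TAG_LEN and the
  #                               # allowed character set, then added
  #   node.RemoveTag("env:prod")  # raises errors.TagError if absent
  #   node.ToDict()["tags"]       # tags are serialized as a list, not a set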


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"] + \
              _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
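
# Illustrative note (not from the original source): for a pre-2.1 NIC that
# only had a bridge set, UpgradeConfig migrates it into nicparams, roughly:
#   before: bridge="xen-br0", nicparams=None
#   after:  bridge=None,
#           nicparams={NIC_MODE: NIC_MODE_BRIDGED, NIC_LINK: "xen-br0"}
# ("xen-br0" is just an example bridge name)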


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
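
  # Illustrative note (not from the original source): for a typical DRBD disk
  # whose children are plain LVs on the two DRBD nodes, ComputeNodeTree called
  # with the primary node returns just
  #   [(primary_node, drbd_disk), (secondary_node, drbd_disk)]
  # because the LV children live on the same nodes and are therefore folded
  # into their parent's entries.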

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return
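
  # Illustrative note (not from the original source): for a DRBD8 disk with
  # logical_id (pnode, snode, port, pminor, sminor, secret), calling
  # SetPhysicalID(pnode, nodes_ip) produces a physical_id of the form
  #   (pnode_ip, port, snode_ip, port, pminor, secret)
  # i.e. the local endpoint first, the peer second, plus the local minor.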

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
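
  # Illustrative note (not from the original source): for a DRBD-based
  # instance with primary "node1" and secondary "node2", MapLVsByNode()
  # returns something shaped like (LV names are made up):
  #   {"node1": ["data-lv", "meta-lv"], "node2": ["data-lv", "meta-lv"]}
  # i.e. each node ends up with the logical volumes that must exist there.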

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1))

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj


class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    ] + _TIMESTAMPS

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = [self.default_hypervisor] + \
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor]
      self.default_hypervisor = None


  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance):
    """Fill an instance's hvparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)
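
# Illustrative note (not from the original source): FillHV/FillBE merge
# instance-level overrides onto the cluster defaults via FillDict. With
# made-up values:
#   cluster.beparams = {constants.PP_DEFAULT: {"memory": 128, "vcpus": 1}}
#   instance.beparams = {"memory": 512}
#   cluster.FillBE(instance)  => {"memory": 512, "vcpus": 1}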


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
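
# Illustrative note (not from the original source): Dumps/Loads round-trip a
# parser through a plain string, e.g.:
#   cfp = SerializableConfigParser()
#   cfp.add_section("hypothetical")
#   cfp.set("hypothetical", "key", "value")
#   data = cfp.Dumps()
#   SerializableConfigParser.Loads(data).get("hypothetical", "key")  # "value"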