#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

def FillDict(defaults_dict, custom_dict):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  return ret_dict
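
# Illustrative sketch (not part of the original module): FillDict copies the
# defaults and lets the customized values win. The parameter names and values
# below are hypothetical.
#
#   >>> FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
#   {'memory': 512, 'vcpus': 1}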


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
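
# Illustrative sketch (not part of the original module): how grouped
# parameters are upgraded. Group and parameter names are hypothetical, and
# the output assumes constants.PP_DEFAULT == "default".
#
#   >>> UpgradeGroupedParams(None, {"memory": 128})
#   {'default': {'memory': 128}}
#   >>> UpgradeGroupedParams({"default": {"memory": 512}}, {"memory": 128, "vcpus": 1})
#   {'default': {'memory': 512, 'vcpus': 1}}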


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.UpgradeConfig()

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setitem__(self, key, value):
    if key not in self.__slots__:
      raise KeyError(key)
    setattr(self, key, value)

  def __getstate__(self):
    state = {}
    for name in self.__slots__:
      if hasattr(self, name):
        state[name] = getattr(self, name)
    return state

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children that are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    return dict([(k, getattr(self, k, None)) for k in self.__slots__])

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    that are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at object init time, and its
    implementation will be object dependent.

    """
    pass
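
# Illustrative sketch (not part of the original module): a minimal subclass
# showing the ToDict/FromDict round trip. The "Example" class and its slots
# are hypothetical.
#
#   class Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   >>> Example(alpha=1).ToDict()
#   {'alpha': 1, 'beta': None}
#   >>> Example.FromDict({"alpha": 1, "beta": 2}).beta
#   2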


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]

  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not re.match(r"^[\w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
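
# Illustrative sketch (not part of the original module): the tag helpers on a
# TaggableObject subclass, here using Node as an example. The node name and
# tag are hypothetical.
#
#   >>> node = Node(name="node1.example.com")
#   >>> node.AddTag("rack:r1")
#   >>> node.GetTags()
#   set(['rack:r1'])
#   >>> node.ToDict()["tags"]
#   ['rack:r1']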


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj
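
# Illustrative sketch (not part of the original module): the nested shape
# ConfigData.ToDict() produces; keys inside the sub-dicts are abbreviated
# here and the node/instance names are hypothetical.
#
#   {
#     "version": ...,
#     "serial_no": ...,
#     "cluster": {...},                     # Cluster.ToDict()
#     "nodes": {"node1": {...}, ...},       # dict of Node.ToDict()
#     "instances": {"inst1": {...}, ...},   # dict of Instance.ToDict()
#   }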


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
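
# Illustrative sketch (not part of the original module): upgrading a NIC that
# only carries the old-style "bridge" attribute. The MAC and bridge name are
# hypothetical, and the printed dict assumes constants.NIC_MODE == "mode",
# constants.NIC_LINK == "link" and constants.NIC_MODE_BRIDGED == "bridged".
#
#   >>> nic = NIC(mac="aa:00:00:35:1f:02", bridge="xen-br0")
#   >>> nic.nicparams  # filled in by UpgradeConfig() during __init__
#   {'mode': 'bridged', 'link': 'xen-br0'}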


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
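
  # Illustrative sketch (not part of the original module): for a DRBD8 disk
  # backed by LV children, ComputeNodeTree(primary) returns only the top-level
  # device per node, e.g. [(primary, drbd_disk), (secondary, drbd_disk)],
  # because the LV children add no new nodes. The node names are hypothetical.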

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return errors
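
# Illustrative sketch (not part of the original module): deserializing a DRBD
# disk pads logical_id to the six elements the rest of the code expects. The
# node names, port and minors below are hypothetical.
#
#   >>> d = Disk.FromDict({"dev_type": constants.LD_DRBD8,
#   ...                    "logical_id": ["node1", "node2", 11000, 0, 0],
#   ...                    "size": 1024, "mode": "rw"})
#   >>> d.logical_id
#   ('node1', 'node2', 11000, 0, 0, None)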


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ]

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
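
  # Illustrative sketch (not part of the original module): for a drbd-backed
  # instance, MapLVsByNode() would return something like
  #   {'node1.example.com': ['lv_data', 'lv_meta'],
  #    'node2.example.com': ['lv_data', 'lv_meta']}
  # where the node and LV names are hypothetical.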

  def FindDisk(self, idx):
    """Find a disk with the specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1))

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj
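
# Illustrative sketch (not part of the original module): Instance.FromDict
# rebuilds the nested NIC and Disk objects from their dict form. Field values
# are hypothetical and abbreviated.
#
#   >>> inst = Instance.FromDict({"name": "inst1.example.com",
#   ...                           "primary_node": "node1.example.com",
#   ...                           "nics": [{"mac": "aa:00:00:11:22:33"}],
#   ...                           "disks": [], "tags": []})
#   >>> isinstance(inst.nics[0], NIC)
#   True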


class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "status",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    ]

  @classmethod
  def FromInvalidOS(cls, err):
    """Create an OS from an InvalidOS error.

    This routine knows how to convert an InvalidOS error to an OS
    object representing the broken OS with a meaningful error message.

    """
    if not isinstance(err, errors.InvalidOS):
      raise errors.ProgrammerError("Trying to initialize an OS from an"
                                   " invalid object of type %s" % type(err))

    return cls(name=err.args[0], path=err.args[1], status=err.args[2])

  def __nonzero__(self):
    return self.status == constants.OS_VALID_STATUS

  __bool__ = __nonzero__


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ]

class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    ]

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance):
    """Fill an instance's hvparams dict.

    @type instance: object
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: object
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)
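
# Illustrative sketch (not part of the original module): FillHV/FillBE merge
# the cluster-level defaults under an instance's own overrides via FillDict.
# The parameter names and values are hypothetical, and the cluster dict
# assumes constants.PP_DEFAULT == "default".
#
#   # cluster.beparams == {"default": {"memory": 128, "vcpus": 1}}
#   # instance.beparams == {"memory": 512}
#   >>> cluster.FillBE(instance)
#   {'memory': 512, 'vcpus': 1}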


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
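
# Illustrative sketch (not part of the original module): a
# SerializableConfigParser round trip through a string buffer. The section and
# option names are hypothetical.
#
#   >>> scp = SerializableConfigParser()
#   >>> scp.add_section("instance")
#   >>> scp.set("instance", "name", "inst1.example.com")
#   >>> data = scp.Dumps()
#   >>> SerializableConfigParser.Loads(data).get("instance", "name")
#   'inst1.example.com'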