#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]


def FillDict(defaults_dict, custom_dict):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  return ret_dict
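

# Illustrative usage sketch (hypothetical parameter names and values):
#   FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
#   -> {"memory": 512, "vcpus": 1}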


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
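

# Illustrative sketch of how each group is filled from the defaults
# (hypothetical group and parameter names):
#   UpgradeGroupedParams({"group1": {"link": "br1"}},
#                        {"link": "br0", "mode": "bridged"})
#   -> {"group1": {"link": "br1", "mode": "bridged"}}
#   UpgradeGroupedParams(None, {"link": "br0"})
#   -> {constants.PP_DEFAULT: {"link": "br0"}}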


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.UpgradeConfig()

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setitem__(self, key, value):
    if key not in self.__slots__:
      raise KeyError(key)
    setattr(self, key, value)

  def __getstate__(self):
    state = {}
    for name in self.__slots__:
      if hasattr(self, name):
        state[name] = getattr(self, name)
    return state

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children that are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    return dict([(k, getattr(self, k, None)) for k in self.__slots__])

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    that are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigObject to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigObject instances. If the container is a dict, we don't touch
    the keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at object init time, and its
    implementation will be object dependent.

    """
    pass
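

# Illustrative round-trip sketch for ConfigObject subclasses (hypothetical
# MyObject class and values):
#   class MyObject(ConfigObject):
#     __slots__ = ["name", "size"]
#   MyObject(name="foo", size=10).ToDict()          # {'name': 'foo', 'size': 10}
#   MyObject.FromDict({"name": "foo", "size": 10})  # rebuilt MyObject instance
#   MyObject(name="foo").size is None               # unset slots read as None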


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]

  @staticmethod
  def ValidateTag(tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not re.match(r"^[\w.+*/:-]+$", tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    tags.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
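

# Illustrative tag handling sketch (hypothetical node name and tag values):
#   node = Node(name="node1.example.com")
#   node.AddTag("webfarm")
#   node.GetTags()                          # set(['webfarm'])
#   node.RemoveTag("webfarm")
#   TaggableObject.ValidateTag("bad tag!")  # raises errors.TagError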


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = ["version", "cluster", "nodes", "instances", "serial_no"]

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the cluster object and the containers of nodes
    and instances with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj
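

# Illustrative sketch, assuming config_data is a fully populated ConfigData
# instance (hypothetical name):
#   plain = config_data.ToDict()           # nested objects become plain dicts
#   restored = ConfigData.FromDict(plain)  # and are rebuilt as objects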


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)
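

# Illustrative parameter check sketch (hypothetical link name; the keys come
# from constants.NIC_MODE and constants.NIC_LINK):
#   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#                             constants.NIC_LINK: "xen-br0"})  # passes
#   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#                             constants.NIC_LINK: ""})
#   # raises errors.ConfigurationError (empty link in bridged mode)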


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device doesn't know node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors
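

# Illustrative sketch of a DRBD8-over-LVM disk stack (hypothetical node names,
# volume group, minors and sizes):
#   data_lv = Disk(dev_type=constants.LD_LV, size=1024,
#                  logical_id=("xenvg", "data"), iv_name="sda")
#   meta_lv = Disk(dev_type=constants.LD_LV, size=128,
#                  logical_id=("xenvg", "meta"), iv_name="sda.meta")
#   drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
#               logical_id=("node1", "node2", 11000, 0, 0, "secret"),
#               children=[data_lv, meta_lv], iv_name="sda")
#   drbd.GetNodes("node1")          # ["node1", "node2"]
#   drbd.ComputeNodeTree("node1")   # [("node1", drbd), ("node2", drbd)]
#   drbd.RecordGrow(512)            # grows the DRBD device and its data LV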


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ]

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {node: []}
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1))

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj
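

# Illustrative sketch of the computed node properties and the LV mapping,
# reusing the hypothetical drbd disk from the Disk sketch above:
#   inst = Instance(name="inst1.example.com", primary_node="node1",
#                   disks=[drbd], nics=[])
#   inst.all_nodes          # ("node1", "node2"), order not guaranteed
#   inst.secondary_nodes    # ("node2",)
#   inst.MapLVsByNode()
#   -> {"node1": ["data", "meta"], "node2": ["data", "meta"]}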


class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "status",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    ]

  @classmethod
  def FromInvalidOS(cls, err):
    """Create an OS from an InvalidOS error.

    This routine knows how to convert an InvalidOS error to an OS
    object representing the broken OS with a meaningful error message.

    """
    if not isinstance(err, errors.InvalidOS):
      raise errors.ProgrammerError("Trying to initialize an OS from an"
                                   " invalid object of type %s" % type(err))

    return cls(name=err.args[0], path=err.args[1], status=err.args[2])

  def __nonzero__(self):
    return self.status == constants.OS_VALID_STATUS

  __bool__ = __nonzero__


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ]


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    ]

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance):
    """Fill an instance's hvparams dict.

    @type instance: object
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: object
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)
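

# Illustrative sketch of filling instance parameters from cluster defaults
# (hypothetical hypervisor name and parameter values):
#   cluster.hvparams = {"xen-pvm": {"kernel_path": "/boot/vmlinuz",
#                                   "root_path": "/dev/sda1"}}
#   instance.hypervisor = "xen-pvm"
#   instance.hvparams = {"root_path": "/dev/xvda1"}
#   cluster.FillHV(instance)
#   -> {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda1"}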


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
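

# Illustrative round-trip sketch (hypothetical section and option names):
#   scp = SerializableConfigParser()
#   scp.add_section("cluster")
#   scp.set("cluster", "master", "node1.example.com")
#   text = scp.Dumps()                       # INI-style string
#   SerializableConfigParser.Loads(text).get("cluster", "master")
#   -> 'node1.example.com'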