lib/objects.py @ b5e5632e

#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""


import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]

def FillDict(defaults_dict, custom_dict):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  return ret_dict
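
# Illustrative usage (not part of the original module): FillDict returns a
# new dict and leaves both arguments untouched, e.g.
#   FillDict({"memory": 128, "vcpus": 1}, {"vcpus": 4})
#   => {"memory": 128, "vcpus": 4}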


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
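
# Illustrative usage (not part of the original module): a missing target gets
# a single default group, while existing groups are completed from the
# defaults, e.g.
#   UpgradeGroupedParams(None, {"memory": 128})
#   => {constants.PP_DEFAULT: {"memory": 128}}
#   UpgradeGroupedParams({"fast": {"vcpus": 4}}, {"memory": 128, "vcpus": 1})
#   => {"fast": {"memory": 128, "vcpus": 4}}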


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children which are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    which are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigObject to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigObject-derived objects. If the container is a dict, we don't
    touch the keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
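
# Illustrative round trip (not part of the original module), using a
# hypothetical subclass:
#
#   class _Example(ConfigObject):
#     __slots__ = ["name", "size"]
#
#   obj = _Example(name="disk0", size=1024)
#   obj.ToDict()               # {"name": "disk0", "size": 1024}
#   clone = _Example.FromDict({"name": "disk0"})
#   clone.size                 # None: unset slot attributes read as None
#   clone.mode                 # AttributeError: not declared in __slots__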


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
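
# Illustrative tag handling (not part of the original module), e.g. on a Node
# or Instance object (both defined below):
#
#   node.AddTag("rack:r1")     # validated against VALID_TAG_RE first
#   node.GetTags()             # set(["rack:r1"])
#   node.AddTag("bad tag")     # raises errors.TagError (space not allowed)
#   node.RemoveTag("rack:r1")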


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
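
# The dict produced by ConfigData.ToDict() nests only standard types; the
# shape is roughly (illustrative, keys abbreviated):
#
#   {"version": ..., "serial_no": ...,
#    "cluster": {...},                      # Cluster.ToDict()
#    "nodes": {"node1": {...}, ...},        # each value a Node.ToDict()
#    "instances": {"inst1": {...}, ...}}    # each value an Instance.ToDict()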


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
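
# Illustrative parameter check (not part of the original module): per the
# check above, only NIC_MODE_BRIDGED requires a non-empty NIC_LINK, e.g.
#   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#                             constants.NIC_LINK: "xen-br0"})   # passes
#   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#                             constants.NIC_LINK: ""})          # raises
#                                                # errors.ConfigurationError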


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
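
  # Illustrative result (not part of the original module): for a DRBD8 disk
  # mirrored between node1 (primary) and node2 with LV children, calling
  # disk.ComputeNodeTree("node1") yields the compact tree
  #   [("node1", <DRBD8 disk>), ("node2", <DRBD8 disk>)]
  # because the LV children live on the same nodes as their parent and are
  # therefore not listed separately.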

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

    
535
  def ToDict(self):
536
    """Disk-specific conversion to standard python types.
537

538
    This replaces the children lists of objects with lists of
539
    standard python types.
540

541
    """
542
    bo = super(Disk, self).ToDict()
543

    
544
    for attr in ("children",):
545
      alist = bo.get(attr, None)
546
      if alist:
547
        bo[attr] = self._ContainerToDicts(alist)
548
    return bo
549

    
550
  @classmethod
551
  def FromDict(cls, val):
552
    """Custom function for Disks
553

554
    """
555
    obj = super(Disk, cls).FromDict(val)
556
    if obj.children:
557
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
558
    if obj.logical_id and isinstance(obj.logical_id, list):
559
      obj.logical_id = tuple(obj.logical_id)
560
    if obj.physical_id and isinstance(obj.physical_id, list):
561
      obj.physical_id = tuple(obj.physical_id)
562
    if obj.dev_type in constants.LDS_DRBD:
563
      # we need a tuple of length six here
564
      if len(obj.logical_id) < 6:
565
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
566
    return obj
567

    
568
  def __str__(self):
569
    """Custom str() formatter for disks.
570

571
    """
572
    if self.dev_type == constants.LD_LV:
573
      val =  "<LogicalVolume(/dev/%s/%s" % self.logical_id
574
    elif self.dev_type in constants.LDS_DRBD:
575
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
576
      val = "<DRBD8("
577
      if self.physical_id is None:
578
        phy = "unconfigured"
579
      else:
580
        phy = ("configured as %s:%s %s:%s" %
581
               (self.physical_id[0], self.physical_id[1],
582
                self.physical_id[2], self.physical_id[3]))
583

    
584
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
585
              (node_a, minor_a, node_b, minor_b, port, phy))
586
      if self.children and self.children.count(None) == 0:
587
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
588
      else:
589
        val += "no local storage"
590
    else:
591
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
592
             (self.dev_type, self.logical_id, self.physical_id, self.children))
593
    if self.iv_name is None:
594
      val += ", not visible"
595
    else:
596
      val += ", visible as /dev/%s" % self.iv_name
597
    if isinstance(self.size, int):
598
      val += ", size=%dm)>" % self.size
599
    else:
600
      val += ", size='%s')>" % (self.size,)
601
    return val
602

    
603
  def Verify(self):
604
    """Checks that this disk is correctly configured.
605

606
    """
607
    all_errors = []
608
    if self.mode not in constants.DISK_ACCESS_SET:
609
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
610
    return all_errors
611

    
612
  def UpgradeConfig(self):
613
    """Fill defaults for missing configuration values.
614

615
    """
616
    if self.children:
617
      for child in self.children:
618
        child.UpgradeConfig()
619
    # add here config upgrade for this disk
620

    
621

    


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
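
  # Illustrative result (not part of the original module): for an instance
  # with one DRBD8 disk backed by LVs "data0" and "meta0" and mirrored
  # between node1 and node2, instance.MapLVsByNode() returns roughly
  #   {"node1": ["data0", "meta0"], "node2": ["data0", "meta0"]}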

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err))
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1))

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
class OS(ConfigObject):
781
  """Config object representing an operating system."""
782
  __slots__ = [
783
    "name",
784
    "path",
785
    "api_versions",
786
    "create_script",
787
    "export_script",
788
    "import_script",
789
    "rename_script",
790
    "supported_variants",
791
    ]
792

    
793

    
794
class Node(TaggableObject):
795
  """Config object representing a node."""
796
  __slots__ = TaggableObject.__slots__ + [
797
    "name",
798
    "primary_ip",
799
    "secondary_ip",
800
    "serial_no",
801
    "master_candidate",
802
    "offline",
803
    "drained",
804
    ] + _TIMESTAMPS + _UUID
805

    
806

    
807
class Cluster(TaggableObject):
808
  """Config object representing the cluster."""
809
  __slots__ = TaggableObject.__slots__ + [
810
    "serial_no",
811
    "rsahostkeypub",
812
    "highest_used_port",
813
    "tcpudp_port_pool",
814
    "mac_prefix",
815
    "volume_group_name",
816
    "default_bridge",
817
    "default_hypervisor",
818
    "master_node",
819
    "master_ip",
820
    "master_netdev",
821
    "cluster_name",
822
    "file_storage_dir",
823
    "enabled_hypervisors",
824
    "hvparams",
825
    "beparams",
826
    "nicparams",
827
    "candidate_pool_size",
828
    "modify_etc_hosts",
829
    "modify_ssh_setup",
830
    ] + _TIMESTAMPS + _UUID
831

    
832
  def UpgradeConfig(self):
833
    """Fill defaults for missing configuration values.
834

835
    """
836
    if self.hvparams is None:
837
      self.hvparams = constants.HVC_DEFAULTS
838
    else:
839
      for hypervisor in self.hvparams:
840
        self.hvparams[hypervisor] = FillDict(
841
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
842

    
843
    self.beparams = UpgradeGroupedParams(self.beparams,
844
                                         constants.BEC_DEFAULTS)
845
    migrate_default_bridge = not self.nicparams
846
    self.nicparams = UpgradeGroupedParams(self.nicparams,
847
                                          constants.NICC_DEFAULTS)
848
    if migrate_default_bridge:
849
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
850
        self.default_bridge
851

    
852
    if self.modify_etc_hosts is None:
853
      self.modify_etc_hosts = True
854

    
855
    if self.modify_ssh_setup is None:
856
      self.modify_ssh_setup = True
857

    
858
    # default_bridge is no longer used it 2.1. The slot is left there to
859
    # support auto-upgrading, but will be removed in 2.2
860
    if self.default_bridge is not None:
861
      self.default_bridge = None
862

    
863
    # default_hypervisor is just the first enabled one in 2.1
864
    if self.default_hypervisor is not None:
865
      self.enabled_hypervisors = ([self.default_hypervisor] +
866
        [hvname for hvname in self.enabled_hypervisors
867
         if hvname != self.default_hypervisor])
868
      self.default_hypervisor = None
869

    
870
  def ToDict(self):
871
    """Custom function for cluster.
872

873
    """
874
    mydict = super(Cluster, self).ToDict()
875
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
876
    return mydict
877

    
878
  @classmethod
879
  def FromDict(cls, val):
880
    """Custom function for cluster.
881

882
    """
883
    obj = super(Cluster, cls).FromDict(val)
884
    if not isinstance(obj.tcpudp_port_pool, set):
885
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
886
    return obj
887

    
888
  def FillHV(self, instance):
889
    """Fill an instance's hvparams dict.
890

891
    @type instance: L{objects.Instance}
892
    @param instance: the instance parameter to fill
893
    @rtype: dict
894
    @return: a copy of the instance's hvparams with missing keys filled from
895
        the cluster defaults
896

897
    """
898
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
899
                         instance.hvparams)
900

    
901
  def FillBE(self, instance):
902
    """Fill an instance's beparams dict.
903

904
    @type instance: L{objects.Instance}
905
    @param instance: the instance parameter to fill
906
    @rtype: dict
907
    @return: a copy of the instance's beparams with missing keys filled from
908
        the cluster defaults
909

910
    """
911
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
912
                          instance.beparams)
913

    
914

    
915
class BlockDevStatus(ConfigObject):
916
  """Config object representing the status of a block device."""
917
  __slots__ = [
918
    "dev_path",
919
    "major",
920
    "minor",
921
    "sync_percent",
922
    "estimated_time",
923
    "is_degraded",
924
    "ldisk_status",
925
    ]
926

    
927

    
928
class ConfdRequest(ConfigObject):
929
  """Object holding a confd request.
930

931
  @ivar protocol: confd protocol version
932
  @ivar type: confd query type
933
  @ivar query: query request
934
  @ivar rsalt: requested reply salt
935

936
  """
937
  __slots__ = [
938
    "protocol",
939
    "type",
940
    "query",
941
    "rsalt",
942
    ]
943

    
944

    
945
class ConfdReply(ConfigObject):
946
  """Object holding a confd reply.
947

948
  @ivar protocol: confd protocol version
949
  @ivar status: reply status code (ok, error)
950
  @ivar answer: confd query reply
951
  @ivar serial: configuration serial number
952

953
  """
954
  __slots__ = [
955
    "protocol",
956
    "status",
957
    "answer",
958
    "serial",
959
    ]
960

    
961

    
962
class SerializableConfigParser(ConfigParser.SafeConfigParser):
963
  """Simple wrapper over ConfigParse that allows serialization.
964

965
  This class is basically ConfigParser.SafeConfigParser with two
966
  additional methods that allow it to serialize/unserialize to/from a
967
  buffer.
968

969
  """
970
  def Dumps(self):
971
    """Dump this instance and return the string representation."""
972
    buf = StringIO()
973
    self.write(buf)
974
    return buf.getvalue()
975

    
976
  @staticmethod
977
  def Loads(data):
978
    """Load data from a string."""
979
    buf = StringIO(data)
980
    cfp = SerializableConfigParser()
981
    cfp.readfp(buf)
982
    return cfp
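
# Illustrative round trip (not part of the original module):
#
#   scp = SerializableConfigParser()
#   scp.add_section("instance")
#   scp.set("instance", "name", "web1")
#   text = scp.Dumps()                                 # INI-formatted string
#   SerializableConfigParser.Loads(text).get("instance", "name")  # "web1"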