lib/objects.py @ 89b70f39
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]

def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
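
# Illustrative example (not part of the original module; the keys are made
# up): custom values override the defaults and skipped keys are dropped from
# the result, e.g.
#   FillDict({"memory": 128, "vcpus": 1}, {"memory": 256}, skip_keys=["vcpus"])
# returns {"memory": 256}.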


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
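
# Illustrative example (not part of the original module; group and parameter
# names are made up): a None target becomes a single default group filled
# with the defaults, otherwise every existing group is completed, e.g.
#   UpgradeGroupedParams({"grp": {"link": "br1"}}, {"link": "br0", "mode": "x"})
# returns {"grp": {"link": "br1", "mode": "x"}}.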


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children that are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    that are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
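
# Illustrative round trip (not part of the original module), using the NIC
# subclass defined below; only attributes that are actually set end up in the
# dict, and unset slots read back as None:
#   nic = NIC(mac="aa:00:00:11:22:33")
#   nic.ToDict()                            # {'mac': 'aa:00:00:11:22:33'}
#   NIC.FromDict(nic.ToDict()).ip is None   # True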


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
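
# Illustrative tag handling (not part of the original module), using the Node
# subclass defined below; tags live in a set at runtime and are serialized as
# a list by ToDict:
#   node = Node(name="node1.example.com")
#   node.AddTag("staging")
#   node.GetTags()          # set(['staging'])
#   node.ToDict()["tags"]   # ['staging']
#   node.AddTag("bad tag")  # raises errors.TagError (space is not allowed)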


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
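
# Illustrative 2.0 -> 2.1 upgrade (not part of the original module; the bridge
# name is made up): a NIC that only carries the old "bridge" attribute gets
# bridged-mode nicparams and the bridge slot is then cleared:
#   nic = NIC(mac="aa:00:00:11:22:33", bridge="xen-br0")
#   nic.UpgradeConfig()
#   # nic.nicparams == {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#   #                   constants.NIC_LINK: "xen-br0"} and nic.bridge is None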


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
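
  # Illustrative example (not part of the original module; node names are made
  # up): for a DRBD8 disk with logical_id ("node1", "node2", ...) whose
  # children are plain LVs, ComputeNodeTree("node1") returns just
  #   [("node1", <the drbd disk>), ("node2", <the drbd disk>)]
  # because each LV child maps to a single node already covered by the parent.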

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return
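
  # Illustrative example (not part of the original module; names and IPs are
  # made up): for a DRBD8 disk with
  #   logical_id = ("node1", "node2", 11000, 0, 0, "secret")
  # calling SetPhysicalID("node1", {"node1": "192.0.2.1", "node2": "192.0.2.2"})
  # sets physical_id to ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret"),
  # i.e. (local_ip, port, remote_ip, port, local_minor, secret).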

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and, if not properly
    synchronised, can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
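
  # Illustrative result (not part of the original module; node and volume
  # names are made up): for an instance whose single DRBD8 disk has two LV
  # children named "disk0_data" and "disk0_meta", MapLVsByNode() returns
  # something like
  #   {"node1": ["disk0_data", "disk0_meta"],
  #    "node2": ["disk0_data", "disk0_meta"]}
  # since the backing LVs must exist on both DRBD peers.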

  def FindDisk(self, idx):
    """Find a disk with the given index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass


class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "supported_variants",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams, skip_keys=skip_keys)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)
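
# Illustrative example (not part of the original module; the values are made
# up): with cluster.beparams = {constants.PP_DEFAULT: {"memory": 128,
# "vcpus": 1}} and instance.beparams = {"memory": 512},
#   cluster.FillBE(instance)
# returns {"memory": 512, "vcpus": 1}; FillHV works the same way, but can
# also drop the keys listed in constants.HVC_GLOBALS when skip_globals=True.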


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @staticmethod
  def Loads(data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = SerializableConfigParser()
    cfp.readfp(buf)
    return cfp
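
# Illustrative round trip (not part of the original module; section and option
# names are made up):
#   cfg = SerializableConfigParser()
#   cfg.add_section("cluster")
#   cfg.set("cluster", "master", "node1.example.com")
#   data = cfg.Dumps()
#   SerializableConfigParser.Loads(data).get("cluster", "master")
#   # -> 'node1.example.com'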