#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict

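# Example (illustrative only; the parameter names and values are made up):
#
#   >>> FillDict({"memory": 128, "vcpus": 1}, {"memory": 512})
#   {'memory': 512, 'vcpus': 1}
#   >>> FillDict({"memory": 128, "vcpus": 1}, {}, skip_keys=["vcpus"])
#   {'memory': 128}
#
# The defaults are deep-copied first, so neither input dict is modified.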
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target

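# Example (illustrative only; the group name and values are made up).  Every
# existing group is filled against the same defaults, while a missing target
# becomes a single group keyed by constants.PP_DEFAULT:
#
#   >>> UpgradeGroupedParams({"group1": {"memory": 512}},
#   ...                      {"memory": 128, "vcpus": 1})
#   {'group1': {'memory': 512, 'vcpus': 1}}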
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful).

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children which are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    which are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigObject to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    objects derived from ConfigObject. If the container is a dict, we
    don't touch the keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass

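# Example of the ToDict/FromDict round trip (illustrative only; the subclass
# below is hypothetical and only shows the intended usage pattern):
#
#   class _Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   obj = _Example(alpha=1)
#   obj.beta                               # -> None (unset slots read as None)
#   obj.ToDict()                           # -> {'alpha': 1}, unset slots omitted
#   _Example.FromDict({"alpha": 1}).alpha  # -> 1, the round trip is lossless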
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the set of tags.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj

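# Example of the tag interface (illustrative only; the node name and tag
# values are made up):
#
#   node = Node(name="node1")
#   node.AddTag("env:production")   # validated against VALID_TAG_RE first
#   node.GetTags()                  # -> set(['env:production'])
#   node.ToDict()["tags"]           # -> ['env:production'], sets become lists
#   node.RemoveTag("unknown")       # raises errors.TagError("Tag not found")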
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check whether the configuration contains a disk of the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER

class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but can be removed once upgrades to the current version
    # straight from 2.0 are deprecated.
    if self.bridge is not None:
      self.bridge = None

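# Example of the 2.0 to 2.1 NIC upgrade path (illustrative only; the MAC and
# bridge values are made up):
#
#   nic = NIC(mac="aa:00:00:35:ac:05", bridge="xen-br0")   # 2.0-style NIC
#   nic.UpgradeConfig()
#   # nic.nicparams == {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#   #                   constants.NIC_LINK: "xen-br0"}
#   # nic.bridge is now None, since the slot is unused from 2.1 onwards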
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) always live at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a non-negative
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """Return the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    the case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd),
    which represent all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Recursively set the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks.

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk

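# Example of the DRBD8 ID layout handled by SetPhysicalID (illustrative only;
# node names, IPs, port and minors are made up):
#
#   logical_id = ("node1", "node2", 11000, 0, 1, "secret")
#   nodes_ip   = {"node1": "192.0.2.1", "node2": "192.0.2.2"}
#   # After disk.SetPhysicalID("node1", nodes_ip), physical_id becomes
#   #   ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")
#   # i.e. (own_ip, port, peer_ip, port, own_minor, secret) as seen from the
#   # node the device is being configured for.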
class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}

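# Example of MapLVsByNode for a plain LVM-backed instance (illustrative only;
# all names are made up):
#
#   inst = Instance(name="inst1", primary_node="node1",
#                   disks=[Disk(dev_type=constants.LD_LV,
#                               logical_id=("xenvg", "inst1-disk0"),
#                               size=1024)])
#   inst.MapLVsByNode()    # -> {'node1': ['inst1-disk0']}
#   inst.secondary_nodes   # -> (), no DRBD disks means no secondaries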
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of (name, description) tuples
      describing the parameters supported by this OS

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID

class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

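# Example of how SimpleFillHV layers hypervisor parameters (illustrative only;
# the values are made up).  Later layers override earlier ones:
#
#   cluster-wide hvparams[hv]           {"kernel_path": "/boot/vmlinuz"}
#   + per-OS overrides os_hvp[os][hv]   {"kernel_path": "/boot/os-kernel"}
#   + the hvparams passed by the caller {"root_path": "/dev/xvda1"}
#   = result                            {"kernel_path": "/boot/os-kernel",
#                                        "root_path": "/dev/xvda1"}
#
# Keys listed in constants.HVC_GLOBALS are skipped when skip_globals is True.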
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for the import/export daemon.

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]

class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
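# Example round trip for SerializableConfigParser (illustrative only; the
# section and option names are made up):
#
#   scp = SerializableConfigParser()
#   scp.add_section("os")
#   scp.set("os", "variant", "default")
#   text = scp.Dumps()      # INI-style string representation
#   SerializableConfigParser.Loads(text).get("os", "variant")   # -> 'default'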