1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable-msg=E0203,W0201
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
import ConfigParser
37
import re
38
import copy
39
from cStringIO import StringIO
40

    
41
from ganeti import errors
42
from ganeti import constants
43

    
44
from socket import AF_INET
45

    
46

    
47
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
48
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
49

    
50
_TIMESTAMPS = ["ctime", "mtime"]
51
_UUID = ["uuid"]
52

    
53

    
54
def FillDict(defaults_dict, custom_dict, skip_keys=None):
55
  """Basic function to apply settings on top a default dict.
56

57
  @type defaults_dict: dict
58
  @param defaults_dict: dictionary holding the default values
59
  @type custom_dict: dict
60
  @param custom_dict: dictionary holding customized values
61
  @type skip_keys: list
62
  @param skip_keys: which keys not to fill
63
  @rtype: dict
64
  @return: dict with the 'full' values
65

66
  """
67
  ret_dict = copy.deepcopy(defaults_dict)
68
  ret_dict.update(custom_dict)
69
  if skip_keys:
70
    for k in skip_keys:
71
      try:
72
        del ret_dict[k]
73
      except KeyError:
74
        pass
75
  return ret_dict
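# Illustrative example (not part of the original module): the custom values
# are layered on top of a deep copy of the defaults, and any skip_keys are
# removed from the result:
#
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) -> {"b": 3}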
76

    
77

    
78
def UpgradeGroupedParams(target, defaults):
79
  """Update all groups for the target parameter.
80

81
  @type target: dict of dicts
82
  @param target: {group: {parameter: value}}
83
  @type defaults: dict
84
  @param defaults: default parameter values
85

86
  """
87
  if target is None:
88
    target = {constants.PP_DEFAULT: defaults}
89
  else:
90
    for group in target:
91
      target[group] = FillDict(defaults, target[group])
92
  return target
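# Illustrative sketch (hypothetical group and parameter names): every group
# present in the target is filled with the defaults, while a missing target
# becomes a single constants.PP_DEFAULT group holding the defaults:
#
#   UpgradeGroupedParams({"grp1": {"mode": "routed"}},
#                        {"mode": "bridged", "link": ""})
#     -> {"grp1": {"mode": "routed", "link": ""}}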
93

    
94

    
95
class ConfigObject(object):
96
  """A generic config object.
97

98
  It has the following properties:
99

100
    - provides somewhat safe recursive unpickling and pickling for its classes
101
    - unset attributes which are defined in slots are always returned
102
      as None instead of raising an error
103

104
  Classes derived from this must always declare __slots__ (we use many
105
  config objects and the memory reduction is useful)
106

107
  """
108
  __slots__ = []
109

    
110
  def __init__(self, **kwargs):
111
    for k, v in kwargs.iteritems():
112
      setattr(self, k, v)
113

    
114
  def __getattr__(self, name):
115
    if name not in self._all_slots():
116
      raise AttributeError("Invalid object attribute %s.%s" %
117
                           (type(self).__name__, name))
118
    return None
119

    
120
  def __setstate__(self, state):
121
    slots = self._all_slots()
122
    for name in state:
123
      if name in slots:
124
        setattr(self, name, state[name])
125

    
126
  @classmethod
127
  def _all_slots(cls):
128
    """Compute the list of all declared slots for a class.
129

130
    """
131
    slots = []
132
    for parent in cls.__mro__:
133
      slots.extend(getattr(parent, "__slots__", []))
134
    return slots
135

    
136
  def ToDict(self):
137
    """Convert to a dict holding only standard python types.
138

139
    The generic routine just dumps all of this object's attributes in
140
    a dict. It does not work if the class has children who are
141
    ConfigObjects themselves (e.g. the nics list in an Instance), in
142
    which case the object should subclass the function in order to
143
    make sure all objects returned are only standard python types.
144

145
    """
146
    result = {}
147
    for name in self._all_slots():
148
      value = getattr(self, name, None)
149
      if value is not None:
150
        result[name] = value
151
    return result
152

    
153
  __getstate__ = ToDict
154

    
155
  @classmethod
156
  def FromDict(cls, val):
157
    """Create an object from a dictionary.
158

159
    This generic routine takes a dict, instantiates a new instance of
160
    the given class, and sets attributes based on the dict content.
161

162
    As for `ToDict`, this does not work if the class has children
163
    who are ConfigObjects themselves (e.g. the nics list in an
164
    Instance), in which case the object should subclass the function
165
    and alter the objects.
166

167
    """
168
    if not isinstance(val, dict):
169
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
170
                                      " expected dict, got %s" % type(val))
171
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
172
    obj = cls(**val_str) # pylint: disable-msg=W0142
173
    return obj
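  # Illustrative round-trip sketch (uses the NIC class defined below; the MAC
  # value is made up): unset or None attributes are omitted from the dict
  # form, and unset slots read back as None after FromDict:
  #
  #   nic = NIC(mac="aa:00:00:11:22:33")
  #   nic.ToDict()                  -> {"mac": "aa:00:00:11:22:33"}
  #   NIC.FromDict(nic.ToDict()).ip -> None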
174

    
175
  @staticmethod
176
  def _ContainerToDicts(container):
177
    """Convert the elements of a container to standard python types.
178

179
    This method converts a container with elements derived from
180
    ConfigData to standard python types. If the container is a dict,
181
    we don't touch the keys, only the values.
182

183
    """
184
    if isinstance(container, dict):
185
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
186
    elif isinstance(container, (list, tuple, set, frozenset)):
187
      ret = [elem.ToDict() for elem in container]
188
    else:
189
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
190
                      type(container))
191
    return ret
192

    
193
  @staticmethod
194
  def _ContainerFromDicts(source, c_type, e_type):
195
    """Convert a container from standard python types.
196

197
    This method converts a container with standard python types to
198
    ConfigData objects. If the container is a dict, we don't touch the
199
    keys, only the values.
200

201
    """
202
    if not isinstance(c_type, type):
203
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
204
                      " not a type" % type(c_type))
205
    if source is None:
206
      source = c_type()
207
    if c_type is dict:
208
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
209
    elif c_type in (list, tuple, set, frozenset):
210
      ret = c_type([e_type.FromDict(elem) for elem in source])
211
    else:
212
      raise TypeError("Invalid container type %s passed to"
213
                      " _ContainerFromDicts" % c_type)
214
    return ret
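  # Illustrative sketch (made-up MAC value): dict containers keep their keys
  # and convert only the values, list containers convert each element:
  #
  #   ConfigObject._ContainerToDicts([NIC(mac="aa:00:00:11:22:33")])
  #     -> [{"mac": "aa:00:00:11:22:33"}]
  #   ConfigObject._ContainerFromDicts({"nic0": {"mac": "aa:00:00:11:22:33"}},
  #                                    dict, NIC)
  #     -> {"nic0": <NIC instance>}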
215

    
216
  def Copy(self):
217
    """Makes a deep copy of the current object and its children.
218

219
    """
220
    dict_form = self.ToDict()
221
    clone_obj = self.__class__.FromDict(dict_form)
222
    return clone_obj
223

    
224
  def __repr__(self):
225
    """Implement __repr__ for ConfigObjects."""
226
    return repr(self.ToDict())
227

    
228
  def UpgradeConfig(self):
229
    """Fill defaults for missing configuration values.
230

231
    This method will be called at configuration load time, and its
232
    implementation will be object dependent.
233

234
    """
235
    pass
236

    
237

    
238
class TaggableObject(ConfigObject):
239
  """An generic class supporting tags.
240

241
  """
242
  __slots__ = ["tags"]
243
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
244

    
245
  @classmethod
246
  def ValidateTag(cls, tag):
247
    """Check if a tag is valid.
248

249
    If the tag is invalid, an errors.TagError will be raised. The
250
    function has no return value.
251

252
    """
253
    if not isinstance(tag, basestring):
254
      raise errors.TagError("Invalid tag type (not a string)")
255
    if len(tag) > constants.MAX_TAG_LEN:
256
      raise errors.TagError("Tag too long (>%d characters)" %
257
                            constants.MAX_TAG_LEN)
258
    if not tag:
259
      raise errors.TagError("Tags cannot be empty")
260
    if not cls.VALID_TAG_RE.match(tag):
261
      raise errors.TagError("Tag contains invalid characters")
262

    
263
  def GetTags(self):
264
    """Return the tags list.
265

266
    """
267
    tags = getattr(self, "tags", None)
268
    if tags is None:
269
      tags = self.tags = set()
270
    return tags
271

    
272
  def AddTag(self, tag):
273
    """Add a new tag.
274

275
    """
276
    self.ValidateTag(tag)
277
    tags = self.GetTags()
278
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
279
      raise errors.TagError("Too many tags")
280
    self.GetTags().add(tag)
281

    
282
  def RemoveTag(self, tag):
283
    """Remove a tag.
284

285
    """
286
    self.ValidateTag(tag)
287
    tags = self.GetTags()
288
    try:
289
      tags.remove(tag)
290
    except KeyError:
291
      raise errors.TagError("Tag not found")
292

    
293
  def ToDict(self):
294
    """Taggable-object-specific conversion to standard python types.
295

296
    This replaces the tags set with a list.
297

298
    """
299
    bo = super(TaggableObject, self).ToDict()
300

    
301
    tags = bo.get("tags", None)
302
    if isinstance(tags, set):
303
      bo["tags"] = list(tags)
304
    return bo
305

    
306
  @classmethod
307
  def FromDict(cls, val):
308
    """Custom function for instances.
309

310
    """
311
    obj = super(TaggableObject, cls).FromDict(val)
312
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
313
      obj.tags = set(obj.tags)
314
    return obj
315

    
316

    
317
class ConfigData(ConfigObject):
318
  """Top-level config object."""
319
  __slots__ = [
320
    "version",
321
    "cluster",
322
    "nodes",
323
    "nodegroups",
324
    "instances",
325
    "serial_no",
326
    ] + _TIMESTAMPS
327

    
328
  def ToDict(self):
329
    """Custom function for top-level config data.
330

331
    This just replaces the list of instances, nodes and the cluster
332
    with standard python types.
333

334
    """
335
    mydict = super(ConfigData, self).ToDict()
336
    mydict["cluster"] = mydict["cluster"].ToDict()
337
    for key in "nodes", "instances", "nodegroups":
338
      mydict[key] = self._ContainerToDicts(mydict[key])
339

    
340
    return mydict
341

    
342
  @classmethod
343
  def FromDict(cls, val):
344
    """Custom function for top-level config data
345

346
    """
347
    obj = super(ConfigData, cls).FromDict(val)
348
    obj.cluster = Cluster.FromDict(obj.cluster)
349
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
350
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
351
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
352
    return obj
353

    
354
  def HasAnyDiskOfType(self, dev_type):
355
    """Check if in there is at disk of the given type in the configuration.
356

357
    @type dev_type: L{constants.LDS_BLOCK}
358
    @param dev_type: the type to look for
359
    @rtype: boolean
360
    @return: boolean indicating if a disk of the given type was found or not
361

362
    """
363
    for instance in self.instances.values():
364
      for disk in instance.disks:
365
        if disk.IsBasedOnDiskType(dev_type):
366
          return True
367
    return False
368

    
369
  def UpgradeConfig(self):
370
    """Fill defaults for missing configuration values.
371

372
    """
373
    self.cluster.UpgradeConfig()
374
    for node in self.nodes.values():
375
      node.UpgradeConfig()
376
    for instance in self.instances.values():
377
      instance.UpgradeConfig()
378
    if self.nodegroups is None:
379
      self.nodegroups = {}
380
    for nodegroup in self.nodegroups.values():
381
      nodegroup.UpgradeConfig()
382
    if self.cluster.drbd_usermode_helper is None:
383
      # To decide if we set a helper, let's check if at least one instance has
384
      # a DRBD disk. This does not cover all the possible scenarios but it
385
      # gives a good approximation.
386
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
387
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
388

    
389

    
390
class NIC(ConfigObject):
391
  """Config object representing a network card."""
392
  __slots__ = ["mac", "ip", "bridge", "nicparams"]
393

    
394
  @classmethod
395
  def CheckParameterSyntax(cls, nicparams):
396
    """Check the given parameters for validity.
397

398
    @type nicparams:  dict
399
    @param nicparams: dictionary with parameter names/value
400
    @raise errors.ConfigurationError: when a parameter is not valid
401

402
    """
403
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
404
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
405
      raise errors.ConfigurationError(err)
406

    
407
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
408
        not nicparams[constants.NIC_LINK]):
409
      err = "Missing bridged nic link"
410
      raise errors.ConfigurationError(err)
411

    
412
  def UpgradeConfig(self):
413
    """Fill defaults for missing configuration values.
414

415
    """
416
    if self.nicparams is None:
417
      self.nicparams = {}
418
      if self.bridge is not None:
419
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
420
        self.nicparams[constants.NIC_LINK] = self.bridge
421
    # bridge is no longer used in 2.1. The slot is left there to support
422
    # upgrading, but can be removed once upgrades to the current version
423
    # straight from 2.0 are deprecated.
424
    if self.bridge is not None:
425
      self.bridge = None
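  # Illustrative upgrade sketch (the MAC and bridge values are made up): a
  # pre-2.1 NIC that only carried a bridge is converted to bridged-mode
  # nicparams, and the legacy bridge slot is then cleared:
  #
  #   nic = NIC(mac="aa:00:00:11:22:33", bridge="xen-br0")
  #   nic.UpgradeConfig()
  #   nic.nicparams -> {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
  #                     constants.NIC_LINK: "xen-br0"}
  #   nic.bridge    -> None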
426

    
427

    
428
class Disk(ConfigObject):
429
  """Config object representing a block device."""
430
  __slots__ = ["dev_type", "logical_id", "physical_id",
431
               "children", "iv_name", "size", "mode"]
432

    
433
  def CreateOnSecondary(self):
434
    """Test if this device needs to be created on a secondary node."""
435
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
436

    
437
  def AssembleOnSecondary(self):
438
    """Test if this device needs to be assembled on a secondary node."""
439
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
440

    
441
  def OpenOnSecondary(self):
442
    """Test if this device needs to be opened on a secondary node."""
443
    return self.dev_type in (constants.LD_LV,)
444

    
445
  def StaticDevPath(self):
446
    """Return the device path if this device type has a static one.
447

448
    Some devices (LVM for example) live always at the same /dev/ path,
449
    irrespective of their status. For such devices, we return this
450
    path, for others we return None.
451

452
    @warning: The path returned is not a normalized pathname; callers
453
        should check that it is a valid path.
454

455
    """
456
    if self.dev_type == constants.LD_LV:
457
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
458
    return None
459

    
460
  def ChildrenNeeded(self):
461
    """Compute the needed number of children for activation.
462

463
    This method will return either -1 (all children) or a positive
464
    number denoting the minimum number of children needed for
465
    activation (only mirrored devices will usually return >=0).
466

467
    Currently, only DRBD8 supports diskless activation (therefore we
468
    return 0), for all other we keep the previous semantics and return
469
    -1.
470

471
    """
472
    if self.dev_type == constants.LD_DRBD8:
473
      return 0
474
    return -1
475

    
476
  def IsBasedOnDiskType(self, dev_type):
477
    """Check if the disk or its children are based on the given type.
478

479
    @type dev_type: L{constants.LDS_BLOCK}
480
    @param dev_type: the type to look for
481
    @rtype: boolean
482
    @return: boolean indicating if a device of the given type was found or not
483

484
    """
485
    if self.children:
486
      for child in self.children:
487
        if child.IsBasedOnDiskType(dev_type):
488
          return True
489
    return self.dev_type == dev_type
490

    
491
  def GetNodes(self, node):
492
    """This function returns the nodes this device lives on.
493

494
    Given the node on which the parent of the device lives (or, in
495
    case of a top-level device, the primary node of the device's
496
    instance), this function will return a list of nodes on which this
497
    device needs to (or can) be assembled.
498

499
    """
500
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
501
      result = [node]
502
    elif self.dev_type in constants.LDS_DRBD:
503
      result = [self.logical_id[0], self.logical_id[1]]
504
      if node not in result:
505
        raise errors.ConfigurationError("DRBD device passed unknown node")
506
    else:
507
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
508
    return result
509

    
510
  def ComputeNodeTree(self, parent_node):
511
    """Compute the node/disk tree for this disk and its children.
512

513
    This method, given the node on which the parent disk lives, will
514
    return the list of all (node, disk) pairs which describe the disk
515
    tree in the most compact way. For example, a drbd/lvm stack
516
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
517
    which represents all the top-level devices on the nodes.
518

519
    """
520
    my_nodes = self.GetNodes(parent_node)
521
    result = [(node, self) for node in my_nodes]
522
    if not self.children:
523
      # leaf device
524
      return result
525
    for node in my_nodes:
526
      for child in self.children:
527
        child_result = child.ComputeNodeTree(node)
528
        if len(child_result) == 1:
529
          # child (and all its descendants) is simple, doesn't split
530
          # over multiple hosts, so we don't need to describe it, our
531
          # own entry for this node describes it completely
532
          continue
533
        else:
534
          # check if child nodes differ from my nodes; note that
535
          # subdisk can differ from the child itself, and be instead
536
          # one of its descendants
537
          for subnode, subdisk in child_result:
538
            if subnode not in my_nodes:
539
              result.append((subnode, subdisk))
540
            # otherwise child is under our own node, so we ignore this
541
            # entry (but probably the other results in the list will
542
            # be different)
543
    return result
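  # Illustrative sketch (node names are made up): for a DRBD8 disk mirrored
  # between node1 and node2 with local LV children, the children do not split
  # across hosts, so only the top-level device is reported for each node:
  #
  #   drbd.ComputeNodeTree("node1") -> [("node1", drbd), ("node2", drbd)]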
544

    
545
  def RecordGrow(self, amount):
546
    """Update the size of this disk after growth.
547

548
    This method recurses over the disk's children and updates their
549
    size correspondingly. The method needs to be kept in sync with the
550
    actual algorithms from bdev.
551

552
    """
553
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
554
      self.size += amount
555
    elif self.dev_type == constants.LD_DRBD8:
556
      if self.children:
557
        self.children[0].RecordGrow(amount)
558
      self.size += amount
559
    else:
560
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
561
                                   " disk type %s" % self.dev_type)
562

    
563
  def UnsetSize(self):
564
    """Sets recursively the size to zero for the disk and its children.
565

566
    """
567
    if self.children:
568
      for child in self.children:
569
        child.UnsetSize()
570
    self.size = 0
571

    
572
  def SetPhysicalID(self, target_node, nodes_ip):
573
    """Convert the logical ID to the physical ID.
574

575
    This is used only for drbd, which needs ip/port configuration.
576

577
    The routine descends down and updates its children also, because
578
    this helps when only the top device is passed to the remote
579
    node.
580

581
    Arguments:
582
      - target_node: the node we wish to configure for
583
      - nodes_ip: a mapping of node name to ip
584

585
    The target_node must exist in nodes_ip, and must be one of the
586
    nodes in the logical ID for each of the DRBD devices encountered
587
    in the disk tree.
588

589
    """
590
    if self.children:
591
      for child in self.children:
592
        child.SetPhysicalID(target_node, nodes_ip)
593

    
594
    if self.logical_id is None and self.physical_id is not None:
595
      return
596
    if self.dev_type in constants.LDS_DRBD:
597
      pnode, snode, port, pminor, sminor, secret = self.logical_id
598
      if target_node not in (pnode, snode):
599
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
600
                                        target_node)
601
      pnode_ip = nodes_ip.get(pnode, None)
602
      snode_ip = nodes_ip.get(snode, None)
603
      if pnode_ip is None or snode_ip is None:
604
        raise errors.ConfigurationError("Can't find primary or secondary node"
605
                                        " for %s" % str(self))
606
      p_data = (pnode_ip, port)
607
      s_data = (snode_ip, port)
608
      if pnode == target_node:
609
        self.physical_id = p_data + s_data + (pminor, secret)
610
      else: # it must be secondary, we tested above
611
        self.physical_id = s_data + p_data + (sminor, secret)
612
    else:
613
      self.physical_id = self.logical_id
614
    return
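  # Illustrative sketch (addresses, port and secret are made up): for a DRBD
  # disk with logical_id (pnode, snode, port, pminor, sminor, secret), e.g.
  # ("node1", "node2", 11000, 0, 1, "s3cr3t"), calling
  # SetPhysicalID("node1", {"node1": "192.0.2.1", "node2": "192.0.2.2"})
  # yields a physical_id of
  # ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "s3cr3t"),
  # i.e. the local (ip, port) pair first, the remote pair second, then the
  # local minor and the shared secret.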
615

    
616
  def ToDict(self):
617
    """Disk-specific conversion to standard python types.
618

619
    This replaces the children lists of objects with lists of
620
    standard python types.
621

622
    """
623
    bo = super(Disk, self).ToDict()
624

    
625
    for attr in ("children",):
626
      alist = bo.get(attr, None)
627
      if alist:
628
        bo[attr] = self._ContainerToDicts(alist)
629
    return bo
630

    
631
  @classmethod
632
  def FromDict(cls, val):
633
    """Custom function for Disks
634

635
    """
636
    obj = super(Disk, cls).FromDict(val)
637
    if obj.children:
638
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
639
    if obj.logical_id and isinstance(obj.logical_id, list):
640
      obj.logical_id = tuple(obj.logical_id)
641
    if obj.physical_id and isinstance(obj.physical_id, list):
642
      obj.physical_id = tuple(obj.physical_id)
643
    if obj.dev_type in constants.LDS_DRBD:
644
      # we need a tuple of length six here
645
      if len(obj.logical_id) < 6:
646
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
647
    return obj
648

    
649
  def __str__(self):
650
    """Custom str() formatter for disks.
651

652
    """
653
    if self.dev_type == constants.LD_LV:
654
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
655
    elif self.dev_type in constants.LDS_DRBD:
656
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
657
      val = "<DRBD8("
658
      if self.physical_id is None:
659
        phy = "unconfigured"
660
      else:
661
        phy = ("configured as %s:%s %s:%s" %
662
               (self.physical_id[0], self.physical_id[1],
663
                self.physical_id[2], self.physical_id[3]))
664

    
665
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
666
              (node_a, minor_a, node_b, minor_b, port, phy))
667
      if self.children and self.children.count(None) == 0:
668
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
669
      else:
670
        val += "no local storage"
671
    else:
672
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
673
             (self.dev_type, self.logical_id, self.physical_id, self.children))
674
    if self.iv_name is None:
675
      val += ", not visible"
676
    else:
677
      val += ", visible as /dev/%s" % self.iv_name
678
    if isinstance(self.size, int):
679
      val += ", size=%dm)>" % self.size
680
    else:
681
      val += ", size='%s')>" % (self.size,)
682
    return val
683

    
684
  def Verify(self):
685
    """Checks that this disk is correctly configured.
686

687
    """
688
    all_errors = []
689
    if self.mode not in constants.DISK_ACCESS_SET:
690
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
691
    return all_errors
692

    
693
  def UpgradeConfig(self):
694
    """Fill defaults for missing configuration values.
695

696
    """
697
    if self.children:
698
      for child in self.children:
699
        child.UpgradeConfig()
700
    # add here config upgrade for this disk
701

    
702

    
703
class Instance(TaggableObject):
704
  """Config object representing an instance."""
705
  __slots__ = [
706
    "name",
707
    "primary_node",
708
    "os",
709
    "hypervisor",
710
    "hvparams",
711
    "beparams",
712
    "osparams",
713
    "admin_up",
714
    "nics",
715
    "disks",
716
    "disk_template",
717
    "network_port",
718
    "serial_no",
719
    ] + _TIMESTAMPS + _UUID
720

    
721
  def _ComputeSecondaryNodes(self):
722
    """Compute the list of secondary nodes.
723

724
    This is a simple wrapper over _ComputeAllNodes.
725

726
    """
727
    all_nodes = set(self._ComputeAllNodes())
728
    all_nodes.discard(self.primary_node)
729
    return tuple(all_nodes)
730

    
731
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
732
                             "List of secondary nodes")
733

    
734
  def _ComputeAllNodes(self):
735
    """Compute the list of all nodes.
736

737
    Since the data is already there (in the drbd disks), keeping it as
738
    a separate normal attribute is redundant and if not properly
739
    synchronised can cause problems. Thus it's better to compute it
740
    dynamically.
741

742
    """
743
    def _Helper(nodes, device):
744
      """Recursively computes nodes given a top device."""
745
      if device.dev_type in constants.LDS_DRBD:
746
        nodea, nodeb = device.logical_id[:2]
747
        nodes.add(nodea)
748
        nodes.add(nodeb)
749
      if device.children:
750
        for child in device.children:
751
          _Helper(nodes, child)
752

    
753
    all_nodes = set()
754
    all_nodes.add(self.primary_node)
755
    for device in self.disks:
756
      _Helper(all_nodes, device)
757
    return tuple(all_nodes)
758

    
759
  all_nodes = property(_ComputeAllNodes, None, None,
760
                       "List of all nodes of the instance")
761

    
762
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
763
    """Provide a mapping of nodes to LVs this instance owns.
764

765
    This function figures out what logical volumes should belong on
766
    which nodes, recursing through a device tree.
767

768
    @param lvmap: optional dictionary to receive the
769
        'node' : ['lv', ...] data.
770

771
    @return: None if lvmap arg is given, otherwise, a dictionary
772
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
773

774
    """
775
    if node is None:
776
      node = self.primary_node
777

    
778
    if lvmap is None:
779
      lvmap = { node : [] }
780
      ret = lvmap
781
    else:
782
      if node not in lvmap:
783
        lvmap[node] = []
784
      ret = None
785

    
786
    if not devs:
787
      devs = self.disks
788

    
789
    for dev in devs:
790
      if dev.dev_type == constants.LD_LV:
791
        lvmap[node].append(dev.logical_id[1])
792

    
793
      elif dev.dev_type in constants.LDS_DRBD:
794
        if dev.children:
795
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
796
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
797

    
798
      elif dev.children:
799
        self.MapLVsByNode(lvmap, dev.children, node)
800

    
801
    return ret
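  # Illustrative sketch (volume group, LV and node names are made up): for an
  # instance whose only disk is a plain LV with logical_id ("xenvg", "vol0"),
  # the default call maps the primary node to its logical volumes:
  #
  #   instance.MapLVsByNode() -> {"node1.example.com": ["vol0"]}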
802

    
803
  def FindDisk(self, idx):
804
    """Find a disk given having a specified index.
805

806
    This is just a wrapper that does validation of the index.
807

808
    @type idx: int
809
    @param idx: the disk index
810
    @rtype: L{Disk}
811
    @return: the corresponding disk
812
    @raise errors.OpPrereqError: when the given index is not valid
813

814
    """
815
    try:
816
      idx = int(idx)
817
      return self.disks[idx]
818
    except (TypeError, ValueError), err:
819
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
820
                                 errors.ECODE_INVAL)
821
    except IndexError:
822
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
823
                                 " 0 to %d" % (idx, len(self.disks)),
824
                                 errors.ECODE_INVAL)
825

    
826
  def ToDict(self):
827
    """Instance-specific conversion to standard python types.
828

829
    This replaces the children lists of objects with lists of standard
830
    python types.
831

832
    """
833
    bo = super(Instance, self).ToDict()
834

    
835
    for attr in "nics", "disks":
836
      alist = bo.get(attr, None)
837
      if alist:
838
        nlist = self._ContainerToDicts(alist)
839
      else:
840
        nlist = []
841
      bo[attr] = nlist
842
    return bo
843

    
844
  @classmethod
845
  def FromDict(cls, val):
846
    """Custom function for instances.
847

848
    """
849
    obj = super(Instance, cls).FromDict(val)
850
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
851
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
852
    return obj
853

    
854
  def UpgradeConfig(self):
855
    """Fill defaults for missing configuration values.
856

857
    """
858
    for nic in self.nics:
859
      nic.UpgradeConfig()
860
    for disk in self.disks:
861
      disk.UpgradeConfig()
862
    if self.hvparams:
863
      for key in constants.HVC_GLOBALS:
864
        try:
865
          del self.hvparams[key]
866
        except KeyError:
867
          pass
868
    if self.osparams is None:
869
      self.osparams = {}
870

    
871

    
872
class OS(ConfigObject):
873
  """Config object representing an operating system.
874

875
  @type supported_parameters: list
876
  @ivar supported_parameters: a list of tuples, name and description,
877
      containing the supported parameters by this OS
878

879
  @type VARIANT_DELIM: string
880
  @cvar VARIANT_DELIM: the variant delimiter
881

882
  """
883
  __slots__ = [
884
    "name",
885
    "path",
886
    "api_versions",
887
    "create_script",
888
    "export_script",
889
    "import_script",
890
    "rename_script",
891
    "verify_script",
892
    "supported_variants",
893
    "supported_parameters",
894
    ]
895

    
896
  VARIANT_DELIM = "+"
897

    
898
  @classmethod
899
  def SplitNameVariant(cls, name):
900
    """Splits the name into the proper name and variant.
901

902
    @param name: the OS (unprocessed) name
903
    @rtype: list
904
    @return: a list of two elements; if the original name didn't
905
        contain a variant, the variant is returned as an empty string
906

907
    """
908
    nv = name.split(cls.VARIANT_DELIM, 1)
909
    if len(nv) == 1:
910
      nv.append("")
911
    return nv
912

    
913
  @classmethod
914
  def GetName(cls, name):
915
    """Returns the proper name of the os (without the variant).
916

917
    @param name: the OS (unprocessed) name
918

919
    """
920
    return cls.SplitNameVariant(name)[0]
921

    
922
  @classmethod
923
  def GetVariant(cls, name):
924
    """Returns the variant the os (without the base name).
925

926
    @param name: the OS (unprocessed) name
927

928
    """
929
    return cls.SplitNameVariant(name)[1]
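  # Illustrative sketch (OS names are made up):
  #
  #   OS.SplitNameVariant("debootstrap+secure") -> ["debootstrap", "secure"]
  #   OS.GetName("debootstrap+secure")          -> "debootstrap"
  #   OS.GetVariant("debootstrap")              -> ""   (no variant given)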
930

    
931

    
932
class Node(TaggableObject):
933
  """Config object representing a node."""
934
  __slots__ = [
935
    "name",
936
    "primary_ip",
937
    "secondary_ip",
938
    "serial_no",
939
    "master_candidate",
940
    "offline",
941
    "drained",
942
    "group",
943
    "master_capable",
944
    "vm_capable",
945
    ] + _TIMESTAMPS + _UUID
946

    
947
  def UpgradeConfig(self):
948
    """Fill defaults for missing configuration values.
949

950
    """
951
    # pylint: disable-msg=E0203
952
    # because these are "defined" via slots, not manually
953
    if self.master_capable is None:
954
      self.master_capable = True
955

    
956
    if self.vm_capable is None:
957
      self.vm_capable = True
958

    
959

    
960
class NodeGroup(ConfigObject):
961
  """Config object representing a node group."""
962
  __slots__ = [
963
    "name",
964
    "members",
965
    ] + _TIMESTAMPS + _UUID
966

    
967
  def ToDict(self):
968
    """Custom function for nodegroup.
969

970
    This discards the members object, which gets recalculated and is only kept
971
    in memory.
972

973
    """
974
    mydict = super(NodeGroup, self).ToDict()
975
    del mydict["members"]
976
    return mydict
977

    
978
  @classmethod
979
  def FromDict(cls, val):
980
    """Custom function for nodegroup.
981

982
    The members slot is initialized to an empty list upon deserialization.
983

984
    """
985
    obj = super(NodeGroup, cls).FromDict(val)
986
    obj.members = []
987
    return obj
988

    
989

    
990
class Cluster(TaggableObject):
991
  """Config object representing the cluster."""
992
  __slots__ = [
993
    "serial_no",
994
    "rsahostkeypub",
995
    "highest_used_port",
996
    "tcpudp_port_pool",
997
    "mac_prefix",
998
    "volume_group_name",
999
    "reserved_lvs",
1000
    "drbd_usermode_helper",
1001
    "default_bridge",
1002
    "default_hypervisor",
1003
    "master_node",
1004
    "master_ip",
1005
    "master_netdev",
1006
    "cluster_name",
1007
    "file_storage_dir",
1008
    "enabled_hypervisors",
1009
    "hvparams",
1010
    "os_hvp",
1011
    "beparams",
1012
    "osparams",
1013
    "nicparams",
1014
    "candidate_pool_size",
1015
    "modify_etc_hosts",
1016
    "modify_ssh_setup",
1017
    "maintain_node_health",
1018
    "uid_pool",
1019
    "default_iallocator",
1020
    "hidden_os",
1021
    "blacklisted_os",
1022
    "primary_ip_family",
1023
    "prealloc_wipe_disks",
1024
    ] + _TIMESTAMPS + _UUID
1025

    
1026
  def UpgradeConfig(self):
1027
    """Fill defaults for missing configuration values.
1028

1029
    """
1030
    # pylint: disable-msg=E0203
1031
    # because these are "defined" via slots, not manually
1032
    if self.hvparams is None:
1033
      self.hvparams = constants.HVC_DEFAULTS
1034
    else:
1035
      for hypervisor in self.hvparams:
1036
        self.hvparams[hypervisor] = FillDict(
1037
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1038

    
1039
    if self.os_hvp is None:
1040
      self.os_hvp = {}
1041

    
1042
    # osparams added before 2.2
1043
    if self.osparams is None:
1044
      self.osparams = {}
1045

    
1046
    self.beparams = UpgradeGroupedParams(self.beparams,
1047
                                         constants.BEC_DEFAULTS)
1048
    migrate_default_bridge = not self.nicparams
1049
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1050
                                          constants.NICC_DEFAULTS)
1051
    if migrate_default_bridge:
1052
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1053
        self.default_bridge
1054

    
1055
    if self.modify_etc_hosts is None:
1056
      self.modify_etc_hosts = True
1057

    
1058
    if self.modify_ssh_setup is None:
1059
      self.modify_ssh_setup = True
1060

    
1061
    # default_bridge is no longer used in 2.1. The slot is left there to
1062
    # support auto-upgrading. It can be removed once we decide to deprecate
1063
    # upgrading straight from 2.0.
1064
    if self.default_bridge is not None:
1065
      self.default_bridge = None
1066

    
1067
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1068
    # code can be removed once upgrading straight from 2.0 is deprecated.
1069
    if self.default_hypervisor is not None:
1070
      self.enabled_hypervisors = ([self.default_hypervisor] +
1071
        [hvname for hvname in self.enabled_hypervisors
1072
         if hvname != self.default_hypervisor])
1073
      self.default_hypervisor = None
1074

    
1075
    # maintain_node_health added after 2.1.1
1076
    if self.maintain_node_health is None:
1077
      self.maintain_node_health = False
1078

    
1079
    if self.uid_pool is None:
1080
      self.uid_pool = []
1081

    
1082
    if self.default_iallocator is None:
1083
      self.default_iallocator = ""
1084

    
1085
    # reserved_lvs added before 2.2
1086
    if self.reserved_lvs is None:
1087
      self.reserved_lvs = []
1088

    
1089
    # hidden and blacklisted operating systems added before 2.2.1
1090
    if self.hidden_os is None:
1091
      self.hidden_os = []
1092

    
1093
    if self.blacklisted_os is None:
1094
      self.blacklisted_os = []
1095

    
1096
    # primary_ip_family added before 2.3
1097
    if self.primary_ip_family is None:
1098
      self.primary_ip_family = AF_INET
1099

    
1100
    if self.prealloc_wipe_disks is None:
1101
      self.prealloc_wipe_disks = False
1102

    
1103
  def ToDict(self):
1104
    """Custom function for cluster.
1105

1106
    """
1107
    mydict = super(Cluster, self).ToDict()
1108
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1109
    return mydict
1110

    
1111
  @classmethod
1112
  def FromDict(cls, val):
1113
    """Custom function for cluster.
1114

1115
    """
1116
    obj = super(Cluster, cls).FromDict(val)
1117
    if not isinstance(obj.tcpudp_port_pool, set):
1118
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1119
    return obj
1120

    
1121
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1122
    """Get the default hypervisor parameters for the cluster.
1123

1124
    @param hypervisor: the hypervisor name
1125
    @param os_name: if specified, we'll also update the defaults for this OS
1126
    @param skip_keys: if passed, list of keys not to use
1127
    @return: the defaults dict
1128

1129
    """
1130
    if skip_keys is None:
1131
      skip_keys = []
1132

    
1133
    fill_stack = [self.hvparams.get(hypervisor, {})]
1134
    if os_name is not None:
1135
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1136
      fill_stack.append(os_hvp)
1137

    
1138
    ret_dict = {}
1139
    for o_dict in fill_stack:
1140
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1141

    
1142
    return ret_dict
1143

    
1144
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1145
    """Fill a given hvparams dict with cluster defaults.
1146

1147
    @type hv_name: string
1148
    @param hv_name: the hypervisor to use
1149
    @type os_name: string
1150
    @param os_name: the OS to use for overriding the hypervisor defaults
1151
    @type skip_globals: boolean
1152
    @param skip_globals: if True, the global hypervisor parameters will
1153
        not be filled
1154
    @rtype: dict
1155
    @return: a copy of the given hvparams with missing keys filled from
1156
        the cluster defaults
1157

1158
    """
1159
    if skip_globals:
1160
      skip_keys = constants.HVC_GLOBALS
1161
    else:
1162
      skip_keys = []
1163

    
1164
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1165
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
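  # Illustrative sketch (parameter values are made up; "kernel_path" and
  # "acpi" stand in for hypervisor parameters): cluster-wide hvparams are
  # applied first, then per-OS overrides from os_hvp, then the supplied
  # hvparams, so the explicitly given values win:
  #
  #   cluster.hvparams = {"xen-pvm": {"kernel_path": "/boot/vmlinuz",
  #                                   "acpi": True}}
  #   cluster.os_hvp = {"myos": {"xen-pvm": {"acpi": False}}}
  #   cluster.SimpleFillHV("xen-pvm", "myos", {"kernel_path": "/boot/custom"})
  #     -> {"kernel_path": "/boot/custom", "acpi": False}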
1166

    
1167
  def FillHV(self, instance, skip_globals=False):
1168
    """Fill an instance's hvparams dict with cluster defaults.
1169

1170
    @type instance: L{objects.Instance}
1171
    @param instance: the instance parameter to fill
1172
    @type skip_globals: boolean
1173
    @param skip_globals: if True, the global hypervisor parameters will
1174
        not be filled
1175
    @rtype: dict
1176
    @return: a copy of the instance's hvparams with missing keys filled from
1177
        the cluster defaults
1178

1179
    """
1180
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1181
                             instance.hvparams, skip_globals)
1182

    
1183
  def SimpleFillBE(self, beparams):
1184
    """Fill a given beparams dict with cluster defaults.
1185

1186
    @type beparams: dict
1187
    @param beparams: the dict to fill
1188
    @rtype: dict
1189
    @return: a copy of the passed in beparams with missing keys filled
1190
        from the cluster defaults
1191

1192
    """
1193
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1194

    
1195
  def FillBE(self, instance):
1196
    """Fill an instance's beparams dict with cluster defaults.
1197

1198
    @type instance: L{objects.Instance}
1199
    @param instance: the instance parameter to fill
1200
    @rtype: dict
1201
    @return: a copy of the instance's beparams with missing keys filled from
1202
        the cluster defaults
1203

1204
    """
1205
    return self.SimpleFillBE(instance.beparams)
1206

    
1207
  def SimpleFillNIC(self, nicparams):
1208
    """Fill a given nicparams dict with cluster defaults.
1209

1210
    @type nicparams: dict
1211
    @param nicparams: the dict to fill
1212
    @rtype: dict
1213
    @return: a copy of the passed in nicparams with missing keys filled
1214
        from the cluster defaults
1215

1216
    """
1217
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1218

    
1219
  def SimpleFillOS(self, os_name, os_params):
1220
    """Fill an instance's osparams dict with cluster defaults.
1221

1222
    @type os_name: string
1223
    @param os_name: the OS name to use
1224
    @type os_params: dict
1225
    @param os_params: the dict to fill with default values
1226
    @rtype: dict
1227
    @return: a copy of the instance's osparams with missing keys filled from
1228
        the cluster defaults
1229

1230
    """
1231
    name_only = os_name.split("+", 1)[0]
1232
    # base OS
1233
    result = self.osparams.get(name_only, {})
1234
    # OS with variant
1235
    result = FillDict(result, self.osparams.get(os_name, {}))
1236
    # specified params
1237
    return FillDict(result, os_params)
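  # Illustrative sketch (OS and parameter names are made up): base-OS params
  # are filled first, then variant-specific params, then the given params:
  #
  #   cluster.osparams = {"myos": {"dhcp": "yes"},
  #                       "myos+secure": {"dhcp": "no"}}
  #   cluster.SimpleFillOS("myos+secure", {"partition": "single"})
  #     -> {"dhcp": "no", "partition": "single"}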
1238

    
1239

    
1240
class BlockDevStatus(ConfigObject):
1241
  """Config object representing the status of a block device."""
1242
  __slots__ = [
1243
    "dev_path",
1244
    "major",
1245
    "minor",
1246
    "sync_percent",
1247
    "estimated_time",
1248
    "is_degraded",
1249
    "ldisk_status",
1250
    ]
1251

    
1252

    
1253
class ImportExportStatus(ConfigObject):
1254
  """Config object representing the status of an import or export."""
1255
  __slots__ = [
1256
    "recent_output",
1257
    "listen_port",
1258
    "connected",
1259
    "progress_mbytes",
1260
    "progress_throughput",
1261
    "progress_eta",
1262
    "progress_percent",
1263
    "exit_status",
1264
    "error_message",
1265
    ] + _TIMESTAMPS
1266

    
1267

    
1268
class ImportExportOptions(ConfigObject):
1269
  """Options for import/export daemon
1270

1271
  @ivar key_name: X509 key name (None for cluster certificate)
1272
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1273
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1274
  @ivar magic: Used to ensure the connection goes to the right disk
1275

1276
  """
1277
  __slots__ = [
1278
    "key_name",
1279
    "ca_pem",
1280
    "compress",
1281
    "magic",
1282
    ]
1283

    
1284

    
1285
class ConfdRequest(ConfigObject):
1286
  """Object holding a confd request.
1287

1288
  @ivar protocol: confd protocol version
1289
  @ivar type: confd query type
1290
  @ivar query: query request
1291
  @ivar rsalt: requested reply salt
1292

1293
  """
1294
  __slots__ = [
1295
    "protocol",
1296
    "type",
1297
    "query",
1298
    "rsalt",
1299
    ]
1300

    
1301

    
1302
class ConfdReply(ConfigObject):
1303
  """Object holding a confd reply.
1304

1305
  @ivar protocol: confd protocol version
1306
  @ivar status: reply status code (ok, error)
1307
  @ivar answer: confd query reply
1308
  @ivar serial: configuration serial number
1309

1310
  """
1311
  __slots__ = [
1312
    "protocol",
1313
    "status",
1314
    "answer",
1315
    "serial",
1316
    ]
1317

    
1318

    
1319
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1320
  """Simple wrapper over ConfigParse that allows serialization.
1321

1322
  This class is basically ConfigParser.SafeConfigParser with two
1323
  additional methods that allow it to serialize/unserialize to/from a
1324
  buffer.
1325

1326
  """
1327
  def Dumps(self):
1328
    """Dump this instance and return the string representation."""
1329
    buf = StringIO()
1330
    self.write(buf)
1331
    return buf.getvalue()
1332

    
1333
  @classmethod
1334
  def Loads(cls, data):
1335
    """Load data from a string."""
1336
    buf = StringIO(data)
1337
    cfp = cls()
1338
    cfp.readfp(buf)
1339
    return cfp