#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
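
# Illustrative usage (not part of the original module): custom values win
# over the defaults, and skip_keys removes entries from the result, e.g.
#   FillDict({"a": 1, "b": 2}, {"b": 3}) == {"a": 1, "b": 3}
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) == {"b": 3}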


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
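
# Illustrative behaviour (not part of the original module):
#   UpgradeGroupedParams(None, {"x": 0}) == {constants.PP_DEFAULT: {"x": 0}}
#   UpgradeGroupedParams({"grp": {"x": 1}}, {"x": 0, "y": 2})
#     == {"grp": {"x": 1, "y": 2}}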


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
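
  # Illustrative round trip with a hypothetical subclass (not part of the
  # original module):
  #   class _Example(ConfigObject):
  #     __slots__ = ["name", "size"]
  #   _Example(name="x", size=10).ToDict() == {"name": "x", "size": 10}
  #   _Example.FromDict({"name": "x"}).size is None  # unset slots read as None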


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
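
  # Illustrative usage (not part of the original module; "obj" stands for any
  # taggable object):
  #   obj.AddTag("db+backup")                # tags are kept as a set in memory
  #   obj.ToDict()["tags"]                   # ...and serialized as a list
  #   TaggableObject.ValidateTag("bad tag")  # raises TagError (space not allowed)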


class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "name",
    "ip",
    "netmask",
    "netdev",
    "ip_family"
    ]


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is any disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
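
  # Note on the serialized layout (illustrative, not part of the original
  # module): after ToDict(), "cluster" is a plain dict, while "nodes",
  # "instances" and "nodegroups" are dicts whose values are the serialized
  # objects; the container helpers leave the keys untouched.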


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)
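
  # Illustrative check (the bridge name is a made-up example):
  #   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
  #                             constants.NIC_LINK: "xen-br0"})  # passes
  # while a bridged NIC with an empty link raises ConfigurationError.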


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    return None
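
  # Illustrative values (the volume group and LV names are made up): an LV
  # disk with logical_id ("xenvg", "disk0") yields "/dev/xenvg/disk0", while
  # DRBD disks have no static path and return None.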

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a non-negative
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
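
  # Illustrative result (node names are made up): for a DRBD8 disk mirrored
  # between "node1" and "node2" whose LV children live on the same nodes,
  # the tree collapses to [(node1, <drbd disk>), (node2, <drbd disk>)].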

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node == None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
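
  # Illustrative result (names are made up): for a plain LVM-backed instance
  #   {"node1.example.com": ["xenvg/disk0"]}
  # while for DRBD the backing LVs are listed under both mirror nodes.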

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, the variant is returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
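
  # Illustrative behaviour (the OS name is a made-up example):
  #   OS.SplitNameVariant("debootstrap+default") == ["debootstrap", "default"]
  #   OS.GetName("debootstrap+default") == "debootstrap"
  #   OS.GetVariant("debootstrap") == ""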


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "serial_no",
    "alloc_policy",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    if self.ndparams is None:
      self.ndparams = constants.NDC_DEFAULTS

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
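
  # The resulting precedence, lowest to highest, is: the cluster-wide hvparams
  # for the hypervisor, then the per-OS os_hvp overrides, then the hvparams
  # passed in (typically the instance's own settings).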

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: The node group the node belongs to
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

    
1347
  def SimpleFillND(self, ndparams):
1348
    """Fill a given ndparams dict with defaults.
1349

1350
    @type ndparams: dict
1351
    @param ndparams: the dict to fill
1352
    @rtype: dict
1353
    @return: a copy of the passed in ndparams with missing keys filled
1354
        from the cluster defaults
1355

1356
    """
1357
    return FillDict(self.ndparams, ndparams)
1358

    
1359

    
1360
class BlockDevStatus(ConfigObject):
1361
  """Config object representing the status of a block device."""
1362
  __slots__ = [
1363
    "dev_path",
1364
    "major",
1365
    "minor",
1366
    "sync_percent",
1367
    "estimated_time",
1368
    "is_degraded",
1369
    "ldisk_status",
1370
    ]
1371

    
1372

    
1373
class ImportExportStatus(ConfigObject):
1374
  """Config object representing the status of an import or export."""
1375
  __slots__ = [
1376
    "recent_output",
1377
    "listen_port",
1378
    "connected",
1379
    "progress_mbytes",
1380
    "progress_throughput",
1381
    "progress_eta",
1382
    "progress_percent",
1383
    "exit_status",
1384
    "error_message",
1385
    ] + _TIMESTAMPS
1386

    
1387

    
1388
class ImportExportOptions(ConfigObject):
1389
  """Options for import/export daemon
1390

1391
  @ivar key_name: X509 key name (None for cluster certificate)
1392
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1393
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1394
  @ivar magic: Used to ensure the connection goes to the right disk
1395
  @ivar ipv6: Whether to use IPv6
1396
  @ivar connect_timeout: Number of seconds for establishing connection
1397

1398
  """
1399
  __slots__ = [
1400
    "key_name",
1401
    "ca_pem",
1402
    "compress",
1403
    "magic",
1404
    "ipv6",
1405
    "connect_timeout",
1406
    ]
1407

    
1408

    
1409
class ConfdRequest(ConfigObject):
1410
  """Object holding a confd request.
1411

1412
  @ivar protocol: confd protocol version
1413
  @ivar type: confd query type
1414
  @ivar query: query request
1415
  @ivar rsalt: requested reply salt
1416

1417
  """
1418
  __slots__ = [
1419
    "protocol",
1420
    "type",
1421
    "query",
1422
    "rsalt",
1423
    ]
1424

    
1425

    
1426
class ConfdReply(ConfigObject):
1427
  """Object holding a confd reply.
1428

1429
  @ivar protocol: confd protocol version
1430
  @ivar status: reply status code (ok, error)
1431
  @ivar answer: confd query reply
1432
  @ivar serial: configuration serial number
1433

1434
  """
1435
  __slots__ = [
1436
    "protocol",
1437
    "status",
1438
    "answer",
1439
    "serial",
1440
    ]
1441

    
1442

    
1443
class QueryFieldDefinition(ConfigObject):
1444
  """Object holding a query field definition.
1445

1446
  @ivar name: Field name
1447
  @ivar title: Human-readable title
1448
  @ivar kind: Field type
1449
  @ivar doc: Human-readable description
1450

1451
  """
1452
  __slots__ = [
1453
    "name",
1454
    "title",
1455
    "kind",
1456
    "doc",
1457
    ]
1458

    
1459

    
1460
class _QueryResponseBase(ConfigObject):
1461
  __slots__ = [
1462
    "fields",
1463
    ]
1464

    
1465
  def ToDict(self):
1466
    """Custom function for serializing.
1467

1468
    """
1469
    mydict = super(_QueryResponseBase, self).ToDict()
1470
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1471
    return mydict
1472

    
1473
  @classmethod
1474
  def FromDict(cls, val):
1475
    """Custom function for de-serializing.
1476

1477
    """
1478
    obj = super(_QueryResponseBase, cls).FromDict(val)
1479
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1480
    return obj
1481

    
1482

    
1483
class QueryRequest(ConfigObject):
1484
  """Object holding a query request.
1485

1486
  """
1487
  __slots__ = [
1488
    "what",
1489
    "fields",
1490
    "qfilter",
1491
    ]
1492

    
1493

    
1494
class QueryResponse(_QueryResponseBase):
1495
  """Object holding the response to a query.
1496

1497
  @ivar fields: List of L{QueryFieldDefinition} objects
1498
  @ivar data: Requested data
1499

1500
  """
1501
  __slots__ = [
1502
    "data",
1503
    ]
1504

    
1505

    
1506
class QueryFieldsRequest(ConfigObject):
1507
  """Object holding a request for querying available fields.
1508

1509
  """
1510
  __slots__ = [
1511
    "what",
1512
    "fields",
1513
    ]
1514

    
1515

    
1516
class QueryFieldsResponse(_QueryResponseBase):
1517
  """Object holding the response to a query for fields.
1518

1519
  @ivar fields: List of L{QueryFieldDefinition} objects
1520

1521
  """
1522
  __slots__ = [
1523
    ]
1524

    
1525

    
1526
class MigrationStatus(ConfigObject):
1527
  """Object holding the status of a migration.
1528

1529
  """
1530
  __slots__ = [
1531
    "status",
1532
    "transferred_ram",
1533
    "total_ram",
1534
    ]
1535

    
1536

    
1537
class InstanceConsole(ConfigObject):
1538
  """Object describing how to access the console of an instance.
1539

1540
  """
1541
  __slots__ = [
1542
    "instance",
1543
    "kind",
1544
    "message",
1545
    "host",
1546
    "port",
1547
    "user",
1548
    "command",
1549
    "display",
1550
    ]
1551

    
1552
  def Validate(self):
1553
    """Validates contents of this object.
1554

1555
    """
1556
    assert self.kind in constants.CONS_ALL, "Unknown console type"
1557
    assert self.instance, "Missing instance name"
1558
    assert self.message or self.kind in [constants.CONS_SSH,
1559
                                         constants.CONS_SPICE,
1560
                                         constants.CONS_VNC]
1561
    assert self.host or self.kind == constants.CONS_MESSAGE
1562
    assert self.port or self.kind in [constants.CONS_MESSAGE,
1563
                                      constants.CONS_SSH]
1564
    assert self.user or self.kind in [constants.CONS_MESSAGE,
1565
                                      constants.CONS_SPICE,
1566
                                      constants.CONS_VNC]
1567
    assert self.command or self.kind in [constants.CONS_MESSAGE,
1568
                                         constants.CONS_SPICE,
1569
                                         constants.CONS_VNC]
1570
    assert self.display or self.kind in [constants.CONS_MESSAGE,
1571
                                         constants.CONS_SPICE,
1572
                                         constants.CONS_SSH]
1573
    return True
1574

    
1575

    
1576
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1577
  """Simple wrapper over ConfigParse that allows serialization.
1578

1579
  This class is basically ConfigParser.SafeConfigParser with two
1580
  additional methods that allow it to serialize/unserialize to/from a
1581
  buffer.
1582

1583
  """
1584
  def Dumps(self):
1585
    """Dump this instance and return the string representation."""
1586
    buf = StringIO()
1587
    self.write(buf)
1588
    return buf.getvalue()
1589

    
1590
  @classmethod
1591
  def Loads(cls, data):
1592
    """Load data from a string."""
1593
    buf = StringIO(data)
1594
    cfp = cls()
1595
    cfp.readfp(buf)
1596
    return cfp
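
  # Illustrative round trip (not part of the original module; "cfg" stands
  # for any SerializableConfigParser instance):
  #   text = cfg.Dumps()
  #   cfg2 = SerializableConfigParser.Loads(text)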