1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import time
42
from cStringIO import StringIO
43

    
44
from ganeti import errors
45
from ganeti import constants
46
from ganeti import netutils
47

    
48
from socket import AF_INET
49

    
50

    
51
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
52
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
53

    
54
_TIMESTAMPS = ["ctime", "mtime"]
55
_UUID = ["uuid"]
56

    
57

    
58
def FillDict(defaults_dict, custom_dict, skip_keys=None):
59
  """Basic function to apply settings on top a default dict.
60

61
  @type defaults_dict: dict
62
  @param defaults_dict: dictionary holding the default values
63
  @type custom_dict: dict
64
  @param custom_dict: dictionary holding customized values
65
  @type skip_keys: list
66
  @param skip_keys: which keys not to fill
67
  @rtype: dict
68
  @return: dict with the 'full' values
69

70
  """
71
  ret_dict = copy.deepcopy(defaults_dict)
72
  ret_dict.update(custom_dict)
73
  if skip_keys:
74
    for k in skip_keys:
75
      try:
76
        del ret_dict[k]
77
      except KeyError:
78
        pass
79
  return ret_dict
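# Illustrative sketch (not from the original source): defaults are
# deep-copied, overridden by the custom values, and skipped keys dropped:
#
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])   # -> {"b": 3}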
80

    
81

    
82
def UpgradeGroupedParams(target, defaults):
83
  """Update all groups for the target parameter.
84

85
  @type target: dict of dicts
86
  @param target: {group: {parameter: value}}
87
  @type defaults: dict
88
  @param defaults: default parameter values
89

90
  """
91
  if target is None:
92
    target = {constants.PP_DEFAULT: defaults}
93
  else:
94
    for group in target:
95
      target[group] = FillDict(defaults, target[group])
96
  return target
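# Illustrative sketch (not from the original source): with defaults
# {"mode": "bridged", "link": ""}, a target of {"group1": {"link": "br0"}}
# becomes {"group1": {"mode": "bridged", "link": "br0"}}, while a target of
# None becomes {constants.PP_DEFAULT: defaults}.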
97

    
98

    
99
class ConfigObject(object):
100
  """A generic config object.
101

102
  It has the following properties:
103

104
    - provides somewhat safe recursive unpickling and pickling for its classes
105
    - unset attributes which are defined in slots are always returned
106
      as None instead of raising an error
107

108
  Classes derived from this must always declare __slots__ (we use many
109
  config objects and the memory reduction is useful)
110

111
  """
112
  __slots__ = []
113

    
114
  def __init__(self, **kwargs):
115
    for k, v in kwargs.iteritems():
116
      setattr(self, k, v)
117

    
118
  def __getattr__(self, name):
119
    if name not in self._all_slots():
120
      raise AttributeError("Invalid object attribute %s.%s" %
121
                           (type(self).__name__, name))
122
    return None
123

    
124
  def __setstate__(self, state):
125
    slots = self._all_slots()
126
    for name in state:
127
      if name in slots:
128
        setattr(self, name, state[name])
129

    
130
  @classmethod
131
  def _all_slots(cls):
132
    """Compute the list of all declared slots for a class.
133

134
    """
135
    slots = []
136
    for parent in cls.__mro__:
137
      slots.extend(getattr(parent, "__slots__", []))
138
    return slots
139

    
140
  def ToDict(self):
141
    """Convert to a dict holding only standard python types.
142

143
    The generic routine just dumps all of this object's attributes in
144
    a dict. It does not work if the class has children who are
145
    ConfigObjects themselves (e.g. the nics list in an Instance), in
146
    which case the object should subclass the function in order to
147
    make sure all objects returned are only standard python types.
148

149
    """
150
    result = {}
151
    for name in self._all_slots():
152
      value = getattr(self, name, None)
153
      if value is not None:
154
        result[name] = value
155
    return result
156

    
157
  __getstate__ = ToDict
158

    
159
  @classmethod
160
  def FromDict(cls, val):
161
    """Create an object from a dictionary.
162

163
    This generic routine takes a dict, instantiates a new instance of
164
    the given class, and sets attributes based on the dict content.
165

166
    As for `ToDict`, this does not work if the class has children
167
    who are ConfigObjects themselves (e.g. the nics list in an
168
    Instance), in which case the object should subclass the function
169
    and alter the objects.
170

171
    """
172
    if not isinstance(val, dict):
173
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
174
                                      " expected dict, got %s" % type(val))
175
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
176
    obj = cls(**val_str) # pylint: disable=W0142
177
    return obj
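  # Illustrative round-trip (sketch; NIC is defined further below in this
  # module):
  #
  #   nic = NIC(mac="aa:00:00:11:22:33", nicparams={})
  #   NIC.FromDict(nic.ToDict()).mac   # -> "aa:00:00:11:22:33"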
178

    
179
  @staticmethod
180
  def _ContainerToDicts(container):
181
    """Convert the elements of a container to standard python types.
182

183
    This method converts a container with elements derived from
184
    ConfigData to standard python types. If the container is a dict,
185
    we don't touch the keys, only the values.
186

187
    """
188
    if isinstance(container, dict):
189
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
190
    elif isinstance(container, (list, tuple, set, frozenset)):
191
      ret = [elem.ToDict() for elem in container]
192
    else:
193
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
194
                      type(container))
195
    return ret
196

    
197
  @staticmethod
198
  def _ContainerFromDicts(source, c_type, e_type):
199
    """Convert a container from standard python types.
200

201
    This method converts a container with standard python types to
202
    ConfigData objects. If the container is a dict, we don't touch the
203
    keys, only the values.
204

205
    """
206
    if not isinstance(c_type, type):
207
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
208
                      " not a type" % type(c_type))
209
    if source is None:
210
      source = c_type()
211
    if c_type is dict:
212
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
213
    elif c_type in (list, tuple, set, frozenset):
214
      ret = c_type([e_type.FromDict(elem) for elem in source])
215
    else:
216
      raise TypeError("Invalid container type %s passed to"
217
                      " _ContainerFromDicts" % c_type)
218
    return ret
219

    
220
  def Copy(self):
221
    """Makes a deep copy of the current object and its children.
222

223
    """
224
    dict_form = self.ToDict()
225
    clone_obj = self.__class__.FromDict(dict_form)
226
    return clone_obj
227

    
228
  def __repr__(self):
229
    """Implement __repr__ for ConfigObjects."""
230
    return repr(self.ToDict())
231

    
232
  def UpgradeConfig(self):
233
    """Fill defaults for missing configuration values.
234

235
    This method will be called at configuration load time, and its
236
    implementation will be object dependent.
237

238
    """
239
    pass
240

    
241

    
242
class TaggableObject(ConfigObject):
243
  """An generic class supporting tags.
244

245
  """
246
  __slots__ = ["tags"]
247
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
248

    
249
  @classmethod
250
  def ValidateTag(cls, tag):
251
    """Check if a tag is valid.
252

253
    If the tag is invalid, an errors.TagError will be raised. The
254
    function has no return value.
255

256
    """
257
    if not isinstance(tag, basestring):
258
      raise errors.TagError("Invalid tag type (not a string)")
259
    if len(tag) > constants.MAX_TAG_LEN:
260
      raise errors.TagError("Tag too long (>%d characters)" %
261
                            constants.MAX_TAG_LEN)
262
    if not tag:
263
      raise errors.TagError("Tags cannot be empty")
264
    if not cls.VALID_TAG_RE.match(tag):
265
      raise errors.TagError("Tag contains invalid characters")
266

    
267
  def GetTags(self):
268
    """Return the tags list.
269

270
    """
271
    tags = getattr(self, "tags", None)
272
    if tags is None:
273
      tags = self.tags = set()
274
    return tags
275

    
276
  def AddTag(self, tag):
277
    """Add a new tag.
278

279
    """
280
    self.ValidateTag(tag)
281
    tags = self.GetTags()
282
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
283
      raise errors.TagError("Too many tags")
284
    self.GetTags().add(tag)
285

    
286
  def RemoveTag(self, tag):
287
    """Remove a tag.
288

289
    """
290
    self.ValidateTag(tag)
291
    tags = self.GetTags()
292
    try:
293
      tags.remove(tag)
294
    except KeyError:
295
      raise errors.TagError("Tag not found")
296

    
297
  def ToDict(self):
298
    """Taggable-object-specific conversion to standard python types.
299

300
    This replaces the tags set with a list.
301

302
    """
303
    bo = super(TaggableObject, self).ToDict()
304

    
305
    tags = bo.get("tags", None)
306
    if isinstance(tags, set):
307
      bo["tags"] = list(tags)
308
    return bo
309

    
310
  @classmethod
311
  def FromDict(cls, val):
312
    """Custom function for instances.
313

314
    """
315
    obj = super(TaggableObject, cls).FromDict(val)
316
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
317
      obj.tags = set(obj.tags)
318
    return obj
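  # Illustrative sketch (Node, defined further below, derives from this
  # class):
  #
  #   node = Node(name="node1.example.com")
  #   node.AddTag("env:prod")
  #   node.GetTags()   # -> set(["env:prod"])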
319

    
320

    
321
class MasterNetworkParameters(ConfigObject):
322
  """Network configuration parameters for the master
323

324
  @ivar name: master name
325
  @ivar ip: master IP
326
  @ivar netmask: master netmask
327
  @ivar netdev: master network device
328
  @ivar ip_family: master IP family
329

330
  """
331
  __slots__ = [
332
    "name",
333
    "ip",
334
    "netmask",
335
    "netdev",
336
    "ip_family"
337
    ]
338

    
339

    
340
class ConfigData(ConfigObject):
341
  """Top-level config object."""
342
  __slots__ = [
343
    "version",
344
    "cluster",
345
    "nodes",
346
    "nodegroups",
347
    "instances",
348
    "serial_no",
349
    ] + _TIMESTAMPS
350

    
351
  def ToDict(self):
352
    """Custom function for top-level config data.
353

354
    This just replaces the list of instances, nodes and the cluster
355
    with standard python types.
356

357
    """
358
    mydict = super(ConfigData, self).ToDict()
359
    mydict["cluster"] = mydict["cluster"].ToDict()
360
    for key in "nodes", "instances", "nodegroups":
361
      mydict[key] = self._ContainerToDicts(mydict[key])
362

    
363
    return mydict
364

    
365
  @classmethod
366
  def FromDict(cls, val):
367
    """Custom function for top-level config data
368

369
    """
370
    obj = super(ConfigData, cls).FromDict(val)
371
    obj.cluster = Cluster.FromDict(obj.cluster)
372
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
373
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
374
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
375
    return obj
376

    
377
  def HasAnyDiskOfType(self, dev_type):
378
    """Check if in there is at disk of the given type in the configuration.
379

380
    @type dev_type: L{constants.LDS_BLOCK}
381
    @param dev_type: the type to look for
382
    @rtype: boolean
383
    @return: boolean indicating if a disk of the given type was found or not
384

385
    """
386
    for instance in self.instances.values():
387
      for disk in instance.disks:
388
        if disk.IsBasedOnDiskType(dev_type):
389
          return True
390
    return False
391

    
392
  def UpgradeConfig(self):
393
    """Fill defaults for missing configuration values.
394

395
    """
396
    self.cluster.UpgradeConfig()
397
    for node in self.nodes.values():
398
      node.UpgradeConfig()
399
    for instance in self.instances.values():
400
      instance.UpgradeConfig()
401
    if self.nodegroups is None:
402
      self.nodegroups = {}
403
    for nodegroup in self.nodegroups.values():
404
      nodegroup.UpgradeConfig()
405
    if self.cluster.drbd_usermode_helper is None:
406
      # To decide if we set a helper, let's check if at least one instance has
407
      # a DRBD disk. This does not cover all the possible scenarios but it
408
      # gives a good approximation.
409
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
410
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
411

    
412

    
413
class NIC(ConfigObject):
414
  """Config object representing a network card."""
415
  __slots__ = ["mac", "ip", "nicparams"]
416

    
417
  @classmethod
418
  def CheckParameterSyntax(cls, nicparams):
419
    """Check the given parameters for validity.
420

421
    @type nicparams:  dict
422
    @param nicparams: dictionary with parameter names/value
423
    @raise errors.ConfigurationError: when a parameter is not valid
424

425
    """
426
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
427
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
428
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
429
      raise errors.ConfigurationError(err)
430

    
431
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
432
        not nicparams[constants.NIC_LINK]):
433
      err = "Missing bridged nic link"
434
      raise errors.ConfigurationError(err)
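  # Illustrative sketch: a bridged NIC must name its link, so
  #
  #   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
  #                             constants.NIC_LINK: "xen-br0"})
  #
  # passes, while an empty NIC_LINK raises errors.ConfigurationError.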
435

    
436

    
437
class Disk(ConfigObject):
438
  """Config object representing a block device."""
439
  __slots__ = ["dev_type", "logical_id", "physical_id",
440
               "children", "iv_name", "size", "mode"]
441

    
442
  def CreateOnSecondary(self):
443
    """Test if this device needs to be created on a secondary node."""
444
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
445

    
446
  def AssembleOnSecondary(self):
447
    """Test if this device needs to be assembled on a secondary node."""
448
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
449

    
450
  def OpenOnSecondary(self):
451
    """Test if this device needs to be opened on a secondary node."""
452
    return self.dev_type in (constants.LD_LV,)
453

    
454
  def StaticDevPath(self):
455
    """Return the device path if this device type has a static one.
456

457
    Some devices (LVM for example) always live at the same /dev/ path,
458
    irrespective of their status. For such devices, we return this
459
    path, for others we return None.
460

461
    @warning: The path returned is not a normalized pathname; callers
462
        should check that it is a valid path.
463

464
    """
465
    if self.dev_type == constants.LD_LV:
466
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
467
    elif self.dev_type == constants.LD_BLOCKDEV:
468
      return self.logical_id[1]
469
    return None
470

    
471
  def ChildrenNeeded(self):
472
    """Compute the needed number of children for activation.
473

474
    This method will return either -1 (all children) or a positive
475
    number denoting the minimum number of children needed for
476
    activation (only mirrored devices will usually return >=0).
477

478
    Currently, only DRBD8 supports diskless activation (therefore we
479
    return 0); for all others we keep the previous semantics and return
480
    -1.
481

482
    """
483
    if self.dev_type == constants.LD_DRBD8:
484
      return 0
485
    return -1
486

    
487
  def IsBasedOnDiskType(self, dev_type):
488
    """Check if the disk or its children are based on the given type.
489

490
    @type dev_type: L{constants.LDS_BLOCK}
491
    @param dev_type: the type to look for
492
    @rtype: boolean
493
    @return: boolean indicating if a device of the given type was found or not
494

495
    """
496
    if self.children:
497
      for child in self.children:
498
        if child.IsBasedOnDiskType(dev_type):
499
          return True
500
    return self.dev_type == dev_type
501

    
502
  def GetNodes(self, node):
503
    """This function returns the nodes this device lives on.
504

505
    Given the node on which the parent of the device lives (or, in
506
    case of a top-level device, the primary node of the device's
507
    instance), this function will return a list of nodes on which this
508
    device needs to (or can) be assembled.
509

510
    """
511
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
512
                         constants.LD_BLOCKDEV]:
513
      result = [node]
514
    elif self.dev_type in constants.LDS_DRBD:
515
      result = [self.logical_id[0], self.logical_id[1]]
516
      if node not in result:
517
        raise errors.ConfigurationError("DRBD device passed unknown node")
518
    else:
519
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
520
    return result
521

    
522
  def ComputeNodeTree(self, parent_node):
523
    """Compute the node/disk tree for this disk and its children.
524

525
    This method, given the node on which the parent disk lives, will
526
    return the list of all (node, disk) pairs which describe the disk
527
    tree in the most compact way. For example, a drbd/lvm stack
528
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
529
    which represents all the top-level devices on the nodes.
530

531
    """
532
    my_nodes = self.GetNodes(parent_node)
533
    result = [(node, self) for node in my_nodes]
534
    if not self.children:
535
      # leaf device
536
      return result
537
    for node in my_nodes:
538
      for child in self.children:
539
        child_result = child.ComputeNodeTree(node)
540
        if len(child_result) == 1:
541
          # child (and all its descendants) is simple, doesn't split
542
          # over multiple hosts, so we don't need to describe it, our
543
          # own entry for this node describes it completely
544
          continue
545
        else:
546
          # check if child nodes differ from my nodes; note that
547
          # subdisk can differ from the child itself, and be instead
548
          # one of its descendants
549
          for subnode, subdisk in child_result:
550
            if subnode not in my_nodes:
551
              result.append((subnode, subdisk))
552
            # otherwise child is under our own node, so we ignore this
553
            # entry (but probably the other results in the list will
554
            # be different)
555
    return result
556

    
557
  def ComputeGrowth(self, amount):
558
    """Compute the per-VG growth requirements.
559

560
    This only works for VG-based disks.
561

562
    @type amount: integer
563
    @param amount: the desired increase in (user-visible) disk space
564
    @rtype: dict
565
    @return: a dictionary of volume-groups and the required size
566

567
    """
568
    if self.dev_type == constants.LD_LV:
569
      return {self.logical_id[0]: amount}
570
    elif self.dev_type == constants.LD_DRBD8:
571
      if self.children:
572
        return self.children[0].ComputeGrowth(amount)
573
      else:
574
        return {}
575
    else:
576
      # Other disk types do not require VG space
577
      return {}
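  # Illustrative sketch: growth of an LVM-backed disk is charged to its
  # volume group, e.g.
  #
  #   Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"),
  #        size=1024).ComputeGrowth(512)   # -> {"xenvg": 512}
  #
  # while disk types that need no VG space return {}.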
578

    
579
  def RecordGrow(self, amount):
580
    """Update the size of this disk after growth.
581

582
    This method recurses over the disk's children and updates their
583
    size correspondingly. The method needs to be kept in sync with the
584
    actual algorithms from bdev.
585

586
    """
587
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
588
      self.size += amount
589
    elif self.dev_type == constants.LD_DRBD8:
590
      if self.children:
591
        self.children[0].RecordGrow(amount)
592
      self.size += amount
593
    else:
594
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
595
                                   " disk type %s" % self.dev_type)
596

    
597
  def UnsetSize(self):
598
    """Sets recursively the size to zero for the disk and its children.
599

600
    """
601
    if self.children:
602
      for child in self.children:
603
        child.UnsetSize()
604
    self.size = 0
605

    
606
  def SetPhysicalID(self, target_node, nodes_ip):
607
    """Convert the logical ID to the physical ID.
608

609
    This is used only for drbd, which needs ip/port configuration.
610

611
    The routine descends and also updates its children, because
612
    this helps when only the top device is passed to the remote
613
    node.
614

615
    Arguments:
616
      - target_node: the node we wish to configure for
617
      - nodes_ip: a mapping of node name to ip
618

619
    The target_node must exist in nodes_ip, and must be one of the
620
    nodes in the logical ID for each of the DRBD devices encountered
621
    in the disk tree.
622

623
    """
624
    if self.children:
625
      for child in self.children:
626
        child.SetPhysicalID(target_node, nodes_ip)
627

    
628
    if self.logical_id is None and self.physical_id is not None:
629
      return
630
    if self.dev_type in constants.LDS_DRBD:
631
      pnode, snode, port, pminor, sminor, secret = self.logical_id
632
      if target_node not in (pnode, snode):
633
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
634
                                        target_node)
635
      pnode_ip = nodes_ip.get(pnode, None)
636
      snode_ip = nodes_ip.get(snode, None)
637
      if pnode_ip is None or snode_ip is None:
638
        raise errors.ConfigurationError("Can't find primary or secondary node"
639
                                        " for %s" % str(self))
640
      p_data = (pnode_ip, port)
641
      s_data = (snode_ip, port)
642
      if pnode == target_node:
643
        self.physical_id = p_data + s_data + (pminor, secret)
644
      else: # it must be secondary, we tested above
645
        self.physical_id = s_data + p_data + (sminor, secret)
646
    else:
647
      self.physical_id = self.logical_id
648
    return
649

    
650
  def ToDict(self):
651
    """Disk-specific conversion to standard python types.
652

653
    This replaces the children lists of objects with lists of
654
    standard python types.
655

656
    """
657
    bo = super(Disk, self).ToDict()
658

    
659
    for attr in ("children",):
660
      alist = bo.get(attr, None)
661
      if alist:
662
        bo[attr] = self._ContainerToDicts(alist)
663
    return bo
664

    
665
  @classmethod
666
  def FromDict(cls, val):
667
    """Custom function for Disks
668

669
    """
670
    obj = super(Disk, cls).FromDict(val)
671
    if obj.children:
672
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
673
    if obj.logical_id and isinstance(obj.logical_id, list):
674
      obj.logical_id = tuple(obj.logical_id)
675
    if obj.physical_id and isinstance(obj.physical_id, list):
676
      obj.physical_id = tuple(obj.physical_id)
677
    if obj.dev_type in constants.LDS_DRBD:
678
      # we need a tuple of length six here
679
      if len(obj.logical_id) < 6:
680
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
681
    return obj
682

    
683
  def __str__(self):
684
    """Custom str() formatter for disks.
685

686
    """
687
    if self.dev_type == constants.LD_LV:
688
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
689
    elif self.dev_type in constants.LDS_DRBD:
690
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
691
      val = "<DRBD8("
692
      if self.physical_id is None:
693
        phy = "unconfigured"
694
      else:
695
        phy = ("configured as %s:%s %s:%s" %
696
               (self.physical_id[0], self.physical_id[1],
697
                self.physical_id[2], self.physical_id[3]))
698

    
699
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
700
              (node_a, minor_a, node_b, minor_b, port, phy))
701
      if self.children and self.children.count(None) == 0:
702
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
703
      else:
704
        val += "no local storage"
705
    else:
706
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
707
             (self.dev_type, self.logical_id, self.physical_id, self.children))
708
    if self.iv_name is None:
709
      val += ", not visible"
710
    else:
711
      val += ", visible as /dev/%s" % self.iv_name
712
    if isinstance(self.size, int):
713
      val += ", size=%dm)>" % self.size
714
    else:
715
      val += ", size='%s')>" % (self.size,)
716
    return val
717

    
718
  def Verify(self):
719
    """Checks that this disk is correctly configured.
720

721
    """
722
    all_errors = []
723
    if self.mode not in constants.DISK_ACCESS_SET:
724
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
725
    return all_errors
726

    
727
  def UpgradeConfig(self):
728
    """Fill defaults for missing configuration values.
729

730
    """
731
    if self.children:
732
      for child in self.children:
733
        child.UpgradeConfig()
734
    # add here config upgrade for this disk
735

    
736

    
737
class Instance(TaggableObject):
738
  """Config object representing an instance."""
739
  __slots__ = [
740
    "name",
741
    "primary_node",
742
    "os",
743
    "hypervisor",
744
    "hvparams",
745
    "beparams",
746
    "osparams",
747
    "admin_up",
748
    "nics",
749
    "disks",
750
    "disk_template",
751
    "network_port",
752
    "serial_no",
753
    ] + _TIMESTAMPS + _UUID
754

    
755
  def _ComputeSecondaryNodes(self):
756
    """Compute the list of secondary nodes.
757

758
    This is a simple wrapper over _ComputeAllNodes.
759

760
    """
761
    all_nodes = set(self._ComputeAllNodes())
762
    all_nodes.discard(self.primary_node)
763
    return tuple(all_nodes)
764

    
765
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
766
                             "List of secondary nodes")
767

    
768
  def _ComputeAllNodes(self):
769
    """Compute the list of all nodes.
770

771
    Since the data is already there (in the drbd disks), keeping it as
772
    a separate normal attribute is redundant and if not properly
773
    synchronised can cause problems. Thus it's better to compute it
774
    dynamically.
775

776
    """
777
    def _Helper(nodes, device):
778
      """Recursively computes nodes given a top device."""
779
      if device.dev_type in constants.LDS_DRBD:
780
        nodea, nodeb = device.logical_id[:2]
781
        nodes.add(nodea)
782
        nodes.add(nodeb)
783
      if device.children:
784
        for child in device.children:
785
          _Helper(nodes, child)
786

    
787
    all_nodes = set()
788
    all_nodes.add(self.primary_node)
789
    for device in self.disks:
790
      _Helper(all_nodes, device)
791
    return tuple(all_nodes)
792

    
793
  all_nodes = property(_ComputeAllNodes, None, None,
794
                       "List of all nodes of the instance")
795

    
796
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
797
    """Provide a mapping of nodes to LVs this instance owns.
798

799
    This function figures out what logical volumes should belong on
800
    which nodes, recursing through a device tree.
801

802
    @param lvmap: optional dictionary to receive the
803
        'node' : ['lv', ...] data.
804

805
    @return: None if lvmap arg is given, otherwise, a dictionary of
806
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
807
        volumeN is of the form "vg_name/lv_name", compatible with
808
        GetVolumeList()
809

810
    """
811
    if node is None:
812
      node = self.primary_node
813

    
814
    if lvmap is None:
815
      lvmap = {
816
        node: [],
817
        }
818
      ret = lvmap
819
    else:
820
      if not node in lvmap:
821
        lvmap[node] = []
822
      ret = None
823

    
824
    if not devs:
825
      devs = self.disks
826

    
827
    for dev in devs:
828
      if dev.dev_type == constants.LD_LV:
829
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
830

    
831
      elif dev.dev_type in constants.LDS_DRBD:
832
        if dev.children:
833
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
834
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
835

    
836
      elif dev.children:
837
        self.MapLVsByNode(lvmap, dev.children, node)
838

    
839
    return ret
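  # Illustrative result shape (sketch, with made-up names) for a plain
  # LVM-backed instance whose primary node is "node1":
  #
  #   inst.MapLVsByNode()
  #   # -> {"node1": ["xenvg/lv-disk0", "xenvg/lv-disk1"]}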
840

    
841
  def FindDisk(self, idx):
842
    """Find a disk given having a specified index.
843

844
    This is just a wrapper that does validation of the index.
845

846
    @type idx: int
847
    @param idx: the disk index
848
    @rtype: L{Disk}
849
    @return: the corresponding disk
850
    @raise errors.OpPrereqError: when the given index is not valid
851

852
    """
853
    try:
854
      idx = int(idx)
855
      return self.disks[idx]
856
    except (TypeError, ValueError), err:
857
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
858
                                 errors.ECODE_INVAL)
859
    except IndexError:
860
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
861
                                 " 0 to %d" % (idx, len(self.disks) - 1),
862
                                 errors.ECODE_INVAL)
863

    
864
  def ToDict(self):
865
    """Instance-specific conversion to standard python types.
866

867
    This replaces the children lists of objects with lists of standard
868
    python types.
869

870
    """
871
    bo = super(Instance, self).ToDict()
872

    
873
    for attr in "nics", "disks":
874
      alist = bo.get(attr, None)
875
      if alist:
876
        nlist = self._ContainerToDicts(alist)
877
      else:
878
        nlist = []
879
      bo[attr] = nlist
880
    return bo
881

    
882
  @classmethod
883
  def FromDict(cls, val):
884
    """Custom function for instances.
885

886
    """
887
    obj = super(Instance, cls).FromDict(val)
888
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
889
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
890
    return obj
891

    
892
  def UpgradeConfig(self):
893
    """Fill defaults for missing configuration values.
894

895
    """
896
    for nic in self.nics:
897
      nic.UpgradeConfig()
898
    for disk in self.disks:
899
      disk.UpgradeConfig()
900
    if self.hvparams:
901
      for key in constants.HVC_GLOBALS:
902
        try:
903
          del self.hvparams[key]
904
        except KeyError:
905
          pass
906
    if self.osparams is None:
907
      self.osparams = {}
908

    
909

    
910
class OS(ConfigObject):
911
  """Config object representing an operating system.
912

913
  @type supported_parameters: list
914
  @ivar supported_parameters: a list of tuples, name and description,
915
      containing the parameters supported by this OS
916

917
  @type VARIANT_DELIM: string
918
  @cvar VARIANT_DELIM: the variant delimiter
919

920
  """
921
  __slots__ = [
922
    "name",
923
    "path",
924
    "api_versions",
925
    "create_script",
926
    "export_script",
927
    "import_script",
928
    "rename_script",
929
    "verify_script",
930
    "supported_variants",
931
    "supported_parameters",
932
    ]
933

    
934
  VARIANT_DELIM = "+"
935

    
936
  @classmethod
937
  def SplitNameVariant(cls, name):
938
    """Splits the name into the proper name and variant.
939

940
    @param name: the OS (unprocessed) name
941
    @rtype: list
942
    @return: a list of two elements; if the original name didn't
943
        contain a variant, the variant is returned as an empty string
944

945
    """
946
    nv = name.split(cls.VARIANT_DELIM, 1)
947
    if len(nv) == 1:
948
      nv.append("")
949
    return nv
950

    
951
  @classmethod
952
  def GetName(cls, name):
953
    """Returns the proper name of the os (without the variant).
954

955
    @param name: the OS (unprocessed) name
956

957
    """
958
    return cls.SplitNameVariant(name)[0]
959

    
960
  @classmethod
961
  def GetVariant(cls, name):
962
    """Returns the variant the os (without the base name).
963

964
    @param name: the OS (unprocessed) name
965

966
    """
967
    return cls.SplitNameVariant(name)[1]
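  # Illustrative sketch of the name/variant split:
  #
  #   OS.SplitNameVariant("debian+squeeze")   # -> ["debian", "squeeze"]
  #   OS.GetName("debian+squeeze")            # -> "debian"
  #   OS.GetVariant("debian")                 # -> ""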
968

    
969

    
970
class Node(TaggableObject):
971
  """Config object representing a node."""
972
  __slots__ = [
973
    "name",
974
    "primary_ip",
975
    "secondary_ip",
976
    "serial_no",
977
    "master_candidate",
978
    "offline",
979
    "drained",
980
    "group",
981
    "master_capable",
982
    "vm_capable",
983
    "ndparams",
984
    "powered",
985
    ] + _TIMESTAMPS + _UUID
986

    
987
  def UpgradeConfig(self):
988
    """Fill defaults for missing configuration values.
989

990
    """
991
    # pylint: disable=E0203
992
    # because these are "defined" via slots, not manually
993
    if self.master_capable is None:
994
      self.master_capable = True
995

    
996
    if self.vm_capable is None:
997
      self.vm_capable = True
998

    
999
    if self.ndparams is None:
1000
      self.ndparams = {}
1001

    
1002
    if self.powered is None:
1003
      self.powered = True
1004

    
1005

    
1006
class NodeGroup(TaggableObject):
1007
  """Config object representing a node group."""
1008
  __slots__ = [
1009
    "name",
1010
    "members",
1011
    "ndparams",
1012
    "serial_no",
1013
    "alloc_policy",
1014
    ] + _TIMESTAMPS + _UUID
1015

    
1016
  def ToDict(self):
1017
    """Custom function for nodegroup.
1018

1019
    This discards the members object, which gets recalculated and is only kept
1020
    in memory.
1021

1022
    """
1023
    mydict = super(NodeGroup, self).ToDict()
1024
    del mydict["members"]
1025
    return mydict
1026

    
1027
  @classmethod
1028
  def FromDict(cls, val):
1029
    """Custom function for nodegroup.
1030

1031
    The members slot is initialized to an empty list, upon deserialization.
1032

1033
    """
1034
    obj = super(NodeGroup, cls).FromDict(val)
1035
    obj.members = []
1036
    return obj
1037

    
1038
  def UpgradeConfig(self):
1039
    """Fill defaults for missing configuration values.
1040

1041
    """
1042
    if self.ndparams is None:
1043
      self.ndparams = {}
1044

    
1045
    if self.serial_no is None:
1046
      self.serial_no = 1
1047

    
1048
    if self.alloc_policy is None:
1049
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1050

    
1051
    # We only update mtime, and not ctime, since we would not be able to provide
1052
    # a correct value for creation time.
1053
    if self.mtime is None:
1054
      self.mtime = time.time()
1055

    
1056
  def FillND(self, node):
1057
    """Return filled out ndparams for L{objects.Node}
1058

1059
    @type node: L{objects.Node}
1060
    @param node: A Node object to fill
1061
    @return: a copy of the node's ndparams with defaults filled
1062

1063
    """
1064
    return self.SimpleFillND(node.ndparams)
1065

    
1066
  def SimpleFillND(self, ndparams):
1067
    """Fill a given ndparams dict with defaults.
1068

1069
    @type ndparams: dict
1070
    @param ndparams: the dict to fill
1071
    @rtype: dict
1072
    @return: a copy of the passed in ndparams with missing keys filled
1073
        from the node group defaults
1074

1075
    """
1076
    return FillDict(self.ndparams, ndparams)
1077

    
1078

    
1079
class Cluster(TaggableObject):
1080
  """Config object representing the cluster."""
1081
  __slots__ = [
1082
    "serial_no",
1083
    "rsahostkeypub",
1084
    "highest_used_port",
1085
    "tcpudp_port_pool",
1086
    "mac_prefix",
1087
    "volume_group_name",
1088
    "reserved_lvs",
1089
    "drbd_usermode_helper",
1090
    "default_bridge",
1091
    "default_hypervisor",
1092
    "master_node",
1093
    "master_ip",
1094
    "master_netdev",
1095
    "master_netmask",
1096
    "cluster_name",
1097
    "file_storage_dir",
1098
    "shared_file_storage_dir",
1099
    "enabled_hypervisors",
1100
    "hvparams",
1101
    "os_hvp",
1102
    "beparams",
1103
    "osparams",
1104
    "nicparams",
1105
    "ndparams",
1106
    "candidate_pool_size",
1107
    "modify_etc_hosts",
1108
    "modify_ssh_setup",
1109
    "maintain_node_health",
1110
    "uid_pool",
1111
    "default_iallocator",
1112
    "hidden_os",
1113
    "blacklisted_os",
1114
    "primary_ip_family",
1115
    "prealloc_wipe_disks",
1116
    ] + _TIMESTAMPS + _UUID
1117

    
1118
  def UpgradeConfig(self):
1119
    """Fill defaults for missing configuration values.
1120

1121
    """
1122
    # pylint: disable=E0203
1123
    # because these are "defined" via slots, not manually
1124
    if self.hvparams is None:
1125
      self.hvparams = constants.HVC_DEFAULTS
1126
    else:
1127
      for hypervisor in self.hvparams:
1128
        self.hvparams[hypervisor] = FillDict(
1129
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1130

    
1131
    if self.os_hvp is None:
1132
      self.os_hvp = {}
1133

    
1134
    # osparams added before 2.2
1135
    if self.osparams is None:
1136
      self.osparams = {}
1137

    
1138
    if self.ndparams is None:
1139
      self.ndparams = constants.NDC_DEFAULTS
1140

    
1141
    self.beparams = UpgradeGroupedParams(self.beparams,
1142
                                         constants.BEC_DEFAULTS)
1143
    migrate_default_bridge = not self.nicparams
1144
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1145
                                          constants.NICC_DEFAULTS)
1146
    if migrate_default_bridge:
1147
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1148
        self.default_bridge
1149

    
1150
    if self.modify_etc_hosts is None:
1151
      self.modify_etc_hosts = True
1152

    
1153
    if self.modify_ssh_setup is None:
1154
      self.modify_ssh_setup = True
1155

    
1156
    # default_bridge is no longer used in 2.1. The slot is left there to
1157
    # support auto-upgrading. It can be removed once we decide to deprecate
1158
    # upgrading straight from 2.0.
1159
    if self.default_bridge is not None:
1160
      self.default_bridge = None
1161

    
1162
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1163
    # code can be removed once upgrading straight from 2.0 is deprecated.
1164
    if self.default_hypervisor is not None:
1165
      self.enabled_hypervisors = ([self.default_hypervisor] +
1166
        [hvname for hvname in self.enabled_hypervisors
1167
         if hvname != self.default_hypervisor])
1168
      self.default_hypervisor = None
1169

    
1170
    # maintain_node_health added after 2.1.1
1171
    if self.maintain_node_health is None:
1172
      self.maintain_node_health = False
1173

    
1174
    if self.uid_pool is None:
1175
      self.uid_pool = []
1176

    
1177
    if self.default_iallocator is None:
1178
      self.default_iallocator = ""
1179

    
1180
    # reserved_lvs added before 2.2
1181
    if self.reserved_lvs is None:
1182
      self.reserved_lvs = []
1183

    
1184
    # hidden and blacklisted operating systems added before 2.2.1
1185
    if self.hidden_os is None:
1186
      self.hidden_os = []
1187

    
1188
    if self.blacklisted_os is None:
1189
      self.blacklisted_os = []
1190

    
1191
    # primary_ip_family added before 2.3
1192
    if self.primary_ip_family is None:
1193
      self.primary_ip_family = AF_INET
1194

    
1195
    if self.master_netmask is None:
1196
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1197
      self.master_netmask = ipcls.iplen
1198

    
1199
    if self.prealloc_wipe_disks is None:
1200
      self.prealloc_wipe_disks = False
1201

    
1202
    # shared_file_storage_dir added before 2.5
1203
    if self.shared_file_storage_dir is None:
1204
      self.shared_file_storage_dir = ""
1205

    
1206
  def ToDict(self):
1207
    """Custom function for cluster.
1208

1209
    """
1210
    mydict = super(Cluster, self).ToDict()
1211
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1212
    return mydict
1213

    
1214
  @classmethod
1215
  def FromDict(cls, val):
1216
    """Custom function for cluster.
1217

1218
    """
1219
    obj = super(Cluster, cls).FromDict(val)
1220
    if not isinstance(obj.tcpudp_port_pool, set):
1221
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1222
    return obj
1223

    
1224
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1225
    """Get the default hypervisor parameters for the cluster.
1226

1227
    @param hypervisor: the hypervisor name
1228
    @param os_name: if specified, we'll also update the defaults for this OS
1229
    @param skip_keys: if passed, list of keys not to use
1230
    @return: the defaults dict
1231

1232
    """
1233
    if skip_keys is None:
1234
      skip_keys = []
1235

    
1236
    fill_stack = [self.hvparams.get(hypervisor, {})]
1237
    if os_name is not None:
1238
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1239
      fill_stack.append(os_hvp)
1240

    
1241
    ret_dict = {}
1242
    for o_dict in fill_stack:
1243
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1244

    
1245
    return ret_dict
1246

    
1247
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1248
    """Fill a given hvparams dict with cluster defaults.
1249

1250
    @type hv_name: string
1251
    @param hv_name: the hypervisor to use
1252
    @type os_name: string
1253
    @param os_name: the OS to use for overriding the hypervisor defaults
1254
    @type skip_globals: boolean
1255
    @param skip_globals: if True, the global hypervisor parameters will
1256
        not be filled
1257
    @rtype: dict
1258
    @return: a copy of the given hvparams with missing keys filled from
1259
        the cluster defaults
1260

1261
    """
1262
    if skip_globals:
1263
      skip_keys = constants.HVC_GLOBALS
1264
    else:
1265
      skip_keys = []
1266

    
1267
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1268
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
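  # Illustrative precedence (sketch, with made-up values): the passed-in dict
  # overrides per-OS overrides (os_hvp), which override cluster-wide hvparams:
  #
  #   cluster.hvparams = {"kvm": {"acpi": True, "kernel_path": "/boot/k"}}
  #   cluster.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   cluster.SimpleFillHV("kvm", "debian", {"kernel_path": ""})
  #   # -> {"acpi": False, "kernel_path": ""}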
1269

    
1270
  def FillHV(self, instance, skip_globals=False):
1271
    """Fill an instance's hvparams dict with cluster defaults.
1272

1273
    @type instance: L{objects.Instance}
1274
    @param instance: the instance parameter to fill
1275
    @type skip_globals: boolean
1276
    @param skip_globals: if True, the global hypervisor parameters will
1277
        not be filled
1278
    @rtype: dict
1279
    @return: a copy of the instance's hvparams with missing keys filled from
1280
        the cluster defaults
1281

1282
    """
1283
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1284
                             instance.hvparams, skip_globals)
1285

    
1286
  def SimpleFillBE(self, beparams):
1287
    """Fill a given beparams dict with cluster defaults.
1288

1289
    @type beparams: dict
1290
    @param beparams: the dict to fill
1291
    @rtype: dict
1292
    @return: a copy of the passed in beparams with missing keys filled
1293
        from the cluster defaults
1294

1295
    """
1296
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1297

    
1298
  def FillBE(self, instance):
1299
    """Fill an instance's beparams dict with cluster defaults.
1300

1301
    @type instance: L{objects.Instance}
1302
    @param instance: the instance parameter to fill
1303
    @rtype: dict
1304
    @return: a copy of the instance's beparams with missing keys filled from
1305
        the cluster defaults
1306

1307
    """
1308
    return self.SimpleFillBE(instance.beparams)
1309

    
1310
  def SimpleFillNIC(self, nicparams):
1311
    """Fill a given nicparams dict with cluster defaults.
1312

1313
    @type nicparams: dict
1314
    @param nicparams: the dict to fill
1315
    @rtype: dict
1316
    @return: a copy of the passed in nicparams with missing keys filled
1317
        from the cluster defaults
1318

1319
    """
1320
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1321

    
1322
  def SimpleFillOS(self, os_name, os_params):
1323
    """Fill an instance's osparams dict with cluster defaults.
1324

1325
    @type os_name: string
1326
    @param os_name: the OS name to use
1327
    @type os_params: dict
1328
    @param os_params: the dict to fill with default values
1329
    @rtype: dict
1330
    @return: a copy of the instance's osparams with missing keys filled from
1331
        the cluster defaults
1332

1333
    """
1334
    name_only = os_name.split("+", 1)[0]
1335
    # base OS
1336
    result = self.osparams.get(name_only, {})
1337
    # OS with variant
1338
    result = FillDict(result, self.osparams.get(os_name, {}))
1339
    # specified params
1340
    return FillDict(result, os_params)
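  # Illustrative precedence (sketch): base-OS parameters are overridden by
  # variant-specific ones, which are overridden by the explicitly given dict:
  #
  #   cluster.osparams = {"debian": {"a": 1, "b": 1},
  #                       "debian+squeeze": {"b": 2}}
  #   cluster.SimpleFillOS("debian+squeeze", {"c": 3})
  #   # -> {"a": 1, "b": 2, "c": 3}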
1341

    
1342
  def FillND(self, node, nodegroup):
1343
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1344

1345
    @type node: L{objects.Node}
1346
    @param node: A Node object to fill
1347
    @type nodegroup: L{objects.NodeGroup}
1348
    @param nodegroup: A NodeGroup object to fill
1349
    @return: a copy of the node's ndparams with defaults filled
1350

1351
    """
1352
    return self.SimpleFillND(nodegroup.FillND(node))
1353

    
1354
  def SimpleFillND(self, ndparams):
1355
    """Fill a given ndparams dict with defaults.
1356

1357
    @type ndparams: dict
1358
    @param ndparams: the dict to fill
1359
    @rtype: dict
1360
    @return: a copy of the passed in ndparams with missing keys filled
1361
        from the cluster defaults
1362

1363
    """
1364
    return FillDict(self.ndparams, ndparams)
1365

    
1366

    
1367
class BlockDevStatus(ConfigObject):
1368
  """Config object representing the status of a block device."""
1369
  __slots__ = [
1370
    "dev_path",
1371
    "major",
1372
    "minor",
1373
    "sync_percent",
1374
    "estimated_time",
1375
    "is_degraded",
1376
    "ldisk_status",
1377
    ]
1378

    
1379

    
1380
class ImportExportStatus(ConfigObject):
1381
  """Config object representing the status of an import or export."""
1382
  __slots__ = [
1383
    "recent_output",
1384
    "listen_port",
1385
    "connected",
1386
    "progress_mbytes",
1387
    "progress_throughput",
1388
    "progress_eta",
1389
    "progress_percent",
1390
    "exit_status",
1391
    "error_message",
1392
    ] + _TIMESTAMPS
1393

    
1394

    
1395
class ImportExportOptions(ConfigObject):
1396
  """Options for import/export daemon
1397

1398
  @ivar key_name: X509 key name (None for cluster certificate)
1399
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1400
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1401
  @ivar magic: Used to ensure the connection goes to the right disk
1402
  @ivar ipv6: Whether to use IPv6
1403
  @ivar connect_timeout: Number of seconds for establishing connection
1404

1405
  """
1406
  __slots__ = [
1407
    "key_name",
1408
    "ca_pem",
1409
    "compress",
1410
    "magic",
1411
    "ipv6",
1412
    "connect_timeout",
1413
    ]
1414

    
1415

    
1416
class ConfdRequest(ConfigObject):
1417
  """Object holding a confd request.
1418

1419
  @ivar protocol: confd protocol version
1420
  @ivar type: confd query type
1421
  @ivar query: query request
1422
  @ivar rsalt: requested reply salt
1423

1424
  """
1425
  __slots__ = [
1426
    "protocol",
1427
    "type",
1428
    "query",
1429
    "rsalt",
1430
    ]
1431

    
1432

    
1433
class ConfdReply(ConfigObject):
1434
  """Object holding a confd reply.
1435

1436
  @ivar protocol: confd protocol version
1437
  @ivar status: reply status code (ok, error)
1438
  @ivar answer: confd query reply
1439
  @ivar serial: configuration serial number
1440

1441
  """
1442
  __slots__ = [
1443
    "protocol",
1444
    "status",
1445
    "answer",
1446
    "serial",
1447
    ]
1448

    
1449

    
1450
class QueryFieldDefinition(ConfigObject):
1451
  """Object holding a query field definition.
1452

1453
  @ivar name: Field name
1454
  @ivar title: Human-readable title
1455
  @ivar kind: Field type
1456
  @ivar doc: Human-readable description
1457

1458
  """
1459
  __slots__ = [
1460
    "name",
1461
    "title",
1462
    "kind",
1463
    "doc",
1464
    ]
1465

    
1466

    
1467
class _QueryResponseBase(ConfigObject):
1468
  __slots__ = [
1469
    "fields",
1470
    ]
1471

    
1472
  def ToDict(self):
1473
    """Custom function for serializing.
1474

1475
    """
1476
    mydict = super(_QueryResponseBase, self).ToDict()
1477
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1478
    return mydict
1479

    
1480
  @classmethod
1481
  def FromDict(cls, val):
1482
    """Custom function for de-serializing.
1483

1484
    """
1485
    obj = super(_QueryResponseBase, cls).FromDict(val)
1486
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1487
    return obj
1488

    
1489

    
1490
class QueryRequest(ConfigObject):
1491
  """Object holding a query request.
1492

1493
  """
1494
  __slots__ = [
1495
    "what",
1496
    "fields",
1497
    "qfilter",
1498
    ]
1499

    
1500

    
1501
class QueryResponse(_QueryResponseBase):
1502
  """Object holding the response to a query.
1503

1504
  @ivar fields: List of L{QueryFieldDefinition} objects
1505
  @ivar data: Requested data
1506

1507
  """
1508
  __slots__ = [
1509
    "data",
1510
    ]
1511

    
1512

    
1513
class QueryFieldsRequest(ConfigObject):
1514
  """Object holding a request for querying available fields.
1515

1516
  """
1517
  __slots__ = [
1518
    "what",
1519
    "fields",
1520
    ]
1521

    
1522

    
1523
class QueryFieldsResponse(_QueryResponseBase):
1524
  """Object holding the response to a query for fields.
1525

1526
  @ivar fields: List of L{QueryFieldDefinition} objects
1527

1528
  """
1529
  __slots__ = [
1530
    ]
1531

    
1532

    
1533
class MigrationStatus(ConfigObject):
1534
  """Object holding the status of a migration.
1535

1536
  """
1537
  __slots__ = [
1538
    "status",
1539
    "transferred_ram",
1540
    "total_ram",
1541
    ]
1542

    
1543

    
1544
class InstanceConsole(ConfigObject):
1545
  """Object describing how to access the console of an instance.
1546

1547
  """
1548
  __slots__ = [
1549
    "instance",
1550
    "kind",
1551
    "message",
1552
    "host",
1553
    "port",
1554
    "user",
1555
    "command",
1556
    "display",
1557
    ]
1558

    
1559
  def Validate(self):
1560
    """Validates contents of this object.
1561

1562
    """
1563
    assert self.kind in constants.CONS_ALL, "Unknown console type"
1564
    assert self.instance, "Missing instance name"
1565
    assert self.message or self.kind in [constants.CONS_SSH,
1566
                                         constants.CONS_SPICE,
1567
                                         constants.CONS_VNC]
1568
    assert self.host or self.kind == constants.CONS_MESSAGE
1569
    assert self.port or self.kind in [constants.CONS_MESSAGE,
1570
                                      constants.CONS_SSH]
1571
    assert self.user or self.kind in [constants.CONS_MESSAGE,
1572
                                      constants.CONS_SPICE,
1573
                                      constants.CONS_VNC]
1574
    assert self.command or self.kind in [constants.CONS_MESSAGE,
1575
                                         constants.CONS_SPICE,
1576
                                         constants.CONS_VNC]
1577
    assert self.display or self.kind in [constants.CONS_MESSAGE,
1578
                                         constants.CONS_SPICE,
1579
                                         constants.CONS_SSH]
1580
    return True
1581

    
1582

    
1583
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1584
  """Simple wrapper over ConfigParse that allows serialization.
1585

1586
  This class is basically ConfigParser.SafeConfigParser with two
1587
  additional methods that allow it to serialize/unserialize to/from a
1588
  buffer.
1589

1590
  """
1591
  def Dumps(self):
1592
    """Dump this instance and return the string representation."""
1593
    buf = StringIO()
1594
    self.write(buf)
1595
    return buf.getvalue()
1596

    
1597
  @classmethod
1598
  def Loads(cls, data):
1599
    """Load data from a string."""
1600
    buf = StringIO(data)
1601
    cfp = cls()
1602
    cfp.readfp(buf)
1603
    return cfp
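  # Illustrative round-trip (sketch):
  #
  #   cfp = SerializableConfigParser.Loads("[node]\nname = node1\n")
  #   cfp.get("node", "name")   # -> "node1"
  #   cfp.Dumps()               # -> "[node]\nname = node1\n\n"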