1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable-msg=E0203,W0201
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
import ConfigParser
37
import re
38
import copy
39
import time
40
from cStringIO import StringIO
41

    
42
from ganeti import errors
43
from ganeti import constants
44

    
45
from socket import AF_INET
46

    
47

    
48
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
49
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
50

    
51
_TIMESTAMPS = ["ctime", "mtime"]
52
_UUID = ["uuid"]
53

    
54

    
55
def FillDict(defaults_dict, custom_dict, skip_keys=None):
56
  """Basic function to apply settings on top a default dict.
57

58
  @type defaults_dict: dict
59
  @param defaults_dict: dictionary holding the default values
60
  @type custom_dict: dict
61
  @param custom_dict: dictionary holding customized values
62
  @type skip_keys: list
63
  @param skip_keys: which keys not to fill
64
  @rtype: dict
65
  @return: dict with the 'full' values
66

67
  """
68
  ret_dict = copy.deepcopy(defaults_dict)
69
  ret_dict.update(custom_dict)
70
  if skip_keys:
71
    for k in skip_keys:
72
      try:
73
        del ret_dict[k]
74
      except KeyError:
75
        pass
76
  return ret_dict
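# Illustrative example (hypothetical values, not part of the original code):
# FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) deep-copies the
# defaults, overrides "b" with the customized value and drops the skipped
# key, returning {"b": 3}.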
77

    
78

    
79
def UpgradeGroupedParams(target, defaults):
80
  """Update all groups for the target parameter.
81

82
  @type target: dict of dicts
83
  @param target: {group: {parameter: value}}
84
  @type defaults: dict
85
  @param defaults: default parameter values
86

87
  """
88
  if target is None:
89
    target = {constants.PP_DEFAULT: defaults}
90
  else:
91
    for group in target:
92
      target[group] = FillDict(defaults, target[group])
93
  return target
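# Illustrative example (hypothetical values): UpgradeGroupedParams(None,
# {"mode": "m"}) returns {constants.PP_DEFAULT: {"mode": "m"}}, while
# UpgradeGroupedParams({"grp1": {"mode": "custom"}},
#                      {"mode": "m", "link": "l"})
# fills each existing group from the defaults, giving
# {"grp1": {"mode": "custom", "link": "l"}}.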
94

    
95

    
96
class ConfigObject(object):
97
  """A generic config object.
98

99
  It has the following properties:
100

101
    - provides somewhat safe recursive unpickling and pickling for its classes
102
    - unset attributes which are defined in slots are always returned
103
      as None instead of raising an error
104

105
  Classes derived from this must always declare __slots__ (we use many
106
  config objects and the memory reduction is useful)
107

108
  """
109
  __slots__ = []
110

    
111
  def __init__(self, **kwargs):
112
    for k, v in kwargs.iteritems():
113
      setattr(self, k, v)
114

    
115
  def __getattr__(self, name):
116
    if name not in self._all_slots():
117
      raise AttributeError("Invalid object attribute %s.%s" %
118
                           (type(self).__name__, name))
119
    return None
120

    
121
  def __setstate__(self, state):
122
    slots = self._all_slots()
123
    for name in state:
124
      if name in slots:
125
        setattr(self, name, state[name])
126

    
127
  @classmethod
128
  def _all_slots(cls):
129
    """Compute the list of all declared slots for a class.
130

131
    """
132
    slots = []
133
    for parent in cls.__mro__:
134
      slots.extend(getattr(parent, "__slots__", []))
135
    return slots
136

    
137
  def ToDict(self):
138
    """Convert to a dict holding only standard python types.
139

140
    The generic routine just dumps all of this object's attributes in
141
    a dict. It does not work if the class has children who are
142
    ConfigObjects themselves (e.g. the nics list in an Instance), in
143
    which case the class should override the function in order to
144
    make sure all objects returned are only standard python types.
145

146
    """
147
    result = {}
148
    for name in self._all_slots():
149
      value = getattr(self, name, None)
150
      if value is not None:
151
        result[name] = value
152
    return result
153

    
154
  __getstate__ = ToDict
155

    
156
  @classmethod
157
  def FromDict(cls, val):
158
    """Create an object from a dictionary.
159

160
    This generic routine takes a dict, instantiates a new instance of
161
    the given class, and sets attributes based on the dict content.
162

163
    As for `ToDict`, this does not work if the class has children
164
    who are ConfigObjects themselves (e.g. the nics list in an
165
    Instance), in which case the class should override the function
166
    and alter the objects.
167

168
    """
169
    if not isinstance(val, dict):
170
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
171
                                      " expected dict, got %s" % type(val))
172
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
173
    obj = cls(**val_str) # pylint: disable-msg=W0142
174
    return obj
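  # Illustrative sketch (hypothetical subclass, not part of the original
  # module): given
  #   class _Example(ConfigObject):
  #     __slots__ = ["alpha", "beta"]
  # _Example(alpha=1).ToDict() returns {"alpha": 1} (unset slots are omitted)
  # and _Example.FromDict({"alpha": 1}).beta is None, courtesy of __getattr__.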
175

    
176
  @staticmethod
177
  def _ContainerToDicts(container):
178
    """Convert the elements of a container to standard python types.
179

180
    This method converts a container with elements derived from
181
    ConfigData to standard python types. If the container is a dict,
182
    we don't touch the keys, only the values.
183

184
    """
185
    if isinstance(container, dict):
186
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
187
    elif isinstance(container, (list, tuple, set, frozenset)):
188
      ret = [elem.ToDict() for elem in container]
189
    else:
190
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
191
                      type(container))
192
    return ret
193

    
194
  @staticmethod
195
  def _ContainerFromDicts(source, c_type, e_type):
196
    """Convert a container from standard python types.
197

198
    This method converts a container with standard python types to
199
    ConfigData objects. If the container is a dict, we don't touch the
200
    keys, only the values.
201

202
    """
203
    if not isinstance(c_type, type):
204
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
205
                      " not a type" % type(c_type))
206
    if source is None:
207
      source = c_type()
208
    if c_type is dict:
209
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
210
    elif c_type in (list, tuple, set, frozenset):
211
      ret = c_type([e_type.FromDict(elem) for elem in source])
212
    else:
213
      raise TypeError("Invalid container type %s passed to"
214
                      " _ContainerFromDicts" % c_type)
215
    return ret
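  # Illustrative example (hypothetical data): a serialized NIC list such as
  # [{"mac": "aa:00:00:11:22:33"}] is turned back into objects with
  # _ContainerFromDicts(source, list, NIC); for a dict container the keys are
  # kept as-is and only the values are converted.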
216

    
217
  def Copy(self):
218
    """Makes a deep copy of the current object and its children.
219

220
    """
221
    dict_form = self.ToDict()
222
    clone_obj = self.__class__.FromDict(dict_form)
223
    return clone_obj
224

    
225
  def __repr__(self):
226
    """Implement __repr__ for ConfigObjects."""
227
    return repr(self.ToDict())
228

    
229
  def UpgradeConfig(self):
230
    """Fill defaults for missing configuration values.
231

232
    This method will be called at configuration load time, and its
233
    implementation will be object dependent.
234

235
    """
236
    pass
237

    
238

    
239
class TaggableObject(ConfigObject):
240
  """An generic class supporting tags.
241

242
  """
243
  __slots__ = ["tags"]
244
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
245

    
246
  @classmethod
247
  def ValidateTag(cls, tag):
248
    """Check if a tag is valid.
249

250
    If the tag is invalid, an errors.TagError will be raised. The
251
    function has no return value.
252

253
    """
254
    if not isinstance(tag, basestring):
255
      raise errors.TagError("Invalid tag type (not a string)")
256
    if len(tag) > constants.MAX_TAG_LEN:
257
      raise errors.TagError("Tag too long (>%d characters)" %
258
                            constants.MAX_TAG_LEN)
259
    if not tag:
260
      raise errors.TagError("Tags cannot be empty")
261
    if not cls.VALID_TAG_RE.match(tag):
262
      raise errors.TagError("Tag contains invalid characters")
263

    
264
  def GetTags(self):
265
    """Return the tags list.
266

267
    """
268
    tags = getattr(self, "tags", None)
269
    if tags is None:
270
      tags = self.tags = set()
271
    return tags
272

    
273
  def AddTag(self, tag):
274
    """Add a new tag.
275

276
    """
277
    self.ValidateTag(tag)
278
    tags = self.GetTags()
279
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
280
      raise errors.TagError("Too many tags")
281
    tags.add(tag)
282

    
283
  def RemoveTag(self, tag):
284
    """Remove a tag.
285

286
    """
287
    self.ValidateTag(tag)
288
    tags = self.GetTags()
289
    try:
290
      tags.remove(tag)
291
    except KeyError:
292
      raise errors.TagError("Tag not found")
293

    
294
  def ToDict(self):
295
    """Taggable-object-specific conversion to standard python types.
296

297
    This replaces the tags set with a list.
298

299
    """
300
    bo = super(TaggableObject, self).ToDict()
301

    
302
    tags = bo.get("tags", None)
303
    if isinstance(tags, set):
304
      bo["tags"] = list(tags)
305
    return bo
306

    
307
  @classmethod
308
  def FromDict(cls, val):
309
    """Custom function for instances.
310

311
    """
312
    obj = super(TaggableObject, cls).FromDict(val)
313
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
314
      obj.tags = set(obj.tags)
315
    return obj
316

    
317

    
318
class ConfigData(ConfigObject):
319
  """Top-level config object."""
320
  __slots__ = [
321
    "version",
322
    "cluster",
323
    "nodes",
324
    "nodegroups",
325
    "instances",
326
    "serial_no",
327
    ] + _TIMESTAMPS
328

    
329
  def ToDict(self):
330
    """Custom function for top-level config data.
331

332
    This just replaces the list of instances, nodes and the cluster
333
    with standard python types.
334

335
    """
336
    mydict = super(ConfigData, self).ToDict()
337
    mydict["cluster"] = mydict["cluster"].ToDict()
338
    for key in "nodes", "instances", "nodegroups":
339
      mydict[key] = self._ContainerToDicts(mydict[key])
340

    
341
    return mydict
342

    
343
  @classmethod
344
  def FromDict(cls, val):
345
    """Custom function for top-level config data
346

347
    """
348
    obj = super(ConfigData, cls).FromDict(val)
349
    obj.cluster = Cluster.FromDict(obj.cluster)
350
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
351
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
352
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
353
    return obj
354

    
355
  def HasAnyDiskOfType(self, dev_type):
356
    """Check if in there is at disk of the given type in the configuration.
357

358
    @type dev_type: L{constants.LDS_BLOCK}
359
    @param dev_type: the type to look for
360
    @rtype: boolean
361
    @return: boolean indicating if a disk of the given type was found or not
362

363
    """
364
    for instance in self.instances.values():
365
      for disk in instance.disks:
366
        if disk.IsBasedOnDiskType(dev_type):
367
          return True
368
    return False
369

    
370
  def UpgradeConfig(self):
371
    """Fill defaults for missing configuration values.
372

373
    """
374
    self.cluster.UpgradeConfig()
375
    for node in self.nodes.values():
376
      node.UpgradeConfig()
377
    for instance in self.instances.values():
378
      instance.UpgradeConfig()
379
    if self.nodegroups is None:
380
      self.nodegroups = {}
381
    for nodegroup in self.nodegroups.values():
382
      nodegroup.UpgradeConfig()
383
    if self.cluster.drbd_usermode_helper is None:
384
      # To decide if we set a helper let's check if at least one instance has
385
      # a DRBD disk. This does not cover all the possible scenarios but it
386
      # gives a good approximation.
387
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
388
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
389

    
390

    
391
class NIC(ConfigObject):
392
  """Config object representing a network card."""
393
  __slots__ = ["mac", "ip", "nicparams"]
394

    
395
  @classmethod
396
  def CheckParameterSyntax(cls, nicparams):
397
    """Check the given parameters for validity.
398

399
    @type nicparams:  dict
400
    @param nicparams: dictionary with parameter names/value
401
    @raise errors.ConfigurationError: when a parameter is not valid
402

403
    """
404
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
405
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
406
      raise errors.ConfigurationError(err)
407

    
408
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
409
        not nicparams[constants.NIC_LINK]):
410
      err = "Missing bridged nic link"
411
      raise errors.ConfigurationError(err)
412

    
413

    
414
class Disk(ConfigObject):
415
  """Config object representing a block device."""
416
  __slots__ = ["dev_type", "logical_id", "physical_id",
417
               "children", "iv_name", "size", "mode"]
418

    
419
  def CreateOnSecondary(self):
420
    """Test if this device needs to be created on a secondary node."""
421
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
422

    
423
  def AssembleOnSecondary(self):
424
    """Test if this device needs to be assembled on a secondary node."""
425
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
426

    
427
  def OpenOnSecondary(self):
428
    """Test if this device needs to be opened on a secondary node."""
429
    return self.dev_type in (constants.LD_LV,)
430

    
431
  def StaticDevPath(self):
432
    """Return the device path if this device type has a static one.
433

434
    Some devices (LVM for example) always live at the same /dev/ path,
435
    irrespective of their status. For such devices, we return this
436
    path, for others we return None.
437

438
    @warning: The path returned is not a normalized pathname; callers
439
        should check that it is a valid path.
440

441
    """
442
    if self.dev_type == constants.LD_LV:
443
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
444
    elif self.dev_type == constants.LD_BLOCKDEV:
445
      return self.logical_id[1]
446
    return None
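  # Illustrative example (hypothetical IDs): an LD_LV disk with
  # logical_id=("xenvg", "inst1-disk0") yields "/dev/xenvg/inst1-disk0",
  # while a DRBD8 disk has no static path and yields None.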
447

    
448
  def ChildrenNeeded(self):
449
    """Compute the needed number of children for activation.
450

451
    This method will return either -1 (all children) or a non-negative
452
    number denoting the minimum number of children needed for
453
    activation (only mirrored devices will usually return >=0).
454

455
    Currently, only DRBD8 supports diskless activation (therefore we
456
    return 0); for all others we keep the previous semantics and return
457
    -1.
458

459
    """
460
    if self.dev_type == constants.LD_DRBD8:
461
      return 0
462
    return -1
463

    
464
  def IsBasedOnDiskType(self, dev_type):
465
    """Check if the disk or its children are based on the given type.
466

467
    @type dev_type: L{constants.LDS_BLOCK}
468
    @param dev_type: the type to look for
469
    @rtype: boolean
470
    @return: boolean indicating if a device of the given type was found or not
471

472
    """
473
    if self.children:
474
      for child in self.children:
475
        if child.IsBasedOnDiskType(dev_type):
476
          return True
477
    return self.dev_type == dev_type
478

    
479
  def GetNodes(self, node):
480
    """This function returns the nodes this device lives on.
481

482
    Given the node on which the parent of the device lives on (or, in
483
    case of a top-level device, the primary node of the devices'
484
    instance), this function will return a list of nodes on which this
485
    device needs to (or can) be assembled.
486

487
    """
488
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
489
                         constants.LD_BLOCKDEV]:
490
      result = [node]
491
    elif self.dev_type in constants.LDS_DRBD:
492
      result = [self.logical_id[0], self.logical_id[1]]
493
      if node not in result:
494
        raise errors.ConfigurationError("DRBD device passed unknown node")
495
    else:
496
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
497
    return result
498

    
499
  def ComputeNodeTree(self, parent_node):
500
    """Compute the node/disk tree for this disk and its children.
501

502
    This method, given the node on which the parent disk lives, will
503
    return the list of all (node, disk) pairs which describe the disk
504
    tree in the most compact way. For example, a drbd/lvm stack
505
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
506
    which represents all the top-level devices on the nodes.
507

508
    """
509
    my_nodes = self.GetNodes(parent_node)
510
    result = [(node, self) for node in my_nodes]
511
    if not self.children:
512
      # leaf device
513
      return result
514
    for node in my_nodes:
515
      for child in self.children:
516
        child_result = child.ComputeNodeTree(node)
517
        if len(child_result) == 1:
518
          # child (and all its descendants) is simple, doesn't split
519
          # over multiple hosts, so we don't need to describe it, our
520
          # own entry for this node describes it completely
521
          continue
522
        else:
523
          # check if child nodes differ from my nodes; note that
524
          # subdisk can differ from the child itself, and be instead
525
          # one of its descendants
526
          for subnode, subdisk in child_result:
527
            if subnode not in my_nodes:
528
              result.append((subnode, subdisk))
529
            # otherwise child is under our own node, so we ignore this
530
            # entry (but probably the other results in the list will
531
            # be different)
532
    return result
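  # Illustrative example (hypothetical nodes): for a DRBD8 disk whose LV
  # children live on the same two nodes, ComputeNodeTree("node1") returns
  # [("node1", <drbd disk>), ("node2", <drbd disk>)]; only children spanning
  # additional nodes would add further entries.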
533

    
534
  def ComputeGrowth(self, amount):
535
    """Compute the per-VG growth requirements.
536

537
    This only works for VG-based disks.
538

539
    @type amount: integer
540
    @param amount: the desired increase in (user-visible) disk space
541
    @rtype: dict
542
    @return: a dictionary of volume-groups and the required size
543

544
    """
545
    if self.dev_type == constants.LD_LV:
546
      return {self.logical_id[0]: amount}
547
    elif self.dev_type == constants.LD_DRBD8:
548
      if self.children:
549
        return self.children[0].ComputeGrowth(amount)
550
      else:
551
        return {}
552
    else:
553
      # Other disk types do not require VG space
554
      return {}
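  # Illustrative example (hypothetical IDs): ComputeGrowth(1024) on an LD_LV
  # disk with logical_id=("xenvg", ...) returns {"xenvg": 1024}; an LD_DRBD8
  # disk delegates to its first child (the data volume), and other types
  # return an empty dict.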
555

    
556
  def RecordGrow(self, amount):
557
    """Update the size of this disk after growth.
558

559
    This method recurses over the disk's children and updates their
560
    size correspondingly. The method needs to be kept in sync with the
561
    actual algorithms from bdev.
562

563
    """
564
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
565
      self.size += amount
566
    elif self.dev_type == constants.LD_DRBD8:
567
      if self.children:
568
        self.children[0].RecordGrow(amount)
569
      self.size += amount
570
    else:
571
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
572
                                   " disk type %s" % self.dev_type)
573

    
574
  def UnsetSize(self):
575
    """Sets recursively the size to zero for the disk and its children.
576

577
    """
578
    if self.children:
579
      for child in self.children:
580
        child.UnsetSize()
581
    self.size = 0
582

    
583
  def SetPhysicalID(self, target_node, nodes_ip):
584
    """Convert the logical ID to the physical ID.
585

586
    This is used only for drbd, which needs ip/port configuration.
587

588
    The routine descends down and also updates its children, because
589
    this helps when only the top device is passed to the remote
590
    node.
591

592
    Arguments:
593
      - target_node: the node we wish to configure for
594
      - nodes_ip: a mapping of node name to ip
595

596
    The target_node must exist in nodes_ip, and must be one of the
597
    nodes in the logical ID for each of the DRBD devices encountered
598
    in the disk tree.
599

600
    """
601
    if self.children:
602
      for child in self.children:
603
        child.SetPhysicalID(target_node, nodes_ip)
604

    
605
    if self.logical_id is None and self.physical_id is not None:
606
      return
607
    if self.dev_type in constants.LDS_DRBD:
608
      pnode, snode, port, pminor, sminor, secret = self.logical_id
609
      if target_node not in (pnode, snode):
610
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
611
                                        target_node)
612
      pnode_ip = nodes_ip.get(pnode, None)
613
      snode_ip = nodes_ip.get(snode, None)
614
      if pnode_ip is None or snode_ip is None:
615
        raise errors.ConfigurationError("Can't find primary or secondary node"
616
                                        " for %s" % str(self))
617
      p_data = (pnode_ip, port)
618
      s_data = (snode_ip, port)
619
      if pnode == target_node:
620
        self.physical_id = p_data + s_data + (pminor, secret)
621
      else: # it must be secondary, we tested above
622
        self.physical_id = s_data + p_data + (sminor, secret)
623
    else:
624
      self.physical_id = self.logical_id
625
    return
626

    
627
  def ToDict(self):
628
    """Disk-specific conversion to standard python types.
629

630
    This replaces the children lists of objects with lists of
631
    standard python types.
632

633
    """
634
    bo = super(Disk, self).ToDict()
635

    
636
    for attr in ("children",):
637
      alist = bo.get(attr, None)
638
      if alist:
639
        bo[attr] = self._ContainerToDicts(alist)
640
    return bo
641

    
642
  @classmethod
643
  def FromDict(cls, val):
644
    """Custom function for Disks
645

646
    """
647
    obj = super(Disk, cls).FromDict(val)
648
    if obj.children:
649
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
650
    if obj.logical_id and isinstance(obj.logical_id, list):
651
      obj.logical_id = tuple(obj.logical_id)
652
    if obj.physical_id and isinstance(obj.physical_id, list):
653
      obj.physical_id = tuple(obj.physical_id)
654
    if obj.dev_type in constants.LDS_DRBD:
655
      # we need a tuple of length six here
656
      if len(obj.logical_id) < 6:
657
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
658
    return obj
659

    
660
  def __str__(self):
661
    """Custom str() formatter for disks.
662

663
    """
664
    if self.dev_type == constants.LD_LV:
665
      val =  "<LogicalVolume(/dev/%s/%s" % self.logical_id
666
    elif self.dev_type in constants.LDS_DRBD:
667
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
668
      val = "<DRBD8("
669
      if self.physical_id is None:
670
        phy = "unconfigured"
671
      else:
672
        phy = ("configured as %s:%s %s:%s" %
673
               (self.physical_id[0], self.physical_id[1],
674
                self.physical_id[2], self.physical_id[3]))
675

    
676
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
677
              (node_a, minor_a, node_b, minor_b, port, phy))
678
      if self.children and self.children.count(None) == 0:
679
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
680
      else:
681
        val += "no local storage"
682
    else:
683
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
684
             (self.dev_type, self.logical_id, self.physical_id, self.children))
685
    if self.iv_name is None:
686
      val += ", not visible"
687
    else:
688
      val += ", visible as /dev/%s" % self.iv_name
689
    if isinstance(self.size, int):
690
      val += ", size=%dm)>" % self.size
691
    else:
692
      val += ", size='%s')>" % (self.size,)
693
    return val
694

    
695
  def Verify(self):
696
    """Checks that this disk is correctly configured.
697

698
    """
699
    all_errors = []
700
    if self.mode not in constants.DISK_ACCESS_SET:
701
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
702
    return all_errors
703

    
704
  def UpgradeConfig(self):
705
    """Fill defaults for missing configuration values.
706

707
    """
708
    if self.children:
709
      for child in self.children:
710
        child.UpgradeConfig()
711
    # add here config upgrade for this disk
712

    
713

    
714
class Instance(TaggableObject):
715
  """Config object representing an instance."""
716
  __slots__ = [
717
    "name",
718
    "primary_node",
719
    "os",
720
    "hypervisor",
721
    "hvparams",
722
    "beparams",
723
    "osparams",
724
    "admin_up",
725
    "nics",
726
    "disks",
727
    "disk_template",
728
    "network_port",
729
    "serial_no",
730
    ] + _TIMESTAMPS + _UUID
731

    
732
  def _ComputeSecondaryNodes(self):
733
    """Compute the list of secondary nodes.
734

735
    This is a simple wrapper over _ComputeAllNodes.
736

737
    """
738
    all_nodes = set(self._ComputeAllNodes())
739
    all_nodes.discard(self.primary_node)
740
    return tuple(all_nodes)
741

    
742
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
743
                             "List of secondary nodes")
744

    
745
  def _ComputeAllNodes(self):
746
    """Compute the list of all nodes.
747

748
    Since the data is already there (in the drbd disks), keeping it as
749
    a separate normal attribute is redundant and if not properly
750
    synchronised can cause problems. Thus it's better to compute it
751
    dynamically.
752

753
    """
754
    def _Helper(nodes, device):
755
      """Recursively computes nodes given a top device."""
756
      if device.dev_type in constants.LDS_DRBD:
757
        nodea, nodeb = device.logical_id[:2]
758
        nodes.add(nodea)
759
        nodes.add(nodeb)
760
      if device.children:
761
        for child in device.children:
762
          _Helper(nodes, child)
763

    
764
    all_nodes = set()
765
    all_nodes.add(self.primary_node)
766
    for device in self.disks:
767
      _Helper(all_nodes, device)
768
    return tuple(all_nodes)
769

    
770
  all_nodes = property(_ComputeAllNodes, None, None,
771
                       "List of all nodes of the instance")
772

    
773
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
774
    """Provide a mapping of nodes to LVs this instance owns.
775

776
    This function figures out what logical volumes should belong on
777
    which nodes, recursing through a device tree.
778

779
    @param lvmap: optional dictionary to receive the
780
        'node' : ['lv', ...] data.
781

782
    @return: None if lvmap arg is given, otherwise, a dictionary of
783
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
784
        volumeN is of the form "vg_name/lv_name", compatible with
785
        GetVolumeList()
786

787
    """
788
    if node is None:
789
      node = self.primary_node
790

    
791
    if lvmap is None:
792
      lvmap = { node : [] }
793
      ret = lvmap
794
    else:
795
      if node not in lvmap:
796
        lvmap[node] = []
797
      ret = None
798

    
799
    if not devs:
800
      devs = self.disks
801

    
802
    for dev in devs:
803
      if dev.dev_type == constants.LD_LV:
804
        lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])
805

    
806
      elif dev.dev_type in constants.LDS_DRBD:
807
        if dev.children:
808
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
809
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
810

    
811
      elif dev.children:
812
        self.MapLVsByNode(lvmap, dev.children, node)
813

    
814
    return ret
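  # Illustrative example (hypothetical names): for a DRBD-backed instance the
  # result might look like
  #   {"node1.example.com": ["xenvg/disk0_data", "xenvg/disk0_meta"],
  #    "node2.example.com": ["xenvg/disk0_data", "xenvg/disk0_meta"]}
  # i.e. one "vg_name/lv_name" entry per logical volume on each node.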
815

    
816
  def FindDisk(self, idx):
817
    """Find a disk given having a specified index.
818

819
    This is just a wrapper that does validation of the index.
820

821
    @type idx: int
822
    @param idx: the disk index
823
    @rtype: L{Disk}
824
    @return: the corresponding disk
825
    @raise errors.OpPrereqError: when the given index is not valid
826

827
    """
828
    try:
829
      idx = int(idx)
830
      return self.disks[idx]
831
    except (TypeError, ValueError), err:
832
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
833
                                 errors.ECODE_INVAL)
834
    except IndexError:
835
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
836
                                 " 0 to %d" % (idx, len(self.disks) - 1),
837
                                 errors.ECODE_INVAL)
838

    
839
  def ToDict(self):
840
    """Instance-specific conversion to standard python types.
841

842
    This replaces the children lists of objects with lists of standard
843
    python types.
844

845
    """
846
    bo = super(Instance, self).ToDict()
847

    
848
    for attr in "nics", "disks":
849
      alist = bo.get(attr, None)
850
      if alist:
851
        nlist = self._ContainerToDicts(alist)
852
      else:
853
        nlist = []
854
      bo[attr] = nlist
855
    return bo
856

    
857
  @classmethod
858
  def FromDict(cls, val):
859
    """Custom function for instances.
860

861
    """
862
    obj = super(Instance, cls).FromDict(val)
863
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
864
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
865
    return obj
866

    
867
  def UpgradeConfig(self):
868
    """Fill defaults for missing configuration values.
869

870
    """
871
    for nic in self.nics:
872
      nic.UpgradeConfig()
873
    for disk in self.disks:
874
      disk.UpgradeConfig()
875
    if self.hvparams:
876
      for key in constants.HVC_GLOBALS:
877
        try:
878
          del self.hvparams[key]
879
        except KeyError:
880
          pass
881
    if self.osparams is None:
882
      self.osparams = {}
883

    
884

    
885
class OS(ConfigObject):
886
  """Config object representing an operating system.
887

888
  @type supported_parameters: list
889
  @ivar supported_parameters: a list of tuples, name and description,
890
      containing the supported parameters by this OS
891

892
  @type VARIANT_DELIM: string
893
  @cvar VARIANT_DELIM: the variant delimiter
894

895
  """
896
  __slots__ = [
897
    "name",
898
    "path",
899
    "api_versions",
900
    "create_script",
901
    "export_script",
902
    "import_script",
903
    "rename_script",
904
    "verify_script",
905
    "supported_variants",
906
    "supported_parameters",
907
    ]
908

    
909
  VARIANT_DELIM = "+"
910

    
911
  @classmethod
912
  def SplitNameVariant(cls, name):
913
    """Splits the name into the proper name and variant.
914

915
    @param name: the OS (unprocessed) name
916
    @rtype: list
917
    @return: a list of two elements; if the original name didn't
918
        contain a variant, the second element is an empty string
919

920
    """
921
    nv = name.split(cls.VARIANT_DELIM, 1)
922
    if len(nv) == 1:
923
      nv.append("")
924
    return nv
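  # Illustrative example: SplitNameVariant("debian+squeeze") returns
  # ["debian", "squeeze"], while SplitNameVariant("debian") returns
  # ["debian", ""] since no variant delimiter is present.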
925

    
926
  @classmethod
927
  def GetName(cls, name):
928
    """Returns the proper name of the os (without the variant).
929

930
    @param name: the OS (unprocessed) name
931

932
    """
933
    return cls.SplitNameVariant(name)[0]
934

    
935
  @classmethod
936
  def GetVariant(cls, name):
937
    """Returns the variant the os (without the base name).
938

939
    @param name: the OS (unprocessed) name
940

941
    """
942
    return cls.SplitNameVariant(name)[1]
943

    
944

    
945
class Node(TaggableObject):
946
  """Config object representing a node."""
947
  __slots__ = [
948
    "name",
949
    "primary_ip",
950
    "secondary_ip",
951
    "serial_no",
952
    "master_candidate",
953
    "offline",
954
    "drained",
955
    "group",
956
    "master_capable",
957
    "vm_capable",
958
    "ndparams",
959
    "powered",
960
    ] + _TIMESTAMPS + _UUID
961

    
962
  def UpgradeConfig(self):
963
    """Fill defaults for missing configuration values.
964

965
    """
966
    # pylint: disable-msg=E0203
967
    # because these are "defined" via slots, not manually
968
    if self.master_capable is None:
969
      self.master_capable = True
970

    
971
    if self.vm_capable is None:
972
      self.vm_capable = True
973

    
974
    if self.ndparams is None:
975
      self.ndparams = {}
976

    
977
    if self.powered is None:
978
      self.powered = True
979

    
980

    
981
class NodeGroup(ConfigObject):
982
  """Config object representing a node group."""
983
  __slots__ = [
984
    "name",
985
    "members",
986
    "ndparams",
987
    "serial_no",
988
    "alloc_policy",
989
    ] + _TIMESTAMPS + _UUID
990

    
991
  def ToDict(self):
992
    """Custom function for nodegroup.
993

994
    This discards the members object, which gets recalculated and is only kept
995
    in memory.
996

997
    """
998
    mydict = super(NodeGroup, self).ToDict()
999
    del mydict["members"]
1000
    return mydict
1001

    
1002
  @classmethod
1003
  def FromDict(cls, val):
1004
    """Custom function for nodegroup.
1005

1006
    The members slot is initialized to an empty list, upon deserialization.
1007

1008
    """
1009
    obj = super(NodeGroup, cls).FromDict(val)
1010
    obj.members = []
1011
    return obj
1012

    
1013
  def UpgradeConfig(self):
1014
    """Fill defaults for missing configuration values.
1015

1016
    """
1017
    if self.ndparams is None:
1018
      self.ndparams = {}
1019

    
1020
    if self.serial_no is None:
1021
      self.serial_no = 1
1022

    
1023
    if self.alloc_policy is None:
1024
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1025

    
1026
    # We only update mtime, and not ctime, since we would not be able to provide
1027
    # a correct value for creation time.
1028
    if self.mtime is None:
1029
      self.mtime = time.time()
1030

    
1031
  def FillND(self, node):
1032
    """Return filled out ndparams for L{object.Node}
1033

1034
    @type node: L{objects.Node}
1035
    @param node: A Node object to fill
1036
    @return: a copy of the node's ndparams with defaults filled
1037

1038
    """
1039
    return self.SimpleFillND(node.ndparams)
1040

    
1041
  def SimpleFillND(self, ndparams):
1042
    """Fill a given ndparams dict with defaults.
1043

1044
    @type ndparams: dict
1045
    @param ndparams: the dict to fill
1046
    @rtype: dict
1047
    @return: a copy of the passed in ndparams with missing keys filled
1048
        from the node group defaults
1049

1050
    """
1051
    return FillDict(self.ndparams, ndparams)
1052

    
1053

    
1054
class Cluster(TaggableObject):
1055
  """Config object representing the cluster."""
1056
  __slots__ = [
1057
    "serial_no",
1058
    "rsahostkeypub",
1059
    "highest_used_port",
1060
    "tcpudp_port_pool",
1061
    "mac_prefix",
1062
    "volume_group_name",
1063
    "reserved_lvs",
1064
    "drbd_usermode_helper",
1065
    "default_bridge",
1066
    "default_hypervisor",
1067
    "master_node",
1068
    "master_ip",
1069
    "master_netdev",
1070
    "cluster_name",
1071
    "file_storage_dir",
1072
    "shared_file_storage_dir",
1073
    "enabled_hypervisors",
1074
    "hvparams",
1075
    "os_hvp",
1076
    "beparams",
1077
    "osparams",
1078
    "nicparams",
1079
    "ndparams",
1080
    "candidate_pool_size",
1081
    "modify_etc_hosts",
1082
    "modify_ssh_setup",
1083
    "maintain_node_health",
1084
    "uid_pool",
1085
    "default_iallocator",
1086
    "hidden_os",
1087
    "blacklisted_os",
1088
    "primary_ip_family",
1089
    "prealloc_wipe_disks",
1090
    "networks",
1091
    ] + _TIMESTAMPS + _UUID
1092

    
1093
  def UpgradeConfig(self):
1094
    """Fill defaults for missing configuration values.
1095

1096
    """
1097
    # pylint: disable-msg=E0203
1098
    # because these are "defined" via slots, not manually
1099
    if self.hvparams is None:
1100
      self.hvparams = constants.HVC_DEFAULTS
1101
    else:
1102
      for hypervisor in self.hvparams:
1103
        self.hvparams[hypervisor] = FillDict(
1104
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1105

    
1106
    if self.os_hvp is None:
1107
      self.os_hvp = {}
1108

    
1109
    # osparams added before 2.2
1110
    if self.osparams is None:
1111
      self.osparams = {}
1112

    
1113
    if self.ndparams is None:
1114
      self.ndparams = constants.NDC_DEFAULTS
1115

    
1116
    self.beparams = UpgradeGroupedParams(self.beparams,
1117
                                         constants.BEC_DEFAULTS)
1118
    migrate_default_bridge = not self.nicparams
1119
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1120
                                          constants.NICC_DEFAULTS)
1121
    if migrate_default_bridge:
1122
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1123
        self.default_bridge
1124

    
1125
    if self.modify_etc_hosts is None:
1126
      self.modify_etc_hosts = True
1127

    
1128
    if self.modify_ssh_setup is None:
1129
      self.modify_ssh_setup = True
1130

    
1131
    # default_bridge is no longer used in 2.1. The slot is left there to
1132
    # support auto-upgrading. It can be removed once we decide to deprecate
1133
    # upgrading straight from 2.0.
1134
    if self.default_bridge is not None:
1135
      self.default_bridge = None
1136

    
1137
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1138
    # code can be removed once upgrading straight from 2.0 is deprecated.
1139
    if self.default_hypervisor is not None:
1140
      self.enabled_hypervisors = ([self.default_hypervisor] +
1141
        [hvname for hvname in self.enabled_hypervisors
1142
         if hvname != self.default_hypervisor])
1143
      self.default_hypervisor = None
1144

    
1145
    # maintain_node_health added after 2.1.1
1146
    if self.maintain_node_health is None:
1147
      self.maintain_node_health = False
1148

    
1149
    if self.uid_pool is None:
1150
      self.uid_pool = []
1151

    
1152
    if self.default_iallocator is None:
1153
      self.default_iallocator = ""
1154

    
1155
    # reserved_lvs added before 2.2
1156
    if self.reserved_lvs is None:
1157
      self.reserved_lvs = []
1158

    
1159
    # hidden and blacklisted operating systems added before 2.2.1
1160
    if self.hidden_os is None:
1161
      self.hidden_os = []
1162

    
1163
    if self.blacklisted_os is None:
1164
      self.blacklisted_os = []
1165

    
1166
    # primary_ip_family added before 2.3
1167
    if self.primary_ip_family is None:
1168
      self.primary_ip_family = AF_INET
1169

    
1170
    if self.prealloc_wipe_disks is None:
1171
      self.prealloc_wipe_disks = False
1172

    
1173
    # shared_file_storage_dir added before 2.5
1174
    if self.shared_file_storage_dir is None:
1175
      self.shared_file_storage_dir = ""
1176

    
1177
    # Network management
1178
    if self.networks is None:
1179
      self.networks = {}
1180

    
1181
  def ToDict(self):
1182
    """Custom function for cluster.
1183

1184
    """
1185
    mydict = super(Cluster, self).ToDict()
1186
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1187
    return mydict
1188

    
1189
  @classmethod
1190
  def FromDict(cls, val):
1191
    """Custom function for cluster.
1192

1193
    """
1194
    obj = super(Cluster, cls).FromDict(val)
1195
    if not isinstance(obj.tcpudp_port_pool, set):
1196
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1197
    return obj
1198

    
1199
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1200
    """Get the default hypervisor parameters for the cluster.
1201

1202
    @param hypervisor: the hypervisor name
1203
    @param os_name: if specified, we'll also update the defaults for this OS
1204
    @param skip_keys: if passed, list of keys not to use
1205
    @return: the defaults dict
1206

1207
    """
1208
    if skip_keys is None:
1209
      skip_keys = []
1210

    
1211
    fill_stack = [self.hvparams.get(hypervisor, {})]
1212
    if os_name is not None:
1213
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1214
      fill_stack.append(os_hvp)
1215

    
1216
    ret_dict = {}
1217
    for o_dict in fill_stack:
1218
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1219

    
1220
    return ret_dict
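  # Illustrative example (hypothetical params): with
  #   self.hvparams = {"xen-pvm": {"root_path": "/dev/sda1"}}
  #   self.os_hvp = {"debian": {"xen-pvm": {"root_path": "/dev/xvda1"}}}
  # GetHVDefaults("xen-pvm", os_name="debian") applies the per-OS override on
  # top of the cluster defaults and returns {"root_path": "/dev/xvda1"}.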
1221

    
1222
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1223
    """Fill a given hvparams dict with cluster defaults.
1224

1225
    @type hv_name: string
1226
    @param hv_name: the hypervisor to use
1227
    @type os_name: string
1228
    @param os_name: the OS to use for overriding the hypervisor defaults
1229
    @type skip_globals: boolean
1230
    @param skip_globals: if True, the global hypervisor parameters will
1231
        not be filled
1232
    @rtype: dict
1233
    @return: a copy of the given hvparams with missing keys filled from
1234
        the cluster defaults
1235

1236
    """
1237
    if skip_globals:
1238
      skip_keys = constants.HVC_GLOBALS
1239
    else:
1240
      skip_keys = []
1241

    
1242
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1243
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1244

    
1245
  def FillHV(self, instance, skip_globals=False):
1246
    """Fill an instance's hvparams dict with cluster defaults.
1247

1248
    @type instance: L{objects.Instance}
1249
    @param instance: the instance parameter to fill
1250
    @type skip_globals: boolean
1251
    @param skip_globals: if True, the global hypervisor parameters will
1252
        not be filled
1253
    @rtype: dict
1254
    @return: a copy of the instance's hvparams with missing keys filled from
1255
        the cluster defaults
1256

1257
    """
1258
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1259
                             instance.hvparams, skip_globals)
1260

    
1261
  def SimpleFillBE(self, beparams):
1262
    """Fill a given beparams dict with cluster defaults.
1263

1264
    @type beparams: dict
1265
    @param beparams: the dict to fill
1266
    @rtype: dict
1267
    @return: a copy of the passed in beparams with missing keys filled
1268
        from the cluster defaults
1269

1270
    """
1271
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1272

    
1273
  def FillBE(self, instance):
1274
    """Fill an instance's beparams dict with cluster defaults.
1275

1276
    @type instance: L{objects.Instance}
1277
    @param instance: the instance parameter to fill
1278
    @rtype: dict
1279
    @return: a copy of the instance's beparams with missing keys filled from
1280
        the cluster defaults
1281

1282
    """
1283
    return self.SimpleFillBE(instance.beparams)
1284

    
1285
  def SimpleFillNIC(self, nicparams):
1286
    """Fill a given nicparams dict with cluster defaults.
1287

1288
    @type nicparams: dict
1289
    @param nicparams: the dict to fill
1290
    @rtype: dict
1291
    @return: a copy of the passed in nicparams with missing keys filled
1292
        from the cluster defaults
1293

1294
    """
1295
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1296

    
1297
  def SimpleFillOS(self, os_name, os_params):
1298
    """Fill an instance's osparams dict with cluster defaults.
1299

1300
    @type os_name: string
1301
    @param os_name: the OS name to use
1302
    @type os_params: dict
1303
    @param os_params: the dict to fill with default values
1304
    @rtype: dict
1305
    @return: a copy of the instance's osparams with missing keys filled from
1306
        the cluster defaults
1307

1308
    """
1309
    name_only = os_name.split("+", 1)[0]
1310
    # base OS
1311
    result = self.osparams.get(name_only, {})
1312
    # OS with variant
1313
    result = FillDict(result, self.osparams.get(os_name, {}))
1314
    # specified params
1315
    return FillDict(result, os_params)
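  # Illustrative example (hypothetical params): with
  #   self.osparams = {"debian": {"a": 1}, "debian+squeeze": {"a": 2}}
  # SimpleFillOS("debian+squeeze", {"b": 3}) layers base OS, variant and the
  # explicitly passed parameters, returning {"a": 2, "b": 3}.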
1316

    
1317
  def FillND(self, node, nodegroup):
1318
    """Return filled out ndparams for L{objects.NodeGroup} and L{object.Node}
1319

1320
    @type node: L{objects.Node}
1321
    @param node: A Node object to fill
1322
    @type nodegroup: L{objects.NodeGroup}
1323
    @param nodegroup: A NodeGroup object to fill
1324
    @return: a copy of the node's ndparams with defaults filled
1325

1326
    """
1327
    return self.SimpleFillND(nodegroup.FillND(node))
1328

    
1329
  def SimpleFillND(self, ndparams):
1330
    """Fill a given ndparams dict with defaults.
1331

1332
    @type ndparams: dict
1333
    @param ndparams: the dict to fill
1334
    @rtype: dict
1335
    @return: a copy of the passed in ndparams with missing keys filled
1336
        from the cluster defaults
1337

1338
    """
1339
    return FillDict(self.ndparams, ndparams)
1340

    
1341

    
1342
class BlockDevStatus(ConfigObject):
1343
  """Config object representing the status of a block device."""
1344
  __slots__ = [
1345
    "dev_path",
1346
    "major",
1347
    "minor",
1348
    "sync_percent",
1349
    "estimated_time",
1350
    "is_degraded",
1351
    "ldisk_status",
1352
    ]
1353

    
1354

    
1355
class ImportExportStatus(ConfigObject):
1356
  """Config object representing the status of an import or export."""
1357
  __slots__ = [
1358
    "recent_output",
1359
    "listen_port",
1360
    "connected",
1361
    "progress_mbytes",
1362
    "progress_throughput",
1363
    "progress_eta",
1364
    "progress_percent",
1365
    "exit_status",
1366
    "error_message",
1367
    ] + _TIMESTAMPS
1368

    
1369

    
1370
class ImportExportOptions(ConfigObject):
1371
  """Options for import/export daemon
1372

1373
  @ivar key_name: X509 key name (None for cluster certificate)
1374
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1375
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1376
  @ivar magic: Used to ensure the connection goes to the right disk
1377
  @ivar ipv6: Whether to use IPv6
1378
  @ivar connect_timeout: Number of seconds for establishing connection
1379

1380
  """
1381
  __slots__ = [
1382
    "key_name",
1383
    "ca_pem",
1384
    "compress",
1385
    "magic",
1386
    "ipv6",
1387
    "connect_timeout",
1388
    ]
1389

    
1390

    
1391
class ConfdRequest(ConfigObject):
1392
  """Object holding a confd request.
1393

1394
  @ivar protocol: confd protocol version
1395
  @ivar type: confd query type
1396
  @ivar query: query request
1397
  @ivar rsalt: requested reply salt
1398

1399
  """
1400
  __slots__ = [
1401
    "protocol",
1402
    "type",
1403
    "query",
1404
    "rsalt",
1405
    ]
1406

    
1407

    
1408
class ConfdReply(ConfigObject):
1409
  """Object holding a confd reply.
1410

1411
  @ivar protocol: confd protocol version
1412
  @ivar status: reply status code (ok, error)
1413
  @ivar answer: confd query reply
1414
  @ivar serial: configuration serial number
1415

1416
  """
1417
  __slots__ = [
1418
    "protocol",
1419
    "status",
1420
    "answer",
1421
    "serial",
1422
    ]
1423

    
1424

    
1425
class QueryFieldDefinition(ConfigObject):
1426
  """Object holding a query field definition.
1427

1428
  @ivar name: Field name
1429
  @ivar title: Human-readable title
1430
  @ivar kind: Field type
1431

1432
  """
1433
  __slots__ = [
1434
    "name",
1435
    "title",
1436
    "kind",
1437
    ]
1438

    
1439

    
1440
class _QueryResponseBase(ConfigObject):
1441
  __slots__ = [
1442
    "fields",
1443
    ]
1444

    
1445
  def ToDict(self):
1446
    """Custom function for serializing.
1447

1448
    """
1449
    mydict = super(_QueryResponseBase, self).ToDict()
1450
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1451
    return mydict
1452

    
1453
  @classmethod
1454
  def FromDict(cls, val):
1455
    """Custom function for de-serializing.
1456

1457
    """
1458
    obj = super(_QueryResponseBase, cls).FromDict(val)
1459
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1460
    return obj
1461

    
1462

    
1463
class QueryRequest(ConfigObject):
1464
  """Object holding a query request.
1465

1466
  """
1467
  __slots__ = [
1468
    "what",
1469
    "fields",
1470
    "filter",
1471
    ]
1472

    
1473

    
1474
class QueryResponse(_QueryResponseBase):
1475
  """Object holding the response to a query.
1476

1477
  @ivar fields: List of L{QueryFieldDefinition} objects
1478
  @ivar data: Requested data
1479

1480
  """
1481
  __slots__ = [
1482
    "data",
1483
    ]
1484

    
1485

    
1486
class QueryFieldsRequest(ConfigObject):
1487
  """Object holding a request for querying available fields.
1488

1489
  """
1490
  __slots__ = [
1491
    "what",
1492
    "fields",
1493
    ]
1494

    
1495

    
1496
class QueryFieldsResponse(_QueryResponseBase):
1497
  """Object holding the response to a query for fields.
1498

1499
  @ivar fields: List of L{QueryFieldDefinition} objects
1500

1501
  """
1502
  __slots__ = [
1503
    ]
1504

    
1505

    
1506
class InstanceConsole(ConfigObject):
1507
  """Object describing how to access the console of an instance.
1508

1509
  """
1510
  __slots__ = [
1511
    "instance",
1512
    "kind",
1513
    "message",
1514
    "host",
1515
    "port",
1516
    "user",
1517
    "command",
1518
    "display",
1519
    ]
1520

    
1521
  def Validate(self):
1522
    """Validates contents of this object.
1523

1524
    """
1525
    assert self.kind in constants.CONS_ALL, "Unknown console type"
1526
    assert self.instance, "Missing instance name"
1527
    assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
1528
    assert self.host or self.kind == constants.CONS_MESSAGE
1529
    assert self.port or self.kind in [constants.CONS_MESSAGE,
1530
                                      constants.CONS_SSH]
1531
    assert self.user or self.kind in [constants.CONS_MESSAGE,
1532
                                      constants.CONS_VNC]
1533
    assert self.command or self.kind in [constants.CONS_MESSAGE,
1534
                                         constants.CONS_VNC]
1535
    assert self.display or self.kind in [constants.CONS_MESSAGE,
1536
                                         constants.CONS_SSH]
1537
    return True
1538

    
1539

    
1540
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1541
  """Simple wrapper over ConfigParse that allows serialization.
1542

1543
  This class is basically ConfigParser.SafeConfigParser with two
1544
  additional methods that allow it to serialize/unserialize to/from a
1545
  buffer.
1546

1547
  """
1548
  def Dumps(self):
1549
    """Dump this instance and return the string representation."""
1550
    buf = StringIO()
1551
    self.write(buf)
1552
    return buf.getvalue()
1553

    
1554
  @classmethod
1555
  def Loads(cls, data):
1556
    """Load data from a string."""
1557
    buf = StringIO(data)
1558
    cfp = cls()
1559
    cfp.readfp(buf)
1560
    return cfp
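  # Illustrative usage (hypothetical section and values):
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("instance")
  #   cfg.set("instance", "name", "web1")
  #   data = cfg.Dumps()
  #   restored = SerializableConfigParser.Loads(data)
  # round-trips the configuration through a plain string.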