#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
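
# Example (illustrative): FillDict layers the custom values over a deep copy
# of the defaults and can drop selected keys from the result; the parameter
# names below are made up for demonstration only:
#
#   >>> FillDict({"memory": 128, "vcpus": 1}, {"vcpus": 2}, skip_keys=["memory"])
#   {'vcpus': 2}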


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
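
# Example (illustrative): every existing parameter group is completed with the
# defaults, while a None target collapses to a single default group; the group
# and parameter names below are made up for demonstration only:
#
#   >>> UpgradeGroupedParams({"group1": {"a": 2}}, {"a": 1, "b": 1})
#   {'group1': {'a': 2, 'b': 1}}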


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
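
# Example (illustrative): a minimal, hypothetical ConfigObject subclass showing
# the generic behaviour; unset slots read as None and are left out of ToDict():
#
#   >>> class _Demo(ConfigObject):
#   ...   __slots__ = ["alpha", "beta"]
#   ...
#   >>> demo = _Demo(alpha=1)
#   >>> demo.beta is None
#   True
#   >>> demo.ToDict()
#   {'alpha': 1}
#   >>> _Demo.FromDict({"alpha": 1}).alpha
#   1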


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
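
# Example (illustrative): tag handling on a hypothetical TaggableObject
# subclass; tags are kept as a set in memory and serialized as a list:
#
#   >>> class _Tagged(TaggableObject):
#   ...   __slots__ = []
#   ...
#   >>> obj = _Tagged()
#   >>> obj.AddTag("env:prod")
#   >>> obj.ToDict()
#   {'tags': ['env:prod']}
#   >>> _Tagged.FromDict({"tags": ["env:prod"]}).tags
#   set(['env:prod'])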


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if a disk of the given type exists in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper, let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
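
# Example (illustrative): an LVM-backed Disk; the volume group and LV names
# below are made up for demonstration only:
#
#   >>> lv = Disk(dev_type=constants.LD_LV, size=1024,
#   ...           logical_id=("xenvg", "data-disk0"))
#   >>> lv.StaticDevPath()
#   '/dev/xenvg/data-disk0'
#   >>> lv.ComputeGrowth(512)
#   {'xenvg': 512}
#   >>> lv.RecordGrow(512)
#   >>> lv.size
#   1536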


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
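
# Example (illustrative): node lists are derived from the disk configuration;
# the node and instance names below are made up for demonstration only:
#
#   >>> drbd = Disk(dev_type=constants.LD_DRBD8, size=1024, children=[],
#   ...             logical_id=("node1", "node2", 11000, 0, 0, "secret"))
#   >>> inst = Instance(name="inst1.example.com", primary_node="node1",
#   ...                 disks=[drbd], nics=[])
#   >>> sorted(inst.all_nodes)
#   ['node1', 'node2']
#   >>> inst.secondary_nodes
#   ('node2',)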


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of (name, description) tuples
      describing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
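
# Example (illustrative): splitting OS names into name and variant; the OS
# name below is made up for demonstration only:
#
#   >>> OS.SplitNameVariant("debootstrap+secure")
#   ['debootstrap', 'secure']
#   >>> OS.GetVariant("debootstrap")
#   ''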


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True


class NodeGroup(ConfigObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "serial_no",
    "alloc_policy",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able to
    # provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    if self.ndparams is None:
      self.ndparams = constants.NDC_DEFAULTS

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
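
  # Example (illustrative): OS parameters are filled from the base OS entry,
  # then from the variant entry, then from the explicitly supplied values;
  # the OS names and parameters below are made up for demonstration only:
  #
  #   >>> cl = Cluster(osparams={"myos": {"proxy": "http://proxy", "dhcp": "yes"},
  #   ...                        "myos+secure": {"dhcp": "no"}})
  #   >>> cl.SimpleFillOS("myos+secure", {"proxy": ""})
  #   {'dhcp': 'no', 'proxy': ''}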

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
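
  # Example (illustrative): node parameters are filled group-first and then
  # completed with the cluster-wide defaults; the parameter names below are
  # made up for demonstration only:
  #
  #   >>> cl = Cluster(ndparams={"param_a": 1, "param_b": 2})
  #   >>> grp = NodeGroup(ndparams={"param_b": 3})
  #   >>> node = Node(ndparams={})
  #   >>> cl.FillND(node, grp)
  #   {'param_a': 1, 'param_b': 3}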


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "filter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SSH]
    return True


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
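
# Example (illustrative): round-tripping a configuration through Dumps/Loads;
# the section and values below are made up for demonstration only:
#
#   >>> scp = SerializableConfigParser()
#   >>> scp.add_section("instance")
#   >>> scp.set("instance", "name", "inst1.example.com")
#   >>> data = scp.Dumps()
#   >>> SerializableConfigParser.Loads(data).get("instance", "name")
#   'inst1.example.com'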