root / lib / objects.py @ b6135bbc

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
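
# Illustrative example (not part of the original module): FillDict overlays
# the custom values on a deep copy of the defaults and then drops the
# skipped keys, e.g.:
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) == {"b": 3}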


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
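
# Illustrative example (not part of the original module): every parameter
# group is filled from the defaults, e.g.:
#   UpgradeGroupedParams({constants.PP_DEFAULT: {"mode": "bridged"}},
#                        {"mode": "", "link": ""})
#   == {constants.PP_DEFAULT: {"mode": "bridged", "link": ""}}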


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the subclass should override the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the subclass should override the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
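
# Illustrative sketch (not part of the original module): a ConfigObject
# subclass round-trips through plain dicts, e.g.:
#   class _Example(ConfigObject):
#     __slots__ = ["name", "size"]
#   _Example(name="foo").ToDict() == {"name": "foo"}
#   _Example.FromDict({"name": "foo"}).size is None   # unset slots read as None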


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags set.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for taggable objects.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
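
# Illustrative example (not part of the original module): tags are kept as a
# set in memory but serialized as a list, e.g.:
#   node = Node(name="node1.example.com")
#   node.AddTag("rack:r1")
#   node.ToDict()["tags"] == ["rack:r1"]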


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data.

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is a disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
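
# Illustrative note (not part of the original module): ConfigData.UpgradeConfig
# is the hook that cascades default-filling over the cluster, nodes, instances
# and node groups when an older configuration is loaded.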


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/values
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)
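
# Illustrative example (not part of the original module, assuming bridged mode
# is one of the valid NIC modes): a bridged NIC must carry a link, so this
# passes validation while an empty link would raise errors.ConfigurationError:
#   NIC.CheckParameterSyntax({constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
#                             constants.NIC_LINK: "xen-br0"})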


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a non-negative
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result
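
  # Illustrative example (not part of the original module): an LVM-backed
  # disk keeps (vg_name, lv_name) in logical_id and therefore has a static
  # device path, e.g.:
  #   disk = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "disk0"),
  #               size=1024)
  #   disk.StaticDevPath() == "/dev/xenvg/disk0"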

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Recursively set the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

    
583
  def SetPhysicalID(self, target_node, nodes_ip):
584
    """Convert the logical ID to the physical ID.
585

586
    This is used only for drbd, which needs ip/port configuration.
587

588
    The routine descends down and updates its children also, because
589
    this helps when the only the top device is passed to the remote
590
    node.
591

592
    Arguments:
593
      - target_node: the node we wish to configure for
594
      - nodes_ip: a mapping of node name to ip
595

596
    The target_node must exist in in nodes_ip, and must be one of the
597
    nodes in the logical ID for each of the DRBD devices encountered
598
    in the disk tree.
599

600
    """
601
    if self.children:
602
      for child in self.children:
603
        child.SetPhysicalID(target_node, nodes_ip)
604

    
605
    if self.logical_id is None and self.physical_id is not None:
606
      return
607
    if self.dev_type in constants.LDS_DRBD:
608
      pnode, snode, port, pminor, sminor, secret = self.logical_id
609
      if target_node not in (pnode, snode):
610
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
611
                                        target_node)
612
      pnode_ip = nodes_ip.get(pnode, None)
613
      snode_ip = nodes_ip.get(snode, None)
614
      if pnode_ip is None or snode_ip is None:
615
        raise errors.ConfigurationError("Can't find primary or secondary node"
616
                                        " for %s" % str(self))
617
      p_data = (pnode_ip, port)
618
      s_data = (snode_ip, port)
619
      if pnode == target_node:
620
        self.physical_id = p_data + s_data + (pminor, secret)
621
      else: # it must be secondary, we tested above
622
        self.physical_id = s_data + p_data + (sminor, secret)
623
    else:
624
      self.physical_id = self.logical_id
625
    return
626

    
627
  def ToDict(self):
628
    """Disk-specific conversion to standard python types.
629

630
    This replaces the children lists of objects with lists of
631
    standard python types.
632

633
    """
634
    bo = super(Disk, self).ToDict()
635

    
636
    for attr in ("children",):
637
      alist = bo.get(attr, None)
638
      if alist:
639
        bo[attr] = self._ContainerToDicts(alist)
640
    return bo
641

    
642
  @classmethod
643
  def FromDict(cls, val):
644
    """Custom function for Disks
645

646
    """
647
    obj = super(Disk, cls).FromDict(val)
648
    if obj.children:
649
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
650
    if obj.logical_id and isinstance(obj.logical_id, list):
651
      obj.logical_id = tuple(obj.logical_id)
652
    if obj.physical_id and isinstance(obj.physical_id, list):
653
      obj.physical_id = tuple(obj.physical_id)
654
    if obj.dev_type in constants.LDS_DRBD:
655
      # we need a tuple of length six here
656
      if len(obj.logical_id) < 6:
657
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
658
    return obj
659

    
660
  def __str__(self):
661
    """Custom str() formatter for disks.
662

663
    """
664
    if self.dev_type == constants.LD_LV:
665
      val =  "<LogicalVolume(/dev/%s/%s" % self.logical_id
666
    elif self.dev_type in constants.LDS_DRBD:
667
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
668
      val = "<DRBD8("
669
      if self.physical_id is None:
670
        phy = "unconfigured"
671
      else:
672
        phy = ("configured as %s:%s %s:%s" %
673
               (self.physical_id[0], self.physical_id[1],
674
                self.physical_id[2], self.physical_id[3]))
675

    
676
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
677
              (node_a, minor_a, node_b, minor_b, port, phy))
678
      if self.children and self.children.count(None) == 0:
679
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
680
      else:
681
        val += "no local storage"
682
    else:
683
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
684
             (self.dev_type, self.logical_id, self.physical_id, self.children))
685
    if self.iv_name is None:
686
      val += ", not visible"
687
    else:
688
      val += ", visible as /dev/%s" % self.iv_name
689
    if isinstance(self.size, int):
690
      val += ", size=%dm)>" % self.size
691
    else:
692
      val += ", size='%s')>" % (self.size,)
693
    return val
694

    
695
  def Verify(self):
696
    """Checks that this disk is correctly configured.
697

698
    """
699
    all_errors = []
700
    if self.mode not in constants.DISK_ACCESS_SET:
701
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
702
    return all_errors
703

    
704
  def UpgradeConfig(self):
705
    """Fill defaults for missing configuration values.
706

707
    """
708
    if self.children:
709
      for child in self.children:
710
        child.UpgradeConfig()
711
    # add here config upgrade for this disk
712

    
713

    
714
class Instance(TaggableObject):
715
  """Config object representing an instance."""
716
  __slots__ = [
717
    "name",
718
    "primary_node",
719
    "os",
720
    "hypervisor",
721
    "hvparams",
722
    "beparams",
723
    "osparams",
724
    "admin_up",
725
    "nics",
726
    "disks",
727
    "disk_template",
728
    "network_port",
729
    "serial_no",
730
    ] + _TIMESTAMPS + _UUID
731

    
732
  def _ComputeSecondaryNodes(self):
733
    """Compute the list of secondary nodes.
734

735
    This is a simple wrapper over _ComputeAllNodes.
736

737
    """
738
    all_nodes = set(self._ComputeAllNodes())
739
    all_nodes.discard(self.primary_node)
740
    return tuple(all_nodes)
741

    
742
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
743
                             "List of secondary nodes")
744

    
745
  def _ComputeAllNodes(self):
746
    """Compute the list of all nodes.
747

748
    Since the data is already there (in the drbd disks), keeping it as
749
    a separate normal attribute is redundant and if not properly
750
    synchronised can cause problems. Thus it's better to compute it
751
    dynamically.
752

753
    """
754
    def _Helper(nodes, device):
755
      """Recursively computes nodes given a top device."""
756
      if device.dev_type in constants.LDS_DRBD:
757
        nodea, nodeb = device.logical_id[:2]
758
        nodes.add(nodea)
759
        nodes.add(nodeb)
760
      if device.children:
761
        for child in device.children:
762
          _Helper(nodes, child)
763

    
764
    all_nodes = set()
765
    all_nodes.add(self.primary_node)
766
    for device in self.disks:
767
      _Helper(all_nodes, device)
768
    return tuple(all_nodes)
769

    
770
  all_nodes = property(_ComputeAllNodes, None, None,
771
                       "List of all nodes of the instance")
772

    
773
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
774
    """Provide a mapping of nodes to LVs this instance owns.
775

776
    This function figures out what logical volumes should belong on
777
    which nodes, recursing through a device tree.
778

779
    @param lvmap: optional dictionary to receive the
780
        'node' : ['lv', ...] data.
781

782
    @return: None if lvmap arg is given, otherwise, a dictionary of
783
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
784
        volumeN is of the form "vg_name/lv_name", compatible with
785
        GetVolumeList()
786

787
    """
788
    if node == None:
789
      node = self.primary_node
790

    
791
    if lvmap is None:
792
      lvmap = { node : [] }
793
      ret = lvmap
794
    else:
795
      if not node in lvmap:
796
        lvmap[node] = []
797
      ret = None
798

    
799
    if not devs:
800
      devs = self.disks
801

    
802
    for dev in devs:
803
      if dev.dev_type == constants.LD_LV:
804
        lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])
805

    
806
      elif dev.dev_type in constants.LDS_DRBD:
807
        if dev.children:
808
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
809
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
810

    
811
      elif dev.children:
812
        self.MapLVsByNode(lvmap, dev.children, node)
813

    
814
    return ret
815

    
816
  def FindDisk(self, idx):
817
    """Find a disk given having a specified index.
818

819
    This is just a wrapper that does validation of the index.
820

821
    @type idx: int
822
    @param idx: the disk index
823
    @rtype: L{Disk}
824
    @return: the corresponding disk
825
    @raise errors.OpPrereqError: when the given index is not valid
826

827
    """
828
    try:
829
      idx = int(idx)
830
      return self.disks[idx]
831
    except (TypeError, ValueError), err:
832
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
833
                                 errors.ECODE_INVAL)
834
    except IndexError:
835
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
836
                                 " 0 to %d" % (idx, len(self.disks) - 1),
837
                                 errors.ECODE_INVAL)
838

    
839
  def ToDict(self):
840
    """Instance-specific conversion to standard python types.
841

842
    This replaces the children lists of objects with lists of standard
843
    python types.
844

845
    """
846
    bo = super(Instance, self).ToDict()
847

    
848
    for attr in "nics", "disks":
849
      alist = bo.get(attr, None)
850
      if alist:
851
        nlist = self._ContainerToDicts(alist)
852
      else:
853
        nlist = []
854
      bo[attr] = nlist
855
    return bo
856

    
857
  @classmethod
858
  def FromDict(cls, val):
859
    """Custom function for instances.
860

861
    """
862
    obj = super(Instance, cls).FromDict(val)
863
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
864
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
865
    return obj
866

    
867
  def UpgradeConfig(self):
868
    """Fill defaults for missing configuration values.
869

870
    """
871
    for nic in self.nics:
872
      nic.UpgradeConfig()
873
    for disk in self.disks:
874
      disk.UpgradeConfig()
875
    if self.hvparams:
876
      for key in constants.HVC_GLOBALS:
877
        try:
878
          del self.hvparams[key]
879
        except KeyError:
880
          pass
881
    if self.osparams is None:
882
      self.osparams = {}
883

    
884

    
885
class OS(ConfigObject):
886
  """Config object representing an operating system.
887

888
  @type supported_parameters: list
889
  @ivar supported_parameters: a list of tuples, name and description,
890
      containing the supported parameters by this OS
891

892
  @type VARIANT_DELIM: string
893
  @cvar VARIANT_DELIM: the variant delimiter
894

895
  """
896
  __slots__ = [
897
    "name",
898
    "path",
899
    "api_versions",
900
    "create_script",
901
    "export_script",
902
    "import_script",
903
    "rename_script",
904
    "verify_script",
905
    "supported_variants",
906
    "supported_parameters",
907
    ]
908

    
909
  VARIANT_DELIM = "+"
910

    
911
  @classmethod
912
  def SplitNameVariant(cls, name):
913
    """Splits the name into the proper name and variant.
914

915
    @param name: the OS (unprocessed) name
916
    @rtype: list
917
    @return: a list of two elements; if the original name didn't
918
        contain a variant, it's returned as an empty string
919

920
    """
921
    nv = name.split(cls.VARIANT_DELIM, 1)
922
    if len(nv) == 1:
923
      nv.append("")
924
    return nv
925

    
926
  @classmethod
927
  def GetName(cls, name):
928
    """Returns the proper name of the os (without the variant).
929

930
    @param name: the OS (unprocessed) name
931

932
    """
933
    return cls.SplitNameVariant(name)[0]
934

    
935
  @classmethod
936
  def GetVariant(cls, name):
937
    """Returns the variant the os (without the base name).
938

939
    @param name: the OS (unprocessed) name
940

941
    """
942
    return cls.SplitNameVariant(name)[1]
943

    
944

    
945
class Node(TaggableObject):
946
  """Config object representing a node."""
947
  __slots__ = [
948
    "name",
949
    "primary_ip",
950
    "secondary_ip",
951
    "serial_no",
952
    "master_candidate",
953
    "offline",
954
    "drained",
955
    "group",
956
    "master_capable",
957
    "vm_capable",
958
    "ndparams",
959
    "powered",
960
    ] + _TIMESTAMPS + _UUID
961

    
962
  def UpgradeConfig(self):
963
    """Fill defaults for missing configuration values.
964

965
    """
966
    # pylint: disable-msg=E0203
967
    # because these are "defined" via slots, not manually
968
    if self.master_capable is None:
969
      self.master_capable = True
970

    
971
    if self.vm_capable is None:
972
      self.vm_capable = True
973

    
974
    if self.ndparams is None:
975
      self.ndparams = {}
976

    
977
    if self.powered is None:
978
      self.powered = True
979

    
980

    
981
class NodeGroup(ConfigObject):
982
  """Config object representing a node group."""
983
  __slots__ = [
984
    "name",
985
    "members",
986
    "ndparams",
987
    "serial_no",
988
    "alloc_policy",
989
    ] + _TIMESTAMPS + _UUID
990

    
991
  def ToDict(self):
992
    """Custom function for nodegroup.
993

994
    This discards the members object, which gets recalculated and is only kept
995
    in memory.
996

997
    """
998
    mydict = super(NodeGroup, self).ToDict()
999
    del mydict["members"]
1000
    return mydict
1001

    
1002
  @classmethod
1003
  def FromDict(cls, val):
1004
    """Custom function for nodegroup.
1005

1006
    The members slot is initialized to an empty list, upon deserialization.
1007

1008
    """
1009
    obj = super(NodeGroup, cls).FromDict(val)
1010
    obj.members = []
1011
    return obj
1012

    
1013
  def UpgradeConfig(self):
1014
    """Fill defaults for missing configuration values.
1015

1016
    """
1017
    if self.ndparams is None:
1018
      self.ndparams = {}
1019

    
1020
    if self.serial_no is None:
1021
      self.serial_no = 1
1022

    
1023
    if self.alloc_policy is None:
1024
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1025

    
1026
    # We only update mtime, and not ctime, since we would not be able to provide
1027
    # a correct value for creation time.
1028
    if self.mtime is None:
1029
      self.mtime = time.time()
1030

    
1031
  def FillND(self, node):
1032
    """Return filled out ndparams for L{object.Node}
1033

1034
    @type node: L{objects.Node}
1035
    @param node: A Node object to fill
1036
    @return a copy of the node's ndparams with defaults filled
1037

1038
    """
1039
    return self.SimpleFillND(node.ndparams)
1040

    
1041
  def SimpleFillND(self, ndparams):
1042
    """Fill a given ndparams dict with defaults.
1043

1044
    @type ndparams: dict
1045
    @param ndparams: the dict to fill
1046
    @rtype: dict
1047
    @return: a copy of the passed in ndparams with missing keys filled
1048
        from the node group defaults
1049

1050
    """
1051
    return FillDict(self.ndparams, ndparams)
1052

    
1053

    
1054
class Cluster(TaggableObject):
1055
  """Config object representing the cluster."""
1056
  __slots__ = [
1057
    "serial_no",
1058
    "rsahostkeypub",
1059
    "highest_used_port",
1060
    "tcpudp_port_pool",
1061
    "mac_prefix",
1062
    "volume_group_name",
1063
    "reserved_lvs",
1064
    "drbd_usermode_helper",
1065
    "default_bridge",
1066
    "default_hypervisor",
1067
    "master_node",
1068
    "master_ip",
1069
    "master_netdev",
1070
    "cluster_name",
1071
    "file_storage_dir",
1072
    "shared_file_storage_dir",
1073
    "enabled_hypervisors",
1074
    "hvparams",
1075
    "os_hvp",
1076
    "beparams",
1077
    "osparams",
1078
    "nicparams",
1079
    "ndparams",
1080
    "candidate_pool_size",
1081
    "modify_etc_hosts",
1082
    "modify_ssh_setup",
1083
    "maintain_node_health",
1084
    "uid_pool",
1085
    "default_iallocator",
1086
    "hidden_os",
1087
    "blacklisted_os",
1088
    "primary_ip_family",
1089
    "prealloc_wipe_disks",
1090
    ] + _TIMESTAMPS + _UUID
1091

    
1092
  def UpgradeConfig(self):
1093
    """Fill defaults for missing configuration values.
1094

1095
    """
1096
    # pylint: disable-msg=E0203
1097
    # because these are "defined" via slots, not manually
1098
    if self.hvparams is None:
1099
      self.hvparams = constants.HVC_DEFAULTS
1100
    else:
1101
      for hypervisor in self.hvparams:
1102
        self.hvparams[hypervisor] = FillDict(
1103
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1104

    
1105
    if self.os_hvp is None:
1106
      self.os_hvp = {}
1107

    
1108
    # osparams added before 2.2
1109
    if self.osparams is None:
1110
      self.osparams = {}
1111

    
1112
    if self.ndparams is None:
1113
      self.ndparams = constants.NDC_DEFAULTS
1114

    
1115
    self.beparams = UpgradeGroupedParams(self.beparams,
1116
                                         constants.BEC_DEFAULTS)
1117
    migrate_default_bridge = not self.nicparams
1118
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1119
                                          constants.NICC_DEFAULTS)
1120
    if migrate_default_bridge:
1121
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1122
        self.default_bridge
1123

    
1124
    if self.modify_etc_hosts is None:
1125
      self.modify_etc_hosts = True
1126

    
1127
    if self.modify_ssh_setup is None:
1128
      self.modify_ssh_setup = True
1129

    
1130
    # default_bridge is no longer used in 2.1. The slot is left there to
1131
    # support auto-upgrading. It can be removed once we decide to deprecate
1132
    # upgrading straight from 2.0.
1133
    if self.default_bridge is not None:
1134
      self.default_bridge = None
1135

    
1136
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1137
    # code can be removed once upgrading straight from 2.0 is deprecated.
1138
    if self.default_hypervisor is not None:
1139
      self.enabled_hypervisors = ([self.default_hypervisor] +
1140
        [hvname for hvname in self.enabled_hypervisors
1141
         if hvname != self.default_hypervisor])
1142
      self.default_hypervisor = None
1143

    
1144
    # maintain_node_health added after 2.1.1
1145
    if self.maintain_node_health is None:
1146
      self.maintain_node_health = False
1147

    
1148
    if self.uid_pool is None:
1149
      self.uid_pool = []
1150

    
1151
    if self.default_iallocator is None:
1152
      self.default_iallocator = ""
1153

    
1154
    # reserved_lvs added before 2.2
1155
    if self.reserved_lvs is None:
1156
      self.reserved_lvs = []
1157

    
1158
    # hidden and blacklisted operating systems added before 2.2.1
1159
    if self.hidden_os is None:
1160
      self.hidden_os = []
1161

    
1162
    if self.blacklisted_os is None:
1163
      self.blacklisted_os = []
1164

    
1165
    # primary_ip_family added before 2.3
1166
    if self.primary_ip_family is None:
1167
      self.primary_ip_family = AF_INET
1168

    
1169
    if self.prealloc_wipe_disks is None:
1170
      self.prealloc_wipe_disks = False
1171

    
1172
  def ToDict(self):
1173
    """Custom function for cluster.
1174

1175
    """
1176
    mydict = super(Cluster, self).ToDict()
1177
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1178
    return mydict
1179

    
1180
  @classmethod
1181
  def FromDict(cls, val):
1182
    """Custom function for cluster.
1183

1184
    """
1185
    obj = super(Cluster, cls).FromDict(val)
1186
    if not isinstance(obj.tcpudp_port_pool, set):
1187
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1188
    return obj
1189

    
1190
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1191
    """Get the default hypervisor parameters for the cluster.
1192

1193
    @param hypervisor: the hypervisor name
1194
    @param os_name: if specified, we'll also update the defaults for this OS
1195
    @param skip_keys: if passed, list of keys not to use
1196
    @return: the defaults dict
1197

1198
    """
1199
    if skip_keys is None:
1200
      skip_keys = []
1201

    
1202
    fill_stack = [self.hvparams.get(hypervisor, {})]
1203
    if os_name is not None:
1204
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1205
      fill_stack.append(os_hvp)
1206

    
1207
    ret_dict = {}
1208
    for o_dict in fill_stack:
1209
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1210

    
1211
    return ret_dict
1212

    
1213
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1214
    """Fill a given hvparams dict with cluster defaults.
1215

1216
    @type hv_name: string
1217
    @param hv_name: the hypervisor to use
1218
    @type os_name: string
1219
    @param os_name: the OS to use for overriding the hypervisor defaults
1220
    @type skip_globals: boolean
1221
    @param skip_globals: if True, the global hypervisor parameters will
1222
        not be filled
1223
    @rtype: dict
1224
    @return: a copy of the given hvparams with missing keys filled from
1225
        the cluster defaults
1226

1227
    """
1228
    if skip_globals:
1229
      skip_keys = constants.HVC_GLOBALS
1230
    else:
1231
      skip_keys = []
1232

    
1233
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1234
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1235

    
1236
  def FillHV(self, instance, skip_globals=False):
1237
    """Fill an instance's hvparams dict with cluster defaults.
1238

1239
    @type instance: L{objects.Instance}
1240
    @param instance: the instance parameter to fill
1241
    @type skip_globals: boolean
1242
    @param skip_globals: if True, the global hypervisor parameters will
1243
        not be filled
1244
    @rtype: dict
1245
    @return: a copy of the instance's hvparams with missing keys filled from
1246
        the cluster defaults
1247

1248
    """
1249
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1250
                             instance.hvparams, skip_globals)
1251

    
1252
  def SimpleFillBE(self, beparams):
1253
    """Fill a given beparams dict with cluster defaults.
1254

1255
    @type beparams: dict
1256
    @param beparams: the dict to fill
1257
    @rtype: dict
1258
    @return: a copy of the passed in beparams with missing keys filled
1259
        from the cluster defaults
1260

1261
    """
1262
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1263

    
1264
  def FillBE(self, instance):
1265
    """Fill an instance's beparams dict with cluster defaults.
1266

1267
    @type instance: L{objects.Instance}
1268
    @param instance: the instance parameter to fill
1269
    @rtype: dict
1270
    @return: a copy of the instance's beparams with missing keys filled from
1271
        the cluster defaults
1272

1273
    """
1274
    return self.SimpleFillBE(instance.beparams)
1275

    
1276
  def SimpleFillNIC(self, nicparams):
1277
    """Fill a given nicparams dict with cluster defaults.
1278

1279
    @type nicparams: dict
1280
    @param nicparams: the dict to fill
1281
    @rtype: dict
1282
    @return: a copy of the passed in nicparams with missing keys filled
1283
        from the cluster defaults
1284

1285
    """
1286
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1287

    
1288
  def SimpleFillOS(self, os_name, os_params):
1289
    """Fill an instance's osparams dict with cluster defaults.
1290

1291
    @type os_name: string
1292
    @param os_name: the OS name to use
1293
    @type os_params: dict
1294
    @param os_params: the dict to fill with default values
1295
    @rtype: dict
1296
    @return: a copy of the instance's osparams with missing keys filled from
1297
        the cluster defaults
1298

1299
    """
1300
    name_only = os_name.split("+", 1)[0]
1301
    # base OS
1302
    result = self.osparams.get(name_only, {})
1303
    # OS with variant
1304
    result = FillDict(result, self.osparams.get(os_name, {}))
1305
    # specified params
1306
    return FillDict(result, os_params)
1307

    
1308
  def FillND(self, node, nodegroup):
1309
    """Return filled out ndparams for L{objects.NodeGroup} and L{object.Node}
1310

1311
    @type node: L{objects.Node}
1312
    @param node: A Node object to fill
1313
    @type nodegroup: L{objects.NodeGroup}
1314
    @param nodegroup: A Node object to fill
1315
    @return a copy of the node's ndparams with defaults filled
1316

1317
    """
1318
    return self.SimpleFillND(nodegroup.FillND(node))
1319

    
1320
  def SimpleFillND(self, ndparams):
1321
    """Fill a given ndparams dict with defaults.
1322

1323
    @type ndparams: dict
1324
    @param ndparams: the dict to fill
1325
    @rtype: dict
1326
    @return: a copy of the passed in ndparams with missing keys filled
1327
        from the cluster defaults
1328

1329
    """
1330
    return FillDict(self.ndparams, ndparams)
1331

    
1332

    
1333
class BlockDevStatus(ConfigObject):
1334
  """Config object representing the status of a block device."""
1335
  __slots__ = [
1336
    "dev_path",
1337
    "major",
1338
    "minor",
1339
    "sync_percent",
1340
    "estimated_time",
1341
    "is_degraded",
1342
    "ldisk_status",
1343
    ]
1344

    
1345

    
1346
class ImportExportStatus(ConfigObject):
1347
  """Config object representing the status of an import or export."""
1348
  __slots__ = [
1349
    "recent_output",
1350
    "listen_port",
1351
    "connected",
1352
    "progress_mbytes",
1353
    "progress_throughput",
1354
    "progress_eta",
1355
    "progress_percent",
1356
    "exit_status",
1357
    "error_message",
1358
    ] + _TIMESTAMPS
1359

    
1360

    
1361
class ImportExportOptions(ConfigObject):
1362
  """Options for import/export daemon
1363

1364
  @ivar key_name: X509 key name (None for cluster certificate)
1365
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1366
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1367
  @ivar magic: Used to ensure the connection goes to the right disk
1368
  @ivar ipv6: Whether to use IPv6
1369
  @ivar connect_timeout: Number of seconds for establishing connection
1370

1371
  """
1372
  __slots__ = [
1373
    "key_name",
1374
    "ca_pem",
1375
    "compress",
1376
    "magic",
1377
    "ipv6",
1378
    "connect_timeout",
1379
    ]
1380

    
1381

    
1382
class ConfdRequest(ConfigObject):
1383
  """Object holding a confd request.
1384

1385
  @ivar protocol: confd protocol version
1386
  @ivar type: confd query type
1387
  @ivar query: query request
1388
  @ivar rsalt: requested reply salt
1389

1390
  """
1391
  __slots__ = [
1392
    "protocol",
1393
    "type",
1394
    "query",
1395
    "rsalt",
1396
    ]
1397

    
1398

    
1399
class ConfdReply(ConfigObject):
1400
  """Object holding a confd reply.
1401

1402
  @ivar protocol: confd protocol version
1403
  @ivar status: reply status code (ok, error)
1404
  @ivar answer: confd query reply
1405
  @ivar serial: configuration serial number
1406

1407
  """
1408
  __slots__ = [
1409
    "protocol",
1410
    "status",
1411
    "answer",
1412
    "serial",
1413
    ]
1414

    
1415

    
1416
class QueryFieldDefinition(ConfigObject):
1417
  """Object holding a query field definition.
1418

1419
  @ivar name: Field name
1420
  @ivar title: Human-readable title
1421
  @ivar kind: Field type
1422
  @ivar doc: Human-readable description
1423

1424
  """
1425
  __slots__ = [
1426
    "name",
1427
    "title",
1428
    "kind",
1429
    "doc",
1430
    ]
1431

    
1432

    
1433
class _QueryResponseBase(ConfigObject):
1434
  __slots__ = [
1435
    "fields",
1436
    ]
1437

    
1438
  def ToDict(self):
1439
    """Custom function for serializing.
1440

1441
    """
1442
    mydict = super(_QueryResponseBase, self).ToDict()
1443
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1444
    return mydict
1445

    
1446
  @classmethod
1447
  def FromDict(cls, val):
1448
    """Custom function for de-serializing.
1449

1450
    """
1451
    obj = super(_QueryResponseBase, cls).FromDict(val)
1452
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1453
    return obj
1454

    
1455

    
1456
class QueryRequest(ConfigObject):
1457
  """Object holding a query request.
1458

1459
  """
1460
  __slots__ = [
1461
    "what",
1462
    "fields",
1463
    "filter",
1464
    ]
1465

    
1466

    
1467
class QueryResponse(_QueryResponseBase):
1468
  """Object holding the response to a query.
1469

1470
  @ivar fields: List of L{QueryFieldDefinition} objects
1471
  @ivar data: Requested data
1472

1473
  """
1474
  __slots__ = [
1475
    "data",
1476
    ]
1477

    
1478

    
1479
class QueryFieldsRequest(ConfigObject):
1480
  """Object holding a request for querying available fields.
1481

1482
  """
1483
  __slots__ = [
1484
    "what",
1485
    "fields",
1486
    ]
1487

    
1488

    
1489
class QueryFieldsResponse(_QueryResponseBase):
1490
  """Object holding the response to a query for fields.
1491

1492
  @ivar fields: List of L{QueryFieldDefinition} objects
1493

1494
  """
1495
  __slots__ = [
1496
    ]
1497

    
1498

    
1499
class InstanceConsole(ConfigObject):
1500
  """Object describing how to access the console of an instance.
1501

1502
  """
1503
  __slots__ = [
1504
    "instance",
1505
    "kind",
1506
    "message",
1507
    "host",
1508
    "port",
1509
    "user",
1510
    "command",
1511
    "display",
1512
    ]
1513

    
1514
  def Validate(self):
1515
    """Validates contents of this object.
1516

1517
    """
1518
    assert self.kind in constants.CONS_ALL, "Unknown console type"
1519
    assert self.instance, "Missing instance name"
1520
    assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
1521
    assert self.host or self.kind == constants.CONS_MESSAGE
1522
    assert self.port or self.kind in [constants.CONS_MESSAGE,
1523
                                      constants.CONS_SSH]
1524
    assert self.user or self.kind in [constants.CONS_MESSAGE,
1525
                                      constants.CONS_VNC]
1526
    assert self.command or self.kind in [constants.CONS_MESSAGE,
1527
                                         constants.CONS_VNC]
1528
    assert self.display or self.kind in [constants.CONS_MESSAGE,
1529
                                         constants.CONS_SSH]
1530
    return True
1531

    
1532

    
1533
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1534
  """Simple wrapper over ConfigParse that allows serialization.
1535

1536
  This class is basically ConfigParser.SafeConfigParser with two
1537
  additional methods that allow it to serialize/unserialize to/from a
1538
  buffer.
1539

1540
  """
1541
  def Dumps(self):
1542
    """Dump this instance and return the string representation."""
1543
    buf = StringIO()
1544
    self.write(buf)
1545
    return buf.getvalue()
1546

    
1547
  @classmethod
1548
  def Loads(cls, data):
1549
    """Load data from a string."""
1550
    buf = StringIO(data)
1551
    cfp = cls()
1552
    cfp.readfp(buf)
1553
    return cfp
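
# Illustrative example (not part of the original module): a Dumps/Loads round
# trip, assuming a parser with one section:
#   scp = SerializableConfigParser()
#   scp.add_section("node")
#   scp.set("node", "name", "node1.example.com")
#   SerializableConfigParser.Loads(scp.Dumps()).get("node", "name")
#   == "node1.example.com"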