#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
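

# Illustrative sketch (not part of the original module): how FillDict layers
# customized values over a copy of the defaults while dropping skipped keys.
# The parameter names and values below are made up for the example.
def _ExampleFillDict():
  """Demonstrate FillDict with plain dicts (editor-added example)."""
  defaults = {"vcpus": 1, "memory": 128, "auto_balance": True}
  custom = {"memory": 512}
  filled = FillDict(defaults, custom, skip_keys=["auto_balance"])
  # The result keeps the default vcpus, takes the customized memory and
  # drops the skipped key; the defaults dict itself is left untouched.
  assert filled == {"vcpus": 1, "memory": 512}
  assert defaults["memory"] == 128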


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
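

# Illustrative sketch (not part of the original module): UpgradeGroupedParams
# either creates the default group or fills every existing group from the
# defaults. The group name "group1" and the values are made up.
def _ExampleUpgradeGroupedParams():
  """Demonstrate UpgradeGroupedParams (editor-added example)."""
  defaults = {"a": 1, "b": 2}
  # A missing target is replaced by a single default group.
  upgraded = UpgradeGroupedParams(None, defaults)
  assert upgraded == {constants.PP_DEFAULT: defaults}
  # Existing groups are completed with the missing defaults.
  filled = UpgradeGroupedParams({"group1": {"b": 7}}, defaults)
  assert filled["group1"] == {"a": 1, "b": 7}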


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
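

# Illustrative sketch (not part of the original module): a minimal
# ConfigObject subclass and a ToDict/FromDict round trip. The class name
# and the slots below are made up for the example.
class _ExamplePoint(ConfigObject):
  """Editor-added example subclass; not used by Ganeti itself."""
  __slots__ = ["x", "y"]


def _ExampleConfigObjectRoundTrip():
  """Demonstrate ToDict/FromDict and the None default for unset slots."""
  point = _ExamplePoint(x=1)
  # Unset slots read as None and are omitted from the dict form.
  assert point.y is None
  assert point.ToDict() == {"x": 1}
  clone = _ExamplePoint.FromDict(point.ToDict())
  assert clone.x == 1 and clone.y is None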


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
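

# Illustrative sketch (not part of the original module): tag handling on a
# minimal TaggableObject subclass. The class name and tag values are made up.
class _ExampleTagged(TaggableObject):
  """Editor-added example subclass; not used by Ganeti itself."""
  __slots__ = []


def _ExampleTags():
  """Demonstrate AddTag/RemoveTag and the set-to-list serialization."""
  obj = _ExampleTagged()
  obj.AddTag("web")
  obj.AddTag("production")
  assert obj.GetTags() == set(["web", "production"])
  # ToDict() turns the tags set into a plain list, FromDict() restores a set.
  assert sorted(obj.ToDict()["tags"]) == ["production", "web"]
  obj.RemoveTag("web")
  assert obj.GetTags() == set(["production"])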


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if the configuration contains a disk of the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
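

# Illustrative sketch (not part of the original module): scanning the
# instance disks of a ConfigData object. Instance and Disk are defined
# further down in this module; the names and values are made up.
def _ExampleHasAnyDiskOfType():
  """Demonstrate ConfigData.HasAnyDiskOfType (editor-added example)."""
  lv = Disk(dev_type=constants.LD_LV, size=1024, logical_id=("xenvg", "data"))
  cfg = ConfigData(instances={"inst1": Instance(name="inst1", disks=[lv])})
  assert cfg.HasAnyDiskOfType(constants.LD_LV)
  assert not cfg.HasAnyDiskOfType(constants.LD_DRBD8)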


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams:  dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)
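

# Illustrative sketch (not part of the original module): validating NIC
# parameters. The link name "xen-br0" is made up; the keys come from the
# nicparams dictionaries used throughout this module.
def _ExampleCheckNicParams():
  """Demonstrate NIC.CheckParameterSyntax (editor-added example)."""
  good = {
    constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
    constants.NIC_LINK: "xen-br0",
    }
  NIC.CheckParameterSyntax(good)  # passes silently
  bad = {
    constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
    constants.NIC_LINK: "",
    }
  try:
    NIC.CheckParameterSyntax(bad)
  except errors.ConfigurationError:
    pass  # a bridged NIC without a link is rejected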


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

    
627
  def ToDict(self):
628
    """Disk-specific conversion to standard python types.
629

630
    This replaces the children lists of objects with lists of
631
    standard python types.
632

633
    """
634
    bo = super(Disk, self).ToDict()
635

    
636
    for attr in ("children",):
637
      alist = bo.get(attr, None)
638
      if alist:
639
        bo[attr] = self._ContainerToDicts(alist)
640
    return bo
641

    
642
  @classmethod
643
  def FromDict(cls, val):
644
    """Custom function for Disks
645

646
    """
647
    obj = super(Disk, cls).FromDict(val)
648
    if obj.children:
649
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
650
    if obj.logical_id and isinstance(obj.logical_id, list):
651
      obj.logical_id = tuple(obj.logical_id)
652
    if obj.physical_id and isinstance(obj.physical_id, list):
653
      obj.physical_id = tuple(obj.physical_id)
654
    if obj.dev_type in constants.LDS_DRBD:
655
      # we need a tuple of length six here
656
      if len(obj.logical_id) < 6:
657
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
658
    return obj
659

    
660
  def __str__(self):
661
    """Custom str() formatter for disks.
662

663
    """
664
    if self.dev_type == constants.LD_LV:
665
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
666
    elif self.dev_type in constants.LDS_DRBD:
667
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
668
      val = "<DRBD8("
669
      if self.physical_id is None:
670
        phy = "unconfigured"
671
      else:
672
        phy = ("configured as %s:%s %s:%s" %
673
               (self.physical_id[0], self.physical_id[1],
674
                self.physical_id[2], self.physical_id[3]))
675

    
676
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
677
              (node_a, minor_a, node_b, minor_b, port, phy))
678
      if self.children and self.children.count(None) == 0:
679
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
680
      else:
681
        val += "no local storage"
682
    else:
683
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
684
             (self.dev_type, self.logical_id, self.physical_id, self.children))
685
    if self.iv_name is None:
686
      val += ", not visible"
687
    else:
688
      val += ", visible as /dev/%s" % self.iv_name
689
    if isinstance(self.size, int):
690
      val += ", size=%dm)>" % self.size
691
    else:
692
      val += ", size='%s')>" % (self.size,)
693
    return val
694

    
695
  def Verify(self):
696
    """Checks that this disk is correctly configured.
697

698
    """
699
    all_errors = []
700
    if self.mode not in constants.DISK_ACCESS_SET:
701
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
702
    return all_errors
703

    
704
  def UpgradeConfig(self):
705
    """Fill defaults for missing configuration values.
706

707
    """
708
    if self.children:
709
      for child in self.children:
710
        child.UpgradeConfig()
711
    # add here config upgrade for this disk
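

# Illustrative sketch (not part of the original module): a DRBD8 device
# mirroring two local LVs, its compact node/disk tree and the logical to
# physical ID conversion. Node names, the VG, port, minors, the secret and
# the IP addresses are made up.
def _ExampleDrbdDisk():
  """Demonstrate Disk.ComputeNodeTree and Disk.SetPhysicalID."""
  data = Disk(dev_type=constants.LD_LV, size=1024,
              logical_id=("xenvg", "data"))
  meta = Disk(dev_type=constants.LD_LV, size=128,
              logical_id=("xenvg", "meta"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              logical_id=("node1", "node2", 11000, 0, 0, "secret"),
              children=[data, meta])
  # The LV children live on the same nodes as the DRBD device, so the
  # compact tree lists the top-level device once per node.
  assert drbd.ComputeNodeTree("node1") == [("node1", drbd), ("node2", drbd)]
  # Converting the logical ID to a physical ID needs the node IPs; seen from
  # the primary it is (own ip, port, peer ip, port, own minor, secret).
  nodes_ip = {"node1": "192.0.2.1", "node2": "192.0.2.2"}
  drbd.SetPhysicalID("node1", nodes_ip)
  assert drbd.physical_id == ("192.0.2.1", 11000, "192.0.2.2", 11000,
                              0, "secret")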


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = {
        node: [],
        }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given a specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
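

# Illustrative sketch (not part of the original module): the computed node
# properties and the LV mapping of an instance with one DRBD disk. All names
# and IDs are made up.
def _ExampleInstanceNodes():
  """Demonstrate secondary_nodes, all_nodes and MapLVsByNode."""
  data = Disk(dev_type=constants.LD_LV, size=1024,
              logical_id=("xenvg", "data"))
  meta = Disk(dev_type=constants.LD_LV, size=128,
              logical_id=("xenvg", "meta"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              logical_id=("node1", "node2", 11000, 0, 0, "secret"),
              children=[data, meta])
  inst = Instance(name="inst1.example.com", primary_node="node1",
                  disks=[drbd], nics=[])
  # The node lists are derived from the DRBD logical IDs, not stored.
  assert set(inst.all_nodes) == set(["node1", "node2"])
  assert inst.secondary_nodes == ("node2",)
  # Both mirror nodes are expected to hold the backing LVs.
  expected = {"node1": ["xenvg/data", "xenvg/meta"],
              "node2": ["xenvg/data", "xenvg/meta"]}
  assert inst.MapLVsByNode() == expected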


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the OS (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the OS (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
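

# Illustrative sketch (not part of the original module): splitting an OS name
# of the form "name+variant". The OS name "debootstrap+testing" is made up.
def _ExampleOsNameVariant():
  """Demonstrate OS.SplitNameVariant/GetName/GetVariant."""
  assert OS.SplitNameVariant("debootstrap+testing") == ["debootstrap",
                                                        "testing"]
  assert OS.GetName("debootstrap+testing") == "debootstrap"
  # Without a VARIANT_DELIM the variant is reported as an empty string.
  assert OS.GetVariant("debootstrap") == ""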


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True


class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "serial_no",
    "alloc_policy",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able to provide
    # a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
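

# Illustrative sketch (not part of the original module): filling node
# parameters from the node group defaults. The parameter name and values
# are made up.
def _ExampleNodeGroupFill():
  """Demonstrate NodeGroup.FillND/SimpleFillND (editor-added example)."""
  group = NodeGroup(name="default", ndparams={"oob_program": "/bin/true"})
  node = Node(name="node1", ndparams={})
  # Keys missing from the node's ndparams are taken from the group.
  assert group.FillND(node) == {"oob_program": "/bin/true"}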


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    if self.ndparams is None:
      self.ndparams = constants.NDC_DEFAULTS

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
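

# Illustrative sketch (not part of the original module): how instance-level
# parameters are layered on top of the cluster defaults. The parameter names
# and values are made up; real clusters use the constants.BE*/HV* keys.
def _ExampleClusterFill():
  """Demonstrate Cluster.SimpleFillBE and Cluster.SimpleFillOS."""
  cluster = Cluster(
    beparams={constants.PP_DEFAULT: {"memory": 128, "vcpus": 1}},
    osparams={
      "debootstrap": {"dhcp": "no", "partition_style": "msdos"},
      "debootstrap+testing": {"dhcp": "yes"},
      },
    )
  # Backend parameters: instance overrides win, defaults fill the gaps.
  assert cluster.SimpleFillBE({"memory": 512}) == {"memory": 512, "vcpus": 1}
  # OS parameters are layered base OS -> variant -> explicitly given params.
  filled = cluster.SimpleFillOS("debootstrap+testing", {"dhcp": "static"})
  assert filled == {"dhcp": "static", "partition_style": "msdos"}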


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "filter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SSH]
    return True


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
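

# Illustrative sketch (not part of the original module): round-tripping a
# SerializableConfigParser through its string form. The section and option
# names are made up.
def _ExampleSerializableConfigParser():
  """Demonstrate SerializableConfigParser.Dumps/Loads."""
  scp = SerializableConfigParser()
  scp.add_section("node")
  scp.set("node", "name", "node1.example.com")
  data = scp.Dumps()  # plain INI-style text
  loaded = SerializableConfigParser.Loads(data)
  assert loaded.get("node", "name") == "node1.example.com"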