Statistics
| Branch: | Tag: | Revision:

root / lib / objects.py @ 6915fe26

History | View | Annotate | Download (44.1 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitely initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
import ConfigParser
37
import re
38
import copy
39
import time
40
from cStringIO import StringIO
41

    
42
from ganeti import errors
43
from ganeti import constants
44

    
45
from socket import AF_INET
46

    
47

    
48
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
49
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
50

    
51
_TIMESTAMPS = ["ctime", "mtime"]
52
_UUID = ["uuid"]
53

    
54

    
55
def FillDict(defaults_dict, custom_dict, skip_keys=None):
56
  """Basic function to apply settings on top a default dict.
57

58
  @type defaults_dict: dict
59
  @param defaults_dict: dictionary holding the default values
60
  @type custom_dict: dict
61
  @param custom_dict: dictionary holding customized value
62
  @type skip_keys: list
63
  @param skip_keys: which keys not to fill
64
  @rtype: dict
65
  @return: dict with the 'full' values
66

67
  """
68
  ret_dict = copy.deepcopy(defaults_dict)
69
  ret_dict.update(custom_dict)
70
  if skip_keys:
71
    for k in skip_keys:
72
      try:
73
        del ret_dict[k]
74
      except KeyError:
75
        pass
76
  return ret_dict
77

    
78

    
79
def UpgradeGroupedParams(target, defaults):
80
  """Update all groups for the target parameter.
81

82
  @type target: dict of dicts
83
  @param target: {group: {parameter: value}}
84
  @type defaults: dict
85
  @param defaults: default parameter values
86

87
  """
88
  if target is None:
89
    target = {constants.PP_DEFAULT: defaults}
90
  else:
91
    for group in target:
92
      target[group] = FillDict(defaults, target[group])
93
  return target
94

    
95

    
96
class ConfigObject(object):
97
  """A generic config object.
98

99
  It has the following properties:
100

101
    - provides somewhat safe recursive unpickling and pickling for its classes
102
    - unset attributes which are defined in slots are always returned
103
      as None instead of raising an error
104

105
  Classes derived from this must always declare __slots__ (we use many
106
  config objects and the memory reduction is useful)
107

108
  """
109
  __slots__ = []
110

    
111
  def __init__(self, **kwargs):
112
    for k, v in kwargs.iteritems():
113
      setattr(self, k, v)
114

    
115
  def __getattr__(self, name):
116
    if name not in self._all_slots():
117
      raise AttributeError("Invalid object attribute %s.%s" %
118
                           (type(self).__name__, name))
119
    return None
120

    
121
  def __setstate__(self, state):
122
    slots = self._all_slots()
123
    for name in state:
124
      if name in slots:
125
        setattr(self, name, state[name])
126

    
127
  @classmethod
128
  def _all_slots(cls):
129
    """Compute the list of all declared slots for a class.
130

131
    """
132
    slots = []
133
    for parent in cls.__mro__:
134
      slots.extend(getattr(parent, "__slots__", []))
135
    return slots
136

    
137
  def ToDict(self):
138
    """Convert to a dict holding only standard python types.
139

140
    The generic routine just dumps all of this object's attributes in
141
    a dict. It does not work if the class has children who are
142
    ConfigObjects themselves (e.g. the nics list in an Instance), in
143
    which case the object should subclass the function in order to
144
    make sure all objects returned are only standard python types.
145

146
    """
147
    result = {}
148
    for name in self._all_slots():
149
      value = getattr(self, name, None)
150
      if value is not None:
151
        result[name] = value
152
    return result
153

    
154
  __getstate__ = ToDict
155

    
156
  @classmethod
157
  def FromDict(cls, val):
158
    """Create an object from a dictionary.
159

160
    This generic routine takes a dict, instantiates a new instance of
161
    the given class, and sets attributes based on the dict content.
162

163
    As for `ToDict`, this does not work if the class has children
164
    who are ConfigObjects themselves (e.g. the nics list in an
165
    Instance), in which case the object should subclass the function
166
    and alter the objects.
167

168
    """
169
    if not isinstance(val, dict):
170
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
171
                                      " expected dict, got %s" % type(val))
172
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
173
    obj = cls(**val_str) # pylint: disable=W0142
174
    return obj
175

    
176
  @staticmethod
177
  def _ContainerToDicts(container):
178
    """Convert the elements of a container to standard python types.
179

180
    This method converts a container with elements derived from
181
    ConfigData to standard python types. If the container is a dict,
182
    we don't touch the keys, only the values.
183

184
    """
185
    if isinstance(container, dict):
186
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
187
    elif isinstance(container, (list, tuple, set, frozenset)):
188
      ret = [elem.ToDict() for elem in container]
189
    else:
190
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
191
                      type(container))
192
    return ret
193

    
194
  @staticmethod
195
  def _ContainerFromDicts(source, c_type, e_type):
196
    """Convert a container from standard python types.
197

198
    This method converts a container with standard python types to
199
    ConfigData objects. If the container is a dict, we don't touch the
200
    keys, only the values.
201

202
    """
203
    if not isinstance(c_type, type):
204
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
205
                      " not a type" % type(c_type))
206
    if source is None:
207
      source = c_type()
208
    if c_type is dict:
209
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
210
    elif c_type in (list, tuple, set, frozenset):
211
      ret = c_type([e_type.FromDict(elem) for elem in source])
212
    else:
213
      raise TypeError("Invalid container type %s passed to"
214
                      " _ContainerFromDicts" % c_type)
215
    return ret
216

    
217
  def Copy(self):
218
    """Makes a deep copy of the current object and its children.
219

220
    """
221
    dict_form = self.ToDict()
222
    clone_obj = self.__class__.FromDict(dict_form)
223
    return clone_obj
224

    
225
  def __repr__(self):
226
    """Implement __repr__ for ConfigObjects."""
227
    return repr(self.ToDict())
228

    
229
  def UpgradeConfig(self):
230
    """Fill defaults for missing configuration values.
231

232
    This method will be called at configuration load time, and its
233
    implementation will be object dependent.
234

235
    """
236
    pass
237

    
238

    
239
class TaggableObject(ConfigObject):
240
  """An generic class supporting tags.
241

242
  """
243
  __slots__ = ["tags"]
244
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
245

    
246
  @classmethod
247
  def ValidateTag(cls, tag):
248
    """Check if a tag is valid.
249

250
    If the tag is invalid, an errors.TagError will be raised. The
251
    function has no return value.
252

253
    """
254
    if not isinstance(tag, basestring):
255
      raise errors.TagError("Invalid tag type (not a string)")
256
    if len(tag) > constants.MAX_TAG_LEN:
257
      raise errors.TagError("Tag too long (>%d characters)" %
258
                            constants.MAX_TAG_LEN)
259
    if not tag:
260
      raise errors.TagError("Tags cannot be empty")
261
    if not cls.VALID_TAG_RE.match(tag):
262
      raise errors.TagError("Tag contains invalid characters")
263

    
264
  def GetTags(self):
265
    """Return the tags list.
266

267
    """
268
    tags = getattr(self, "tags", None)
269
    if tags is None:
270
      tags = self.tags = set()
271
    return tags
272

    
273
  def AddTag(self, tag):
274
    """Add a new tag.
275

276
    """
277
    self.ValidateTag(tag)
278
    tags = self.GetTags()
279
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
280
      raise errors.TagError("Too many tags")
281
    self.GetTags().add(tag)
282

    
283
  def RemoveTag(self, tag):
284
    """Remove a tag.
285

286
    """
287
    self.ValidateTag(tag)
288
    tags = self.GetTags()
289
    try:
290
      tags.remove(tag)
291
    except KeyError:
292
      raise errors.TagError("Tag not found")
293

    
294
  def ToDict(self):
295
    """Taggable-object-specific conversion to standard python types.
296

297
    This replaces the tags set with a list.
298

299
    """
300
    bo = super(TaggableObject, self).ToDict()
301

    
302
    tags = bo.get("tags", None)
303
    if isinstance(tags, set):
304
      bo["tags"] = list(tags)
305
    return bo
306

    
307
  @classmethod
308
  def FromDict(cls, val):
309
    """Custom function for instances.
310

311
    """
312
    obj = super(TaggableObject, cls).FromDict(val)
313
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
314
      obj.tags = set(obj.tags)
315
    return obj
316

    
317

    
318
class ConfigData(ConfigObject):
319
  """Top-level config object."""
320
  __slots__ = [
321
    "version",
322
    "cluster",
323
    "nodes",
324
    "nodegroups",
325
    "instances",
326
    "serial_no",
327
    ] + _TIMESTAMPS
328

    
329
  def ToDict(self):
330
    """Custom function for top-level config data.
331

332
    This just replaces the list of instances, nodes and the cluster
333
    with standard python types.
334

335
    """
336
    mydict = super(ConfigData, self).ToDict()
337
    mydict["cluster"] = mydict["cluster"].ToDict()
338
    for key in "nodes", "instances", "nodegroups":
339
      mydict[key] = self._ContainerToDicts(mydict[key])
340

    
341
    return mydict
342

    
343
  @classmethod
344
  def FromDict(cls, val):
345
    """Custom function for top-level config data
346

347
    """
348
    obj = super(ConfigData, cls).FromDict(val)
349
    obj.cluster = Cluster.FromDict(obj.cluster)
350
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
351
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
352
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
353
    return obj
354

    
355
  def HasAnyDiskOfType(self, dev_type):
356
    """Check if in there is at disk of the given type in the configuration.
357

358
    @type dev_type: L{constants.LDS_BLOCK}
359
    @param dev_type: the type to look for
360
    @rtype: boolean
361
    @return: boolean indicating if a disk of the given type was found or not
362

363
    """
364
    for instance in self.instances.values():
365
      for disk in instance.disks:
366
        if disk.IsBasedOnDiskType(dev_type):
367
          return True
368
    return False
369

    
370
  def UpgradeConfig(self):
371
    """Fill defaults for missing configuration values.
372

373
    """
374
    self.cluster.UpgradeConfig()
375
    for node in self.nodes.values():
376
      node.UpgradeConfig()
377
    for instance in self.instances.values():
378
      instance.UpgradeConfig()
379
    if self.nodegroups is None:
380
      self.nodegroups = {}
381
    for nodegroup in self.nodegroups.values():
382
      nodegroup.UpgradeConfig()
383
    if self.cluster.drbd_usermode_helper is None:
384
      # To decide if we set an helper let's check if at least one instance has
385
      # a DRBD disk. This does not cover all the possible scenarios but it
386
      # gives a good approximation.
387
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
388
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
389

    
390

    
391
class NIC(ConfigObject):
392
  """Config object representing a network card."""
393
  __slots__ = ["mac", "ip", "nicparams"]
394

    
395
  @classmethod
396
  def CheckParameterSyntax(cls, nicparams):
397
    """Check the given parameters for validity.
398

399
    @type nicparams:  dict
400
    @param nicparams: dictionary with parameter names/value
401
    @raise errors.ConfigurationError: when a parameter is not valid
402

403
    """
404
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
405
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
406
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
407
      raise errors.ConfigurationError(err)
408

    
409
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
410
        not nicparams[constants.NIC_LINK]):
411
      err = "Missing bridged nic link"
412
      raise errors.ConfigurationError(err)
413

    
414

    
415
class Disk(ConfigObject):
416
  """Config object representing a block device."""
417
  __slots__ = ["dev_type", "logical_id", "physical_id",
418
               "children", "iv_name", "size", "mode"]
419

    
420
  def CreateOnSecondary(self):
421
    """Test if this device needs to be created on a secondary node."""
422
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
423

    
424
  def AssembleOnSecondary(self):
425
    """Test if this device needs to be assembled on a secondary node."""
426
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
427

    
428
  def OpenOnSecondary(self):
429
    """Test if this device needs to be opened on a secondary node."""
430
    return self.dev_type in (constants.LD_LV,)
431

    
432
  def StaticDevPath(self):
433
    """Return the device path if this device type has a static one.
434

435
    Some devices (LVM for example) live always at the same /dev/ path,
436
    irrespective of their status. For such devices, we return this
437
    path, for others we return None.
438

439
    @warning: The path returned is not a normalized pathname; callers
440
        should check that it is a valid path.
441

442
    """
443
    if self.dev_type == constants.LD_LV:
444
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
445
    elif self.dev_type == constants.LD_BLOCKDEV:
446
      return self.logical_id[1]
447
    return None
448

    
449
  def ChildrenNeeded(self):
450
    """Compute the needed number of children for activation.
451

452
    This method will return either -1 (all children) or a positive
453
    number denoting the minimum number of children needed for
454
    activation (only mirrored devices will usually return >=0).
455

456
    Currently, only DRBD8 supports diskless activation (therefore we
457
    return 0), for all other we keep the previous semantics and return
458
    -1.
459

460
    """
461
    if self.dev_type == constants.LD_DRBD8:
462
      return 0
463
    return -1
464

    
465
  def IsBasedOnDiskType(self, dev_type):
466
    """Check if the disk or its children are based on the given type.
467

468
    @type dev_type: L{constants.LDS_BLOCK}
469
    @param dev_type: the type to look for
470
    @rtype: boolean
471
    @return: boolean indicating if a device of the given type was found or not
472

473
    """
474
    if self.children:
475
      for child in self.children:
476
        if child.IsBasedOnDiskType(dev_type):
477
          return True
478
    return self.dev_type == dev_type
479

    
480
  def GetNodes(self, node):
481
    """This function returns the nodes this device lives on.
482

483
    Given the node on which the parent of the device lives on (or, in
484
    case of a top-level device, the primary node of the devices'
485
    instance), this function will return a list of nodes on which this
486
    devices needs to (or can) be assembled.
487

488
    """
489
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
490
                         constants.LD_BLOCKDEV]:
491
      result = [node]
492
    elif self.dev_type in constants.LDS_DRBD:
493
      result = [self.logical_id[0], self.logical_id[1]]
494
      if node not in result:
495
        raise errors.ConfigurationError("DRBD device passed unknown node")
496
    else:
497
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
498
    return result
499

    
500
  def ComputeNodeTree(self, parent_node):
501
    """Compute the node/disk tree for this disk and its children.
502

503
    This method, given the node on which the parent disk lives, will
504
    return the list of all (node, disk) pairs which describe the disk
505
    tree in the most compact way. For example, a drbd/lvm stack
506
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
507
    which represents all the top-level devices on the nodes.
508

509
    """
510
    my_nodes = self.GetNodes(parent_node)
511
    result = [(node, self) for node in my_nodes]
512
    if not self.children:
513
      # leaf device
514
      return result
515
    for node in my_nodes:
516
      for child in self.children:
517
        child_result = child.ComputeNodeTree(node)
518
        if len(child_result) == 1:
519
          # child (and all its descendants) is simple, doesn't split
520
          # over multiple hosts, so we don't need to describe it, our
521
          # own entry for this node describes it completely
522
          continue
523
        else:
524
          # check if child nodes differ from my nodes; note that
525
          # subdisk can differ from the child itself, and be instead
526
          # one of its descendants
527
          for subnode, subdisk in child_result:
528
            if subnode not in my_nodes:
529
              result.append((subnode, subdisk))
530
            # otherwise child is under our own node, so we ignore this
531
            # entry (but probably the other results in the list will
532
            # be different)
533
    return result
534

    
535
  def ComputeGrowth(self, amount):
536
    """Compute the per-VG growth requirements.
537

538
    This only works for VG-based disks.
539

540
    @type amount: integer
541
    @param amount: the desired increase in (user-visible) disk space
542
    @rtype: dict
543
    @return: a dictionary of volume-groups and the required size
544

545
    """
546
    if self.dev_type == constants.LD_LV:
547
      return {self.logical_id[0]: amount}
548
    elif self.dev_type == constants.LD_DRBD8:
549
      if self.children:
550
        return self.children[0].ComputeGrowth(amount)
551
      else:
552
        return {}
553
    else:
554
      # Other disk types do not require VG space
555
      return {}
556

    
557
  def RecordGrow(self, amount):
558
    """Update the size of this disk after growth.
559

560
    This method recurses over the disks's children and updates their
561
    size correspondigly. The method needs to be kept in sync with the
562
    actual algorithms from bdev.
563

564
    """
565
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
566
      self.size += amount
567
    elif self.dev_type == constants.LD_DRBD8:
568
      if self.children:
569
        self.children[0].RecordGrow(amount)
570
      self.size += amount
571
    else:
572
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
573
                                   " disk type %s" % self.dev_type)
574

    
575
  def UnsetSize(self):
576
    """Sets recursively the size to zero for the disk and its children.
577

578
    """
579
    if self.children:
580
      for child in self.children:
581
        child.UnsetSize()
582
    self.size = 0
583

    
584
  def SetPhysicalID(self, target_node, nodes_ip):
585
    """Convert the logical ID to the physical ID.
586

587
    This is used only for drbd, which needs ip/port configuration.
588

589
    The routine descends down and updates its children also, because
590
    this helps when the only the top device is passed to the remote
591
    node.
592

593
    Arguments:
594
      - target_node: the node we wish to configure for
595
      - nodes_ip: a mapping of node name to ip
596

597
    The target_node must exist in in nodes_ip, and must be one of the
598
    nodes in the logical ID for each of the DRBD devices encountered
599
    in the disk tree.
600

601
    """
602
    if self.children:
603
      for child in self.children:
604
        child.SetPhysicalID(target_node, nodes_ip)
605

    
606
    if self.logical_id is None and self.physical_id is not None:
607
      return
608
    if self.dev_type in constants.LDS_DRBD:
609
      pnode, snode, port, pminor, sminor, secret = self.logical_id
610
      if target_node not in (pnode, snode):
611
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
612
                                        target_node)
613
      pnode_ip = nodes_ip.get(pnode, None)
614
      snode_ip = nodes_ip.get(snode, None)
615
      if pnode_ip is None or snode_ip is None:
616
        raise errors.ConfigurationError("Can't find primary or secondary node"
617
                                        " for %s" % str(self))
618
      p_data = (pnode_ip, port)
619
      s_data = (snode_ip, port)
620
      if pnode == target_node:
621
        self.physical_id = p_data + s_data + (pminor, secret)
622
      else: # it must be secondary, we tested above
623
        self.physical_id = s_data + p_data + (sminor, secret)
624
    else:
625
      self.physical_id = self.logical_id
626
    return
627

    
628
  def ToDict(self):
629
    """Disk-specific conversion to standard python types.
630

631
    This replaces the children lists of objects with lists of
632
    standard python types.
633

634
    """
635
    bo = super(Disk, self).ToDict()
636

    
637
    for attr in ("children",):
638
      alist = bo.get(attr, None)
639
      if alist:
640
        bo[attr] = self._ContainerToDicts(alist)
641
    return bo
642

    
643
  @classmethod
644
  def FromDict(cls, val):
645
    """Custom function for Disks
646

647
    """
648
    obj = super(Disk, cls).FromDict(val)
649
    if obj.children:
650
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
651
    if obj.logical_id and isinstance(obj.logical_id, list):
652
      obj.logical_id = tuple(obj.logical_id)
653
    if obj.physical_id and isinstance(obj.physical_id, list):
654
      obj.physical_id = tuple(obj.physical_id)
655
    if obj.dev_type in constants.LDS_DRBD:
656
      # we need a tuple of length six here
657
      if len(obj.logical_id) < 6:
658
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
659
    return obj
660

    
661
  def __str__(self):
662
    """Custom str() formatter for disks.
663

664
    """
665
    if self.dev_type == constants.LD_LV:
666
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
667
    elif self.dev_type in constants.LDS_DRBD:
668
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
669
      val = "<DRBD8("
670
      if self.physical_id is None:
671
        phy = "unconfigured"
672
      else:
673
        phy = ("configured as %s:%s %s:%s" %
674
               (self.physical_id[0], self.physical_id[1],
675
                self.physical_id[2], self.physical_id[3]))
676

    
677
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
678
              (node_a, minor_a, node_b, minor_b, port, phy))
679
      if self.children and self.children.count(None) == 0:
680
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
681
      else:
682
        val += "no local storage"
683
    else:
684
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
685
             (self.dev_type, self.logical_id, self.physical_id, self.children))
686
    if self.iv_name is None:
687
      val += ", not visible"
688
    else:
689
      val += ", visible as /dev/%s" % self.iv_name
690
    if isinstance(self.size, int):
691
      val += ", size=%dm)>" % self.size
692
    else:
693
      val += ", size='%s')>" % (self.size,)
694
    return val
695

    
696
  def Verify(self):
697
    """Checks that this disk is correctly configured.
698

699
    """
700
    all_errors = []
701
    if self.mode not in constants.DISK_ACCESS_SET:
702
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
703
    return all_errors
704

    
705
  def UpgradeConfig(self):
706
    """Fill defaults for missing configuration values.
707

708
    """
709
    if self.children:
710
      for child in self.children:
711
        child.UpgradeConfig()
712
    # add here config upgrade for this disk
713

    
714

    
715
class Instance(TaggableObject):
716
  """Config object representing an instance."""
717
  __slots__ = [
718
    "name",
719
    "primary_node",
720
    "os",
721
    "hypervisor",
722
    "hvparams",
723
    "beparams",
724
    "osparams",
725
    "admin_up",
726
    "nics",
727
    "disks",
728
    "disk_template",
729
    "network_port",
730
    "serial_no",
731
    ] + _TIMESTAMPS + _UUID
732

    
733
  def _ComputeSecondaryNodes(self):
734
    """Compute the list of secondary nodes.
735

736
    This is a simple wrapper over _ComputeAllNodes.
737

738
    """
739
    all_nodes = set(self._ComputeAllNodes())
740
    all_nodes.discard(self.primary_node)
741
    return tuple(all_nodes)
742

    
743
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
744
                             "List of secondary nodes")
745

    
746
  def _ComputeAllNodes(self):
747
    """Compute the list of all nodes.
748

749
    Since the data is already there (in the drbd disks), keeping it as
750
    a separate normal attribute is redundant and if not properly
751
    synchronised can cause problems. Thus it's better to compute it
752
    dynamically.
753

754
    """
755
    def _Helper(nodes, device):
756
      """Recursively computes nodes given a top device."""
757
      if device.dev_type in constants.LDS_DRBD:
758
        nodea, nodeb = device.logical_id[:2]
759
        nodes.add(nodea)
760
        nodes.add(nodeb)
761
      if device.children:
762
        for child in device.children:
763
          _Helper(nodes, child)
764

    
765
    all_nodes = set()
766
    all_nodes.add(self.primary_node)
767
    for device in self.disks:
768
      _Helper(all_nodes, device)
769
    return tuple(all_nodes)
770

    
771
  all_nodes = property(_ComputeAllNodes, None, None,
772
                       "List of all nodes of the instance")
773

    
774
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
775
    """Provide a mapping of nodes to LVs this instance owns.
776

777
    This function figures out what logical volumes should belong on
778
    which nodes, recursing through a device tree.
779

780
    @param lvmap: optional dictionary to receive the
781
        'node' : ['lv', ...] data.
782

783
    @return: None if lvmap arg is given, otherwise, a dictionary of
784
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
785
        volumeN is of the form "vg_name/lv_name", compatible with
786
        GetVolumeList()
787

788
    """
789
    if node == None:
790
      node = self.primary_node
791

    
792
    if lvmap is None:
793
      lvmap = {
794
        node: [],
795
        }
796
      ret = lvmap
797
    else:
798
      if not node in lvmap:
799
        lvmap[node] = []
800
      ret = None
801

    
802
    if not devs:
803
      devs = self.disks
804

    
805
    for dev in devs:
806
      if dev.dev_type == constants.LD_LV:
807
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
808

    
809
      elif dev.dev_type in constants.LDS_DRBD:
810
        if dev.children:
811
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
812
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
813

    
814
      elif dev.children:
815
        self.MapLVsByNode(lvmap, dev.children, node)
816

    
817
    return ret
818

    
819
  def FindDisk(self, idx):
820
    """Find a disk given having a specified index.
821

822
    This is just a wrapper that does validation of the index.
823

824
    @type idx: int
825
    @param idx: the disk index
826
    @rtype: L{Disk}
827
    @return: the corresponding disk
828
    @raise errors.OpPrereqError: when the given index is not valid
829

830
    """
831
    try:
832
      idx = int(idx)
833
      return self.disks[idx]
834
    except (TypeError, ValueError), err:
835
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
836
                                 errors.ECODE_INVAL)
837
    except IndexError:
838
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
839
                                 " 0 to %d" % (idx, len(self.disks) - 1),
840
                                 errors.ECODE_INVAL)
841

    
842
  def ToDict(self):
843
    """Instance-specific conversion to standard python types.
844

845
    This replaces the children lists of objects with lists of standard
846
    python types.
847

848
    """
849
    bo = super(Instance, self).ToDict()
850

    
851
    for attr in "nics", "disks":
852
      alist = bo.get(attr, None)
853
      if alist:
854
        nlist = self._ContainerToDicts(alist)
855
      else:
856
        nlist = []
857
      bo[attr] = nlist
858
    return bo
859

    
860
  @classmethod
861
  def FromDict(cls, val):
862
    """Custom function for instances.
863

864
    """
865
    obj = super(Instance, cls).FromDict(val)
866
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
867
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
868
    return obj
869

    
870
  def UpgradeConfig(self):
871
    """Fill defaults for missing configuration values.
872

873
    """
874
    for nic in self.nics:
875
      nic.UpgradeConfig()
876
    for disk in self.disks:
877
      disk.UpgradeConfig()
878
    if self.hvparams:
879
      for key in constants.HVC_GLOBALS:
880
        try:
881
          del self.hvparams[key]
882
        except KeyError:
883
          pass
884
    if self.osparams is None:
885
      self.osparams = {}
886

    
887

    
888
class OS(ConfigObject):
889
  """Config object representing an operating system.
890

891
  @type supported_parameters: list
892
  @ivar supported_parameters: a list of tuples, name and description,
893
      containing the supported parameters by this OS
894

895
  @type VARIANT_DELIM: string
896
  @cvar VARIANT_DELIM: the variant delimiter
897

898
  """
899
  __slots__ = [
900
    "name",
901
    "path",
902
    "api_versions",
903
    "create_script",
904
    "export_script",
905
    "import_script",
906
    "rename_script",
907
    "verify_script",
908
    "supported_variants",
909
    "supported_parameters",
910
    ]
911

    
912
  VARIANT_DELIM = "+"
913

    
914
  @classmethod
915
  def SplitNameVariant(cls, name):
916
    """Splits the name into the proper name and variant.
917

918
    @param name: the OS (unprocessed) name
919
    @rtype: list
920
    @return: a list of two elements; if the original name didn't
921
        contain a variant, it's returned as an empty string
922

923
    """
924
    nv = name.split(cls.VARIANT_DELIM, 1)
925
    if len(nv) == 1:
926
      nv.append("")
927
    return nv
928

    
929
  @classmethod
930
  def GetName(cls, name):
931
    """Returns the proper name of the os (without the variant).
932

933
    @param name: the OS (unprocessed) name
934

935
    """
936
    return cls.SplitNameVariant(name)[0]
937

    
938
  @classmethod
939
  def GetVariant(cls, name):
940
    """Returns the variant the os (without the base name).
941

942
    @param name: the OS (unprocessed) name
943

944
    """
945
    return cls.SplitNameVariant(name)[1]
946

    
947

    
948
class Node(TaggableObject):
949
  """Config object representing a node."""
950
  __slots__ = [
951
    "name",
952
    "primary_ip",
953
    "secondary_ip",
954
    "serial_no",
955
    "master_candidate",
956
    "offline",
957
    "drained",
958
    "group",
959
    "master_capable",
960
    "vm_capable",
961
    "ndparams",
962
    "powered",
963
    ] + _TIMESTAMPS + _UUID
964

    
965
  def UpgradeConfig(self):
966
    """Fill defaults for missing configuration values.
967

968
    """
969
    # pylint: disable=E0203
970
    # because these are "defined" via slots, not manually
971
    if self.master_capable is None:
972
      self.master_capable = True
973

    
974
    if self.vm_capable is None:
975
      self.vm_capable = True
976

    
977
    if self.ndparams is None:
978
      self.ndparams = {}
979

    
980
    if self.powered is None:
981
      self.powered = True
982

    
983

    
984
class NodeGroup(TaggableObject):
985
  """Config object representing a node group."""
986
  __slots__ = [
987
    "name",
988
    "members",
989
    "ndparams",
990
    "serial_no",
991
    "alloc_policy",
992
    ] + _TIMESTAMPS + _UUID
993

    
994
  def ToDict(self):
995
    """Custom function for nodegroup.
996

997
    This discards the members object, which gets recalculated and is only kept
998
    in memory.
999

1000
    """
1001
    mydict = super(NodeGroup, self).ToDict()
1002
    del mydict["members"]
1003
    return mydict
1004

    
1005
  @classmethod
1006
  def FromDict(cls, val):
1007
    """Custom function for nodegroup.
1008

1009
    The members slot is initialized to an empty list, upon deserialization.
1010

1011
    """
1012
    obj = super(NodeGroup, cls).FromDict(val)
1013
    obj.members = []
1014
    return obj
1015

    
1016
  def UpgradeConfig(self):
1017
    """Fill defaults for missing configuration values.
1018

1019
    """
1020
    if self.ndparams is None:
1021
      self.ndparams = {}
1022

    
1023
    if self.serial_no is None:
1024
      self.serial_no = 1
1025

    
1026
    if self.alloc_policy is None:
1027
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1028

    
1029
    # We only update mtime, and not ctime, since we would not be able to provide
1030
    # a correct value for creation time.
1031
    if self.mtime is None:
1032
      self.mtime = time.time()
1033

    
1034
  def FillND(self, node):
1035
    """Return filled out ndparams for L{objects.Node}
1036

1037
    @type node: L{objects.Node}
1038
    @param node: A Node object to fill
1039
    @return a copy of the node's ndparams with defaults filled
1040

1041
    """
1042
    return self.SimpleFillND(node.ndparams)
1043

    
1044
  def SimpleFillND(self, ndparams):
1045
    """Fill a given ndparams dict with defaults.
1046

1047
    @type ndparams: dict
1048
    @param ndparams: the dict to fill
1049
    @rtype: dict
1050
    @return: a copy of the passed in ndparams with missing keys filled
1051
        from the node group defaults
1052

1053
    """
1054
    return FillDict(self.ndparams, ndparams)
1055

    
1056

    
1057
class Cluster(TaggableObject):
1058
  """Config object representing the cluster."""
1059
  __slots__ = [
1060
    "serial_no",
1061
    "rsahostkeypub",
1062
    "highest_used_port",
1063
    "tcpudp_port_pool",
1064
    "mac_prefix",
1065
    "volume_group_name",
1066
    "reserved_lvs",
1067
    "drbd_usermode_helper",
1068
    "default_bridge",
1069
    "default_hypervisor",
1070
    "master_node",
1071
    "master_ip",
1072
    "master_netdev",
1073
    "master_netmask",
1074
    "cluster_name",
1075
    "file_storage_dir",
1076
    "shared_file_storage_dir",
1077
    "enabled_hypervisors",
1078
    "hvparams",
1079
    "os_hvp",
1080
    "beparams",
1081
    "osparams",
1082
    "nicparams",
1083
    "ndparams",
1084
    "candidate_pool_size",
1085
    "modify_etc_hosts",
1086
    "modify_ssh_setup",
1087
    "maintain_node_health",
1088
    "uid_pool",
1089
    "default_iallocator",
1090
    "hidden_os",
1091
    "blacklisted_os",
1092
    "primary_ip_family",
1093
    "prealloc_wipe_disks",
1094
    ] + _TIMESTAMPS + _UUID
1095

    
1096
  def UpgradeConfig(self):
1097
    """Fill defaults for missing configuration values.
1098

1099
    """
1100
    # pylint: disable=E0203
1101
    # because these are "defined" via slots, not manually
1102
    if self.hvparams is None:
1103
      self.hvparams = constants.HVC_DEFAULTS
1104
    else:
1105
      for hypervisor in self.hvparams:
1106
        self.hvparams[hypervisor] = FillDict(
1107
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1108

    
1109
    if self.os_hvp is None:
1110
      self.os_hvp = {}
1111

    
1112
    # osparams added before 2.2
1113
    if self.osparams is None:
1114
      self.osparams = {}
1115

    
1116
    if self.ndparams is None:
1117
      self.ndparams = constants.NDC_DEFAULTS
1118

    
1119
    self.beparams = UpgradeGroupedParams(self.beparams,
1120
                                         constants.BEC_DEFAULTS)
1121
    migrate_default_bridge = not self.nicparams
1122
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1123
                                          constants.NICC_DEFAULTS)
1124
    if migrate_default_bridge:
1125
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1126
        self.default_bridge
1127

    
1128
    if self.modify_etc_hosts is None:
1129
      self.modify_etc_hosts = True
1130

    
1131
    if self.modify_ssh_setup is None:
1132
      self.modify_ssh_setup = True
1133

    
1134
    # default_bridge is no longer used in 2.1. The slot is left there to
1135
    # support auto-upgrading. It can be removed once we decide to deprecate
1136
    # upgrading straight from 2.0.
1137
    if self.default_bridge is not None:
1138
      self.default_bridge = None
1139

    
1140
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1141
    # code can be removed once upgrading straight from 2.0 is deprecated.
1142
    if self.default_hypervisor is not None:
1143
      self.enabled_hypervisors = ([self.default_hypervisor] +
1144
        [hvname for hvname in self.enabled_hypervisors
1145
         if hvname != self.default_hypervisor])
1146
      self.default_hypervisor = None
1147

    
1148
    # maintain_node_health added after 2.1.1
1149
    if self.maintain_node_health is None:
1150
      self.maintain_node_health = False
1151

    
1152
    if self.uid_pool is None:
1153
      self.uid_pool = []
1154

    
1155
    if self.default_iallocator is None:
1156
      self.default_iallocator = ""
1157

    
1158
    # reserved_lvs added before 2.2
1159
    if self.reserved_lvs is None:
1160
      self.reserved_lvs = []
1161

    
1162
    # hidden and blacklisted operating systems added before 2.2.1
1163
    if self.hidden_os is None:
1164
      self.hidden_os = []
1165

    
1166
    if self.blacklisted_os is None:
1167
      self.blacklisted_os = []
1168

    
1169
    # primary_ip_family added before 2.3
1170
    if self.primary_ip_family is None:
1171
      self.primary_ip_family = AF_INET
1172

    
1173
    if self.prealloc_wipe_disks is None:
1174
      self.prealloc_wipe_disks = False
1175

    
1176
    # shared_file_storage_dir added before 2.5
1177
    if self.shared_file_storage_dir is None:
1178
      self.shared_file_storage_dir = ""
1179

    
1180
  def ToDict(self):
1181
    """Custom function for cluster.
1182

1183
    """
1184
    mydict = super(Cluster, self).ToDict()
1185
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1186
    return mydict
1187

    
1188
  @classmethod
1189
  def FromDict(cls, val):
1190
    """Custom function for cluster.
1191

1192
    """
1193
    obj = super(Cluster, cls).FromDict(val)
1194
    if not isinstance(obj.tcpudp_port_pool, set):
1195
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1196
    return obj
1197

    
1198
  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1199
    """Get the default hypervisor parameters for the cluster.
1200

1201
    @param hypervisor: the hypervisor name
1202
    @param os_name: if specified, we'll also update the defaults for this OS
1203
    @param skip_keys: if passed, list of keys not to use
1204
    @return: the defaults dict
1205

1206
    """
1207
    if skip_keys is None:
1208
      skip_keys = []
1209

    
1210
    fill_stack = [self.hvparams.get(hypervisor, {})]
1211
    if os_name is not None:
1212
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1213
      fill_stack.append(os_hvp)
1214

    
1215
    ret_dict = {}
1216
    for o_dict in fill_stack:
1217
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1218

    
1219
    return ret_dict
1220

    
1221
  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1222
    """Fill a given hvparams dict with cluster defaults.
1223

1224
    @type hv_name: string
1225
    @param hv_name: the hypervisor to use
1226
    @type os_name: string
1227
    @param os_name: the OS to use for overriding the hypervisor defaults
1228
    @type skip_globals: boolean
1229
    @param skip_globals: if True, the global hypervisor parameters will
1230
        not be filled
1231
    @rtype: dict
1232
    @return: a copy of the given hvparams with missing keys filled from
1233
        the cluster defaults
1234

1235
    """
1236
    if skip_globals:
1237
      skip_keys = constants.HVC_GLOBALS
1238
    else:
1239
      skip_keys = []
1240

    
1241
    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1242
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1243

    
1244
  def FillHV(self, instance, skip_globals=False):
1245
    """Fill an instance's hvparams dict with cluster defaults.
1246

1247
    @type instance: L{objects.Instance}
1248
    @param instance: the instance parameter to fill
1249
    @type skip_globals: boolean
1250
    @param skip_globals: if True, the global hypervisor parameters will
1251
        not be filled
1252
    @rtype: dict
1253
    @return: a copy of the instance's hvparams with missing keys filled from
1254
        the cluster defaults
1255

1256
    """
1257
    return self.SimpleFillHV(instance.hypervisor, instance.os,
1258
                             instance.hvparams, skip_globals)
1259

    
1260
  def SimpleFillBE(self, beparams):
1261
    """Fill a given beparams dict with cluster defaults.
1262

1263
    @type beparams: dict
1264
    @param beparams: the dict to fill
1265
    @rtype: dict
1266
    @return: a copy of the passed in beparams with missing keys filled
1267
        from the cluster defaults
1268

1269
    """
1270
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1271

    
1272
  def FillBE(self, instance):
1273
    """Fill an instance's beparams dict with cluster defaults.
1274

1275
    @type instance: L{objects.Instance}
1276
    @param instance: the instance parameter to fill
1277
    @rtype: dict
1278
    @return: a copy of the instance's beparams with missing keys filled from
1279
        the cluster defaults
1280

1281
    """
1282
    return self.SimpleFillBE(instance.beparams)
1283

    
1284
  def SimpleFillNIC(self, nicparams):
1285
    """Fill a given nicparams dict with cluster defaults.
1286

1287
    @type nicparams: dict
1288
    @param nicparams: the dict to fill
1289
    @rtype: dict
1290
    @return: a copy of the passed in nicparams with missing keys filled
1291
        from the cluster defaults
1292

1293
    """
1294
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1295

    
1296
  def SimpleFillOS(self, os_name, os_params):
1297
    """Fill an instance's osparams dict with cluster defaults.
1298

1299
    @type os_name: string
1300
    @param os_name: the OS name to use
1301
    @type os_params: dict
1302
    @param os_params: the dict to fill with default values
1303
    @rtype: dict
1304
    @return: a copy of the instance's osparams with missing keys filled from
1305
        the cluster defaults
1306

1307
    """
1308
    name_only = os_name.split("+", 1)[0]
1309
    # base OS
1310
    result = self.osparams.get(name_only, {})
1311
    # OS with variant
1312
    result = FillDict(result, self.osparams.get(os_name, {}))
1313
    # specified params
1314
    return FillDict(result, os_params)
1315

    
1316
  def FillND(self, node, nodegroup):
1317
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1318

1319
    @type node: L{objects.Node}
1320
    @param node: A Node object to fill
1321
    @type nodegroup: L{objects.NodeGroup}
1322
    @param nodegroup: A Node object to fill
1323
    @return a copy of the node's ndparams with defaults filled
1324

1325
    """
1326
    return self.SimpleFillND(nodegroup.FillND(node))
1327

    
1328
  def SimpleFillND(self, ndparams):
1329
    """Fill a given ndparams dict with defaults.
1330

1331
    @type ndparams: dict
1332
    @param ndparams: the dict to fill
1333
    @rtype: dict
1334
    @return: a copy of the passed in ndparams with missing keys filled
1335
        from the cluster defaults
1336

1337
    """
1338
    return FillDict(self.ndparams, ndparams)
1339

    
1340

    
1341
class BlockDevStatus(ConfigObject):
1342
  """Config object representing the status of a block device."""
1343
  __slots__ = [
1344
    "dev_path",
1345
    "major",
1346
    "minor",
1347
    "sync_percent",
1348
    "estimated_time",
1349
    "is_degraded",
1350
    "ldisk_status",
1351
    ]
1352

    
1353

    
1354
class ImportExportStatus(ConfigObject):
1355
  """Config object representing the status of an import or export."""
1356
  __slots__ = [
1357
    "recent_output",
1358
    "listen_port",
1359
    "connected",
1360
    "progress_mbytes",
1361
    "progress_throughput",
1362
    "progress_eta",
1363
    "progress_percent",
1364
    "exit_status",
1365
    "error_message",
1366
    ] + _TIMESTAMPS
1367

    
1368

    
1369
class ImportExportOptions(ConfigObject):
1370
  """Options for import/export daemon
1371

1372
  @ivar key_name: X509 key name (None for cluster certificate)
1373
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1374
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
1375
  @ivar magic: Used to ensure the connection goes to the right disk
1376
  @ivar ipv6: Whether to use IPv6
1377
  @ivar connect_timeout: Number of seconds for establishing connection
1378

1379
  """
1380
  __slots__ = [
1381
    "key_name",
1382
    "ca_pem",
1383
    "compress",
1384
    "magic",
1385
    "ipv6",
1386
    "connect_timeout",
1387
    ]
1388

    
1389

    
1390
class ConfdRequest(ConfigObject):
1391
  """Object holding a confd request.
1392

1393
  @ivar protocol: confd protocol version
1394
  @ivar type: confd query type
1395
  @ivar query: query request
1396
  @ivar rsalt: requested reply salt
1397

1398
  """
1399
  __slots__ = [
1400
    "protocol",
1401
    "type",
1402
    "query",
1403
    "rsalt",
1404
    ]
1405

    
1406

    
1407
class ConfdReply(ConfigObject):
1408
  """Object holding a confd reply.
1409

1410
  @ivar protocol: confd protocol version
1411
  @ivar status: reply status code (ok, error)
1412
  @ivar answer: confd query reply
1413
  @ivar serial: configuration serial number
1414

1415
  """
1416
  __slots__ = [
1417
    "protocol",
1418
    "status",
1419
    "answer",
1420
    "serial",
1421
    ]
1422

    
1423

    
1424
class QueryFieldDefinition(ConfigObject):
1425
  """Object holding a query field definition.
1426

1427
  @ivar name: Field name
1428
  @ivar title: Human-readable title
1429
  @ivar kind: Field type
1430
  @ivar doc: Human-readable description
1431

1432
  """
1433
  __slots__ = [
1434
    "name",
1435
    "title",
1436
    "kind",
1437
    "doc",
1438
    ]
1439

    
1440

    
1441
class _QueryResponseBase(ConfigObject):
1442
  __slots__ = [
1443
    "fields",
1444
    ]
1445

    
1446
  def ToDict(self):
1447
    """Custom function for serializing.
1448

1449
    """
1450
    mydict = super(_QueryResponseBase, self).ToDict()
1451
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1452
    return mydict
1453

    
1454
  @classmethod
1455
  def FromDict(cls, val):
1456
    """Custom function for de-serializing.
1457

1458
    """
1459
    obj = super(_QueryResponseBase, cls).FromDict(val)
1460
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1461
    return obj
1462

    
1463

    
1464
class QueryRequest(ConfigObject):
1465
  """Object holding a query request.
1466

1467
  """
1468
  __slots__ = [
1469
    "what",
1470
    "fields",
1471
    "qfilter",
1472
    ]
1473

    
1474

    
1475
class QueryResponse(_QueryResponseBase):
1476
  """Object holding the response to a query.
1477

1478
  @ivar fields: List of L{QueryFieldDefinition} objects
1479
  @ivar data: Requested data
1480

1481
  """
1482
  __slots__ = [
1483
    "data",
1484
    ]
1485

    
1486

    
1487
class QueryFieldsRequest(ConfigObject):
1488
  """Object holding a request for querying available fields.
1489

1490
  """
1491
  __slots__ = [
1492
    "what",
1493
    "fields",
1494
    ]
1495

    
1496

    
1497
class QueryFieldsResponse(_QueryResponseBase):
1498
  """Object holding the response to a query for fields.
1499

1500
  @ivar fields: List of L{QueryFieldDefinition} objects
1501

1502
  """
1503
  __slots__ = [
1504
    ]
1505

    
1506

    
1507
class MigrationStatus(ConfigObject):
1508
  """Object holding the status of a migration.
1509

1510
  """
1511
  __slots__ = [
1512
    "status",
1513
    "transferred_ram",
1514
    "total_ram",
1515
    ]
1516

    
1517

    
1518
class InstanceConsole(ConfigObject):
1519
  """Object describing how to access the console of an instance.
1520

1521
  """
1522
  __slots__ = [
1523
    "instance",
1524
    "kind",
1525
    "message",
1526
    "host",
1527
    "port",
1528
    "user",
1529
    "command",
1530
    "display",
1531
    ]
1532

    
1533
  def Validate(self):
1534
    """Validates contents of this object.
1535

1536
    """
1537
    assert self.kind in constants.CONS_ALL, "Unknown console type"
1538
    assert self.instance, "Missing instance name"
1539
    assert self.message or self.kind in [constants.CONS_SSH,
1540
                                         constants.CONS_SPICE,
1541
                                         constants.CONS_VNC]
1542
    assert self.host or self.kind == constants.CONS_MESSAGE
1543
    assert self.port or self.kind in [constants.CONS_MESSAGE,
1544
                                      constants.CONS_SSH]
1545
    assert self.user or self.kind in [constants.CONS_MESSAGE,
1546
                                      constants.CONS_SPICE,
1547
                                      constants.CONS_VNC]
1548
    assert self.command or self.kind in [constants.CONS_MESSAGE,
1549
                                         constants.CONS_SPICE,
1550
                                         constants.CONS_VNC]
1551
    assert self.display or self.kind in [constants.CONS_MESSAGE,
1552
                                         constants.CONS_SPICE,
1553
                                         constants.CONS_SSH]
1554
    return True
1555

    
1556

    
1557
class SerializableConfigParser(ConfigParser.SafeConfigParser):
1558
  """Simple wrapper over ConfigParse that allows serialization.
1559

1560
  This class is basically ConfigParser.SafeConfigParser with two
1561
  additional methods that allow it to serialize/unserialize to/from a
1562
  buffer.
1563

1564
  """
1565
  def Dumps(self):
1566
    """Dump this instance and return the string representation."""
1567
    buf = StringIO()
1568
    self.write(buf)
1569
    return buf.getvalue()
1570

    
1571
  @classmethod
1572
  def Loads(cls, data):
1573
    """Load data from a string."""
1574
    buf = StringIO(data)
1575
    cfp = cls()
1576
    cfp.readfp(buf)
1577
    return cfp