root / lib / objects.py @ fffe93e7

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""

# pylint: disable-msg=E0203,W0201

# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members

# W0201: Attribute '%s' defined outside __init__

import ConfigParser
import re
import copy
import time
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants

from socket import AF_INET


__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]

_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]


def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
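
# Example (illustrative): FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])
# returns {"b": 3}; custom values override the defaults and skipped keys are
# dropped from the result.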


def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
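
# Example (illustrative): UpgradeGroupedParams({"grp1": {"a": 2}}, {"a": 1, "b": 0})
# returns {"grp1": {"a": 2, "b": 0}}, while a target of None becomes
# {constants.PP_DEFAULT: defaults}.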


class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj
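
  # Example (illustrative sketch, using the NIC class defined below):
  #   nic = NIC(mac="aa:00:00:35:12:9f")
  #   nic.ToDict()  ->  {"mac": "aa:00:00:35:12:9f"}  (unset slots are skipped)
  #   NIC.FromDict(nic.ToDict()).mac  ->  "aa:00:00:35:12:9f"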

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if source is None:
      source = c_type()
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret
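
  # Example (illustrative): _ContainerFromDicts([{"mac": "aa:00:00:35:12:9f"}],
  # list, NIC) yields a list with one NIC object; _ContainerToDicts on that
  # list returns the original list of plain dicts.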

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass


class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")
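
  # Example (illustrative): for any TaggableObject subclass instance obj,
  #   obj.AddTag("group:web")     ->  obj.GetTags() == set(["group:web"])
  #   obj.RemoveTag("group:web")  ->  obj.GetTags() == set()
  # AddTag("a b") would raise errors.TagError, since spaces are not allowed
  # by VALID_TAG_RE.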

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj


class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = cls._ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is a disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}


class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "network", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)


class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all others we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
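
  # Example (illustrative): for a DRBD8 disk mirrored between node1 and node2
  # with LVM children, ComputeNodeTree("node1") returns only
  # [("node1", drbd_disk), ("node2", drbd_disk)], since the LV children live
  # on the same nodes and need no separate entries.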

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}
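
  # Example (illustrative): for an LV disk with logical_id ("xenvg", "disk0"),
  # ComputeGrowth(1024) returns {"xenvg": 1024}; for a DRBD8 disk the request
  # is delegated to its data child (children[0]).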

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Recursively sets the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return
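
  # Example (illustrative): for a DRBD8 disk whose logical_id is
  # (pnode, snode, port, pminor, sminor, secret), SetPhysicalID(pnode, nodes_ip)
  # sets physical_id to (pnode_ip, port, snode_ip, port, pminor, secret); the
  # local endpoint always comes first.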

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk


class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if node not in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret
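
  # Example (illustrative): for an instance whose primary node is "node1" and
  # which has a single LV disk with logical_id ("xenvg", "disk0"),
  # MapLVsByNode() returns {"node1": ["xenvg/disk0"]}.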

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}


class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the parameters supported by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, the variant is returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv
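
  # Example (illustrative): SplitNameVariant("debian+squeeze") returns
  # ["debian", "squeeze"], while SplitNameVariant("debian") returns
  # ["debian", ""].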

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the OS (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant of the OS (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]


class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}

    if self.powered is None:
      self.powered = True


class NodeGroup(ConfigObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "serial_no",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able to provide
    # a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)


class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    if self.ndparams is None:
      self.ndparams = constants.NDC_DEFAULTS

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
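
  # Example (illustrative): with cluster hvparams
  # {"kvm": {"kernel_path": "/boot/vmlinuz", "acpi": True}} and os_hvp
  # {"debian": {"kvm": {"acpi": False}}}, GetHVDefaults("kvm", "debian")
  # returns {"kernel_path": "/boot/vmlinuz", "acpi": False}: the OS-specific
  # values override the cluster-wide ones.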

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
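
  # Example (illustrative): for os_name "debian+squeeze" the values are layered
  # in increasing priority: osparams["debian"], then osparams["debian+squeeze"],
  # then the explicitly passed os_params.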

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill from
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)



class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  __slots__ = [
    "what",
    "fields",
    "filter",
    ]


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SSH]
    return True


class Network(ConfigObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "family",
    "network",
    "gateway",
    "size",
    "reservations",
    "ext_reservations",
    ] + _UUID


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
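
# Example (illustrative) round-trip for SerializableConfigParser:
#   scp = SerializableConfigParser()
#   scp.add_section("test")
#   scp.set("test", "key", "value")
#   data = scp.Dumps()
#   SerializableConfigParser.Loads(data).get("test", "key")  ->  "value"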