1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import time
42
from cStringIO import StringIO
43

    
44
from ganeti import errors
45
from ganeti import constants
46
from ganeti import netutils
47
from ganeti import utils
48

    
49
from socket import AF_INET
50

    
51

    
52
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
53
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54

    
55
_TIMESTAMPS = ["ctime", "mtime"]
56
_UUID = ["uuid"]
57

    
58
# constants used to create InstancePolicy dictionary
59
TISPECS_GROUP_TYPES = {
60
  constants.ISPECS_MIN: constants.VTYPE_INT,
61
  constants.ISPECS_MAX: constants.VTYPE_INT,
62
  }
63

    
64
TISPECS_CLUSTER_TYPES = {
65
  constants.ISPECS_MIN: constants.VTYPE_INT,
66
  constants.ISPECS_MAX: constants.VTYPE_INT,
67
  constants.ISPECS_STD: constants.VTYPE_INT,
68
  }
69

    
70

    
71
def FillDict(defaults_dict, custom_dict, skip_keys=None):
72
  """Basic function to apply settings on top a default dict.
73

74
  @type defaults_dict: dict
75
  @param defaults_dict: dictionary holding the default values
76
  @type custom_dict: dict
77
  @param custom_dict: dictionary holding customized value
78
  @type skip_keys: list
79
  @param skip_keys: which keys not to fill
80
  @rtype: dict
81
  @return: dict with the 'full' values
82

83
  """
84
  ret_dict = copy.deepcopy(defaults_dict)
85
  ret_dict.update(custom_dict)
86
  if skip_keys:
87
    for k in skip_keys:
88
      try:
89
        del ret_dict[k]
90
      except KeyError:
91
        pass
92
  return ret_dict
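
# Illustrative sketch, not part of the original module: FillDict overlays the
# custom values on a deep copy of the defaults, and skip_keys drops entries
# from the merged result. The helper name and the sample dicts are invented.
def _ExampleFillDictUsage():
  """Shows FillDict semantics on plain dicts."""
  defaults = {"mem": 128, "vcpus": 1}
  custom = {"mem": 512}
  assert FillDict(defaults, custom) == {"mem": 512, "vcpus": 1}
  assert FillDict(defaults, custom, skip_keys=["vcpus"]) == {"mem": 512}
  # the defaults dict itself is never modified
  assert defaults == {"mem": 128, "vcpus": 1}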
93

    
94

    
95
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
96
  """Fills an instance policy with defaults.
97

98
  """
99
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
100
  ret_dict = {}
101
  for key in constants.IPOLICY_ISPECS:
102
    ret_dict[key] = FillDict(default_ipolicy[key],
103
                             custom_ipolicy.get(key, {}),
104
                             skip_keys=skip_keys)
105
  # list items
106
  for key in [constants.IPOLICY_DTS]:
107
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
108
  # other items which we know we can directly copy (immutables)
109
  for key in constants.IPOLICY_PARAMETERS:
110
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
111

    
112
  return ret_dict
113

    
114

    
115
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
116
  """Fills the disk parameter defaults.
117

118
  @see: L{FillDict} for parameters and return value
119

120
  """
121
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
122

    
123
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
124
                             skip_keys=skip_keys))
125
              for dt in constants.DISK_TEMPLATES)
126

    
127

    
128
def UpgradeGroupedParams(target, defaults):
129
  """Update all groups for the target parameter.
130

131
  @type target: dict of dicts
132
  @param target: {group: {parameter: value}}
133
  @type defaults: dict
134
  @param defaults: default parameter values
135

136
  """
137
  if target is None:
138
    target = {constants.PP_DEFAULT: defaults}
139
  else:
140
    for group in target:
141
      target[group] = FillDict(defaults, target[group])
142
  return target
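
# Illustrative sketch, not part of the original module: UpgradeGroupedParams
# either seeds a missing grouped-parameters dict with a single default group
# or fills every existing group with the defaults. Names are invented.
def _ExampleUpgradeGroupedParams():
  """Shows both branches of UpgradeGroupedParams on plain dicts."""
  defaults = {"rate": 100, "barriers": True}
  assert UpgradeGroupedParams(None, defaults) == \
    {constants.PP_DEFAULT: defaults}
  groups = {"fast": {"rate": 500}}
  assert UpgradeGroupedParams(groups, defaults) == \
    {"fast": {"rate": 500, "barriers": True}}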
143

    
144

    
145
def UpgradeBeParams(target):
146
  """Update the be parameters dict to the new format.
147

148
  @type target: dict
149
  @param target: "be" parameters dict
150

151
  """
152
  if constants.BE_MEMORY in target:
153
    memory = target[constants.BE_MEMORY]
154
    target[constants.BE_MAXMEM] = memory
155
    target[constants.BE_MINMEM] = memory
156
    del target[constants.BE_MEMORY]
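
# Illustrative sketch, not part of the original module: the legacy single
# "memory" backend parameter is split in place into the maxmem/minmem pair.
def _ExampleUpgradeBeParams():
  """Shows the in-place be-parameter upgrade."""
  beparams = {constants.BE_MEMORY: 512}
  UpgradeBeParams(beparams)
  assert beparams == {constants.BE_MAXMEM: 512, constants.BE_MINMEM: 512}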
157

    
158

    
159
def UpgradeDiskParams(diskparams):
160
  """Upgrade the disk parameters.
161

162
  @type diskparams: dict
163
  @param diskparams: disk parameters to upgrade
164
  @rtype: dict
165
  @return: the upgraded disk parameters dict
166

167
  """
168
  if not diskparams:
169
    result = {}
170
  else:
171
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
172

    
173
  return result
174

    
175

    
176
def UpgradeNDParams(ndparams):
177
  """Upgrade ndparams structure.
178

179
  @type ndparams: dict
180
  @param ndparams: disk parameters to upgrade
181
  @rtype: dict
182
  @return: the upgraded node parameters dict
183

184
  """
185
  if ndparams is None:
186
    ndparams = {}
187

    
188
  return FillDict(constants.NDC_DEFAULTS, ndparams)
189

    
190

    
191
def MakeEmptyIPolicy():
192
  """Create empty IPolicy dictionary.
193

194
  """
195
  return dict([
196
    (constants.ISPECS_MIN, {}),
197
    (constants.ISPECS_MAX, {}),
198
    (constants.ISPECS_STD, {}),
199
    ])
200

    
201

    
202
def CreateIPolicyFromOpts(ispecs_mem_size=None,
203
                          ispecs_cpu_count=None,
204
                          ispecs_disk_count=None,
205
                          ispecs_disk_size=None,
206
                          ispecs_nic_count=None,
207
                          ipolicy_disk_templates=None,
208
                          ipolicy_vcpu_ratio=None,
209
                          group_ipolicy=False,
210
                          allowed_values=None,
211
                          fill_all=False):
212
  """Creation of instance policy based on command line options.
213

214
  @param fill_all: whether for cluster policies we should ensure that
215
    all values are filled
216

217

218
  """
219
  # prepare ipolicy dict
220
  ipolicy_transposed = {
221
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
222
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
223
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
224
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
225
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
226
    }
227

    
228
  # first, check that the values given are correct
229
  if group_ipolicy:
230
    forced_type = TISPECS_GROUP_TYPES
231
  else:
232
    forced_type = TISPECS_CLUSTER_TYPES
233

    
234
  for specs in ipolicy_transposed.values():
235
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
236

    
237
  # then transpose
238
  ipolicy_out = MakeEmptyIPolicy()
239
  for name, specs in ipolicy_transposed.iteritems():
240
    assert name in constants.ISPECS_PARAMETERS
241
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
242
      ipolicy_out[key][name] = val
243

    
244
  # no filldict for non-dicts
245
  if not group_ipolicy and fill_all:
246
    if ipolicy_disk_templates is None:
247
      ipolicy_disk_templates = constants.DISK_TEMPLATES
248
    if ipolicy_vcpu_ratio is None:
249
      ipolicy_vcpu_ratio = \
250
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
251
  if ipolicy_disk_templates is not None:
252
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
253
  if ipolicy_vcpu_ratio is not None:
254
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
255

    
256
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
257

    
258
  return ipolicy_out
259

    
260

    
261
class ConfigObject(object):
262
  """A generic config object.
263

264
  It has the following properties:
265

266
    - provides somewhat safe recursive unpickling and pickling for its classes
267
    - unset attributes which are defined in slots are always returned
268
      as None instead of raising an error
269

270
  Classes derived from this must always declare __slots__ (we use many
271
  config objects and the memory reduction is useful)
272

273
  """
274
  __slots__ = []
275

    
276
  def __init__(self, **kwargs):
277
    for k, v in kwargs.iteritems():
278
      setattr(self, k, v)
279

    
280
  def __getattr__(self, name):
281
    if name not in self._all_slots():
282
      raise AttributeError("Invalid object attribute %s.%s" %
283
                           (type(self).__name__, name))
284
    return None
285

    
286
  def __setstate__(self, state):
287
    slots = self._all_slots()
288
    for name in state:
289
      if name in slots:
290
        setattr(self, name, state[name])
291

    
292
  @classmethod
293
  def _all_slots(cls):
294
    """Compute the list of all declared slots for a class.
295

296
    """
297
    slots = []
298
    for parent in cls.__mro__:
299
      slots.extend(getattr(parent, "__slots__", []))
300
    return slots
301

    
302
  #: Public getter for the defined slots
303
  GetAllSlots = _all_slots
304

    
305
  def ToDict(self):
306
    """Convert to a dict holding only standard python types.
307

308
    The generic routine just dumps all of this object's attributes in
309
    a dict. It does not work if the class has children who are
310
    ConfigObjects themselves (e.g. the nics list in an Instance), in
311
    which case the object should subclass the function in order to
312
    make sure all objects returned are only standard python types.
313

314
    """
315
    result = {}
316
    for name in self._all_slots():
317
      value = getattr(self, name, None)
318
      if value is not None:
319
        result[name] = value
320
    return result
321

    
322
  __getstate__ = ToDict
323

    
324
  @classmethod
325
  def FromDict(cls, val):
326
    """Create an object from a dictionary.
327

328
    This generic routine takes a dict, instantiates a new instance of
329
    the given class, and sets attributes based on the dict content.
330

331
    As for `ToDict`, this does not work if the class has children
332
    who are ConfigObjects themselves (e.g. the nics list in an
333
    Instance), in which case the object should subclass the function
334
    and alter the objects.
335

336
    """
337
    if not isinstance(val, dict):
338
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
339
                                      " expected dict, got %s" % type(val))
340
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
341
    obj = cls(**val_str) # pylint: disable=W0142
342
    return obj
343

    
344
  @staticmethod
345
  def _ContainerToDicts(container):
346
    """Convert the elements of a container to standard python types.
347

348
    This method converts a container with elements derived from
349
    ConfigData to standard python types. If the container is a dict,
350
    we don't touch the keys, only the values.
351

352
    """
353
    if isinstance(container, dict):
354
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
355
    elif isinstance(container, (list, tuple, set, frozenset)):
356
      ret = [elem.ToDict() for elem in container]
357
    else:
358
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
359
                      type(container))
360
    return ret
361

    
362
  @staticmethod
363
  def _ContainerFromDicts(source, c_type, e_type):
364
    """Convert a container from standard python types.
365

366
    This method converts a container with standard python types to
367
    ConfigData objects. If the container is a dict, we don't touch the
368
    keys, only the values.
369

370
    """
371
    if not isinstance(c_type, type):
372
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
373
                      " not a type" % type(c_type))
374
    if source is None:
375
      source = c_type()
376
    if c_type is dict:
377
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
378
    elif c_type in (list, tuple, set, frozenset):
379
      ret = c_type([e_type.FromDict(elem) for elem in source])
380
    else:
381
      raise TypeError("Invalid container type %s passed to"
382
                      " _ContainerFromDicts" % c_type)
383
    return ret
384

    
385
  def Copy(self):
386
    """Makes a deep copy of the current object and its children.
387

388
    """
389
    dict_form = self.ToDict()
390
    clone_obj = self.__class__.FromDict(dict_form)
391
    return clone_obj
392

    
393
  def __repr__(self):
394
    """Implement __repr__ for ConfigObjects."""
395
    return repr(self.ToDict())
396

    
397
  def UpgradeConfig(self):
398
    """Fill defaults for missing configuration values.
399

400
    This method will be called at configuration load time, and its
401
    implementation will be object dependent.
402

403
    """
404
    pass
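
# Illustrative sketch, not part of the original module: a minimal ConfigObject
# subclass and the ToDict/FromDict round trip; the class name and its slots
# are invented for the example.
class _ExamplePortSpec(ConfigObject):
  __slots__ = ["name", "port"]


def _ExampleConfigObjectRoundTrip():
  """Shows serialization of a simple ConfigObject subclass."""
  spec = _ExamplePortSpec(name="vnc", port=5900)
  assert spec.ToDict() == {"name": "vnc", "port": 5900}
  clone = _ExamplePortSpec.FromDict(spec.ToDict())
  assert clone.port == 5900
  # unset slots read as None instead of raising AttributeError
  assert _ExamplePortSpec().name is None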
405

    
406

    
407
class TaggableObject(ConfigObject):
408
  """An generic class supporting tags.
409

410
  """
411
  __slots__ = ["tags"]
412
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
413

    
414
  @classmethod
415
  def ValidateTag(cls, tag):
416
    """Check if a tag is valid.
417

418
    If the tag is invalid, an errors.TagError will be raised. The
419
    function has no return value.
420

421
    """
422
    if not isinstance(tag, basestring):
423
      raise errors.TagError("Invalid tag type (not a string)")
424
    if len(tag) > constants.MAX_TAG_LEN:
425
      raise errors.TagError("Tag too long (>%d characters)" %
426
                            constants.MAX_TAG_LEN)
427
    if not tag:
428
      raise errors.TagError("Tags cannot be empty")
429
    if not cls.VALID_TAG_RE.match(tag):
430
      raise errors.TagError("Tag contains invalid characters")
431

    
432
  def GetTags(self):
433
    """Return the tags list.
434

435
    """
436
    tags = getattr(self, "tags", None)
437
    if tags is None:
438
      tags = self.tags = set()
439
    return tags
440

    
441
  def AddTag(self, tag):
442
    """Add a new tag.
443

444
    """
445
    self.ValidateTag(tag)
446
    tags = self.GetTags()
447
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
448
      raise errors.TagError("Too many tags")
449
    self.GetTags().add(tag)
450

    
451
  def RemoveTag(self, tag):
452
    """Remove a tag.
453

454
    """
455
    self.ValidateTag(tag)
456
    tags = self.GetTags()
457
    try:
458
      tags.remove(tag)
459
    except KeyError:
460
      raise errors.TagError("Tag not found")
461

    
462
  def ToDict(self):
463
    """Taggable-object-specific conversion to standard python types.
464

465
    This replaces the tags set with a list.
466

467
    """
468
    bo = super(TaggableObject, self).ToDict()
469

    
470
    tags = bo.get("tags", None)
471
    if isinstance(tags, set):
472
      bo["tags"] = list(tags)
473
    return bo
474

    
475
  @classmethod
476
  def FromDict(cls, val):
477
    """Custom function for instances.
478

479
    """
480
    obj = super(TaggableObject, cls).FromDict(val)
481
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
482
      obj.tags = set(obj.tags)
483
    return obj
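
# Illustrative sketch, not part of the original module: tag handling on a
# TaggableObject subclass; the subclass and the tag values are invented.
class _ExampleTagged(TaggableObject):
  __slots__ = []


def _ExampleTagUsage():
  """Shows tag validation, addition and removal."""
  obj = _ExampleTagged()
  obj.AddTag("role:web")
  assert "role:web" in obj.GetTags()
  obj.RemoveTag("role:web")
  try:
    obj.AddTag("white space")  # rejected by VALID_TAG_RE
  except errors.TagError:
    pass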
484

    
485

    
486
class MasterNetworkParameters(ConfigObject):
487
  """Network configuration parameters for the master
488

489
  @ivar name: master name
490
  @ivar ip: master IP
491
  @ivar netmask: master netmask
492
  @ivar netdev: master network device
493
  @ivar ip_family: master IP family
494

495
  """
496
  __slots__ = [
497
    "name",
498
    "ip",
499
    "netmask",
500
    "netdev",
501
    "ip_family"
502
    ]
503

    
504

    
505
class ConfigData(ConfigObject):
506
  """Top-level config object."""
507
  __slots__ = [
508
    "version",
509
    "cluster",
510
    "nodes",
511
    "nodegroups",
512
    "instances",
513
    "serial_no",
514
    ] + _TIMESTAMPS
515

    
516
  def ToDict(self):
517
    """Custom function for top-level config data.
518

519
    This just replaces the list of instances, nodes and the cluster
520
    with standard python types.
521

522
    """
523
    mydict = super(ConfigData, self).ToDict()
524
    mydict["cluster"] = mydict["cluster"].ToDict()
525
    for key in "nodes", "instances", "nodegroups":
526
      mydict[key] = self._ContainerToDicts(mydict[key])
527

    
528
    return mydict
529

    
530
  @classmethod
531
  def FromDict(cls, val):
532
    """Custom function for top-level config data
533

534
    """
535
    obj = super(ConfigData, cls).FromDict(val)
536
    obj.cluster = Cluster.FromDict(obj.cluster)
537
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
538
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
539
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
540
    return obj
541

    
542
  def HasAnyDiskOfType(self, dev_type):
543
    """Check if in there is at disk of the given type in the configuration.
544

545
    @type dev_type: L{constants.LDS_BLOCK}
546
    @param dev_type: the type to look for
547
    @rtype: boolean
548
    @return: boolean indicating if a disk of the given type was found or not
549

550
    """
551
    for instance in self.instances.values():
552
      for disk in instance.disks:
553
        if disk.IsBasedOnDiskType(dev_type):
554
          return True
555
    return False
556

    
557
  def UpgradeConfig(self):
558
    """Fill defaults for missing configuration values.
559

560
    """
561
    self.cluster.UpgradeConfig()
562
    for node in self.nodes.values():
563
      node.UpgradeConfig()
564
    for instance in self.instances.values():
565
      instance.UpgradeConfig()
566
    if self.nodegroups is None:
567
      self.nodegroups = {}
568
    for nodegroup in self.nodegroups.values():
569
      nodegroup.UpgradeConfig()
570
    if self.cluster.drbd_usermode_helper is None:
571
      # To decide if we set a helper, let's check if at least one instance has
572
      # a DRBD disk. This does not cover all the possible scenarios but it
573
      # gives a good approximation.
574
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
575
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
576

    
577

    
578
class NIC(ConfigObject):
579
  """Config object representing a network card."""
580
  __slots__ = ["mac", "ip", "nicparams"]
581

    
582
  @classmethod
583
  def CheckParameterSyntax(cls, nicparams):
584
    """Check the given parameters for validity.
585

586
    @type nicparams:  dict
587
    @param nicparams: dictionary with parameter names/value
588
    @raise errors.ConfigurationError: when a parameter is not valid
589

590
    """
591
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
592
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
593
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
594
      raise errors.ConfigurationError(err)
595

    
596
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
597
        not nicparams[constants.NIC_LINK]):
598
      err = "Missing bridged nic link"
599
      raise errors.ConfigurationError(err)
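
# Illustrative sketch, not part of the original module: what the nicparams
# syntax check accepts and rejects; the link name is invented.
def _ExampleNicParamCheck():
  """Shows NIC.CheckParameterSyntax on a bridged NIC."""
  NIC.CheckParameterSyntax({
    constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
    constants.NIC_LINK: "br0",
    })
  try:
    NIC.CheckParameterSyntax({constants.NIC_MODE: "bogus",
                              constants.NIC_LINK: "br0"})
  except errors.ConfigurationError:
    pass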
600

    
601

    
602
class Disk(ConfigObject):
603
  """Config object representing a block device."""
604
  __slots__ = ["dev_type", "logical_id", "physical_id",
605
               "children", "iv_name", "size", "mode", "params"]
606

    
607
  def CreateOnSecondary(self):
608
    """Test if this device needs to be created on a secondary node."""
609
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
610

    
611
  def AssembleOnSecondary(self):
612
    """Test if this device needs to be assembled on a secondary node."""
613
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
614

    
615
  def OpenOnSecondary(self):
616
    """Test if this device needs to be opened on a secondary node."""
617
    return self.dev_type in (constants.LD_LV,)
618

    
619
  def StaticDevPath(self):
620
    """Return the device path if this device type has a static one.
621

622
    Some devices (LVM for example) live always at the same /dev/ path,
623
    irrespective of their status. For such devices, we return this
624
    path, for others we return None.
625

626
    @warning: The path returned is not a normalized pathname; callers
627
        should check that it is a valid path.
628

629
    """
630
    if self.dev_type == constants.LD_LV:
631
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
632
    elif self.dev_type == constants.LD_BLOCKDEV:
633
      return self.logical_id[1]
634
    elif self.dev_type == constants.LD_RBD:
635
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
636
    return None
637

    
638
  def ChildrenNeeded(self):
639
    """Compute the needed number of children for activation.
640

641
    This method will return either -1 (all children) or a positive
642
    number denoting the minimum number of children needed for
643
    activation (only mirrored devices will usually return >=0).
644

645
    Currently, only DRBD8 supports diskless activation (therefore we
646
    return 0), for all other we keep the previous semantics and return
647
    -1.
648

649
    """
650
    if self.dev_type == constants.LD_DRBD8:
651
      return 0
652
    return -1
653

    
654
  def IsBasedOnDiskType(self, dev_type):
655
    """Check if the disk or its children are based on the given type.
656

657
    @type dev_type: L{constants.LDS_BLOCK}
658
    @param dev_type: the type to look for
659
    @rtype: boolean
660
    @return: boolean indicating if a device of the given type was found or not
661

662
    """
663
    if self.children:
664
      for child in self.children:
665
        if child.IsBasedOnDiskType(dev_type):
666
          return True
667
    return self.dev_type == dev_type
668

    
669
  def GetNodes(self, node):
670
    """This function returns the nodes this device lives on.
671

672
    Given the node on which the parent of the device lives on (or, in
673
    case of a top-level device, the primary node of the devices'
674
    instance), this function will return a list of nodes on which this
675
    device needs to (or can) be assembled.
676

677
    """
678
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
679
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
680
      result = [node]
681
    elif self.dev_type in constants.LDS_DRBD:
682
      result = [self.logical_id[0], self.logical_id[1]]
683
      if node not in result:
684
        raise errors.ConfigurationError("DRBD device passed unknown node")
685
    else:
686
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
687
    return result
688

    
689
  def ComputeNodeTree(self, parent_node):
690
    """Compute the node/disk tree for this disk and its children.
691

692
    This method, given the node on which the parent disk lives, will
693
    return the list of all (node, disk) pairs which describe the disk
694
    tree in the most compact way. For example, a drbd/lvm stack
695
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
696
    which represents all the top-level devices on the nodes.
697

698
    """
699
    my_nodes = self.GetNodes(parent_node)
700
    result = [(node, self) for node in my_nodes]
701
    if not self.children:
702
      # leaf device
703
      return result
704
    for node in my_nodes:
705
      for child in self.children:
706
        child_result = child.ComputeNodeTree(node)
707
        if len(child_result) == 1:
708
          # child (and all its descendants) is simple, doesn't split
709
          # over multiple hosts, so we don't need to describe it, our
710
          # own entry for this node describes it completely
711
          continue
712
        else:
713
          # check if child nodes differ from my nodes; note that
714
          # subdisk can differ from the child itself, and be instead
715
          # one of its descendants
716
          for subnode, subdisk in child_result:
717
            if subnode not in my_nodes:
718
              result.append((subnode, subdisk))
719
            # otherwise child is under our own node, so we ignore this
720
            # entry (but probably the other results in the list will
721
            # be different)
722
    return result
723

    
724
  def ComputeGrowth(self, amount):
725
    """Compute the per-VG growth requirements.
726

727
    This only works for VG-based disks.
728

729
    @type amount: integer
730
    @param amount: the desired increase in (user-visible) disk space
731
    @rtype: dict
732
    @return: a dictionary of volume-groups and the required size
733

734
    """
735
    if self.dev_type == constants.LD_LV:
736
      return {self.logical_id[0]: amount}
737
    elif self.dev_type == constants.LD_DRBD8:
738
      if self.children:
739
        return self.children[0].ComputeGrowth(amount)
740
      else:
741
        return {}
742
    else:
743
      # Other disk types do not require VG space
744
      return {}
745

    
746
  def RecordGrow(self, amount):
747
    """Update the size of this disk after growth.
748

749
    This method recurses over the disks's children and updates their
750
    size correspondigly. The method needs to be kept in sync with the
751
    actual algorithms from bdev.
752

753
    """
754
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
755
                         constants.LD_RBD):
756
      self.size += amount
757
    elif self.dev_type == constants.LD_DRBD8:
758
      if self.children:
759
        self.children[0].RecordGrow(amount)
760
      self.size += amount
761
    else:
762
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
763
                                   " disk type %s" % self.dev_type)
764

    
765
  def Update(self, size=None, mode=None):
766
    """Apply changes to size and mode.
767

768
    """
769
    if self.dev_type == constants.LD_DRBD8:
770
      if self.children:
771
        self.children[0].Update(size=size, mode=mode)
772
    else:
773
      assert not self.children
774

    
775
    if size is not None:
776
      self.size = size
777
    if mode is not None:
778
      self.mode = mode
779

    
780
  def UnsetSize(self):
781
    """Sets recursively the size to zero for the disk and its children.
782

783
    """
784
    if self.children:
785
      for child in self.children:
786
        child.UnsetSize()
787
    self.size = 0
788

    
789
  def SetPhysicalID(self, target_node, nodes_ip):
790
    """Convert the logical ID to the physical ID.
791

792
    This is used only for drbd, which needs ip/port configuration.
793

794
    The routine descends down and updates its children also, because
795
    this helps when only the top device is passed to the remote
796
    node.
797

798
    Arguments:
799
      - target_node: the node we wish to configure for
800
      - nodes_ip: a mapping of node name to ip
801

802
    The target_node must exist in nodes_ip, and must be one of the
803
    nodes in the logical ID for each of the DRBD devices encountered
804
    in the disk tree.
805

806
    """
807
    if self.children:
808
      for child in self.children:
809
        child.SetPhysicalID(target_node, nodes_ip)
810

    
811
    if self.logical_id is None and self.physical_id is not None:
812
      return
813
    if self.dev_type in constants.LDS_DRBD:
814
      pnode, snode, port, pminor, sminor, secret = self.logical_id
815
      if target_node not in (pnode, snode):
816
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
817
                                        target_node)
818
      pnode_ip = nodes_ip.get(pnode, None)
819
      snode_ip = nodes_ip.get(snode, None)
820
      if pnode_ip is None or snode_ip is None:
821
        raise errors.ConfigurationError("Can't find primary or secondary node"
822
                                        " for %s" % str(self))
823
      p_data = (pnode_ip, port)
824
      s_data = (snode_ip, port)
825
      if pnode == target_node:
826
        self.physical_id = p_data + s_data + (pminor, secret)
827
      else: # it must be secondary, we tested above
828
        self.physical_id = s_data + p_data + (sminor, secret)
829
    else:
830
      self.physical_id = self.logical_id
831
    return
832

    
833
  def ToDict(self):
834
    """Disk-specific conversion to standard python types.
835

836
    This replaces the children lists of objects with lists of
837
    standard python types.
838

839
    """
840
    bo = super(Disk, self).ToDict()
841

    
842
    for attr in ("children",):
843
      alist = bo.get(attr, None)
844
      if alist:
845
        bo[attr] = self._ContainerToDicts(alist)
846
    return bo
847

    
848
  @classmethod
849
  def FromDict(cls, val):
850
    """Custom function for Disks
851

852
    """
853
    obj = super(Disk, cls).FromDict(val)
854
    if obj.children:
855
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
856
    if obj.logical_id and isinstance(obj.logical_id, list):
857
      obj.logical_id = tuple(obj.logical_id)
858
    if obj.physical_id and isinstance(obj.physical_id, list):
859
      obj.physical_id = tuple(obj.physical_id)
860
    if obj.dev_type in constants.LDS_DRBD:
861
      # we need a tuple of length six here
862
      if len(obj.logical_id) < 6:
863
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
864
    return obj
865

    
866
  def __str__(self):
867
    """Custom str() formatter for disks.
868

869
    """
870
    if self.dev_type == constants.LD_LV:
871
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
872
    elif self.dev_type in constants.LDS_DRBD:
873
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
874
      val = "<DRBD8("
875
      if self.physical_id is None:
876
        phy = "unconfigured"
877
      else:
878
        phy = ("configured as %s:%s %s:%s" %
879
               (self.physical_id[0], self.physical_id[1],
880
                self.physical_id[2], self.physical_id[3]))
881

    
882
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
883
              (node_a, minor_a, node_b, minor_b, port, phy))
884
      if self.children and self.children.count(None) == 0:
885
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
886
      else:
887
        val += "no local storage"
888
    else:
889
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
890
             (self.dev_type, self.logical_id, self.physical_id, self.children))
891
    if self.iv_name is None:
892
      val += ", not visible"
893
    else:
894
      val += ", visible as /dev/%s" % self.iv_name
895
    if isinstance(self.size, int):
896
      val += ", size=%dm)>" % self.size
897
    else:
898
      val += ", size='%s')>" % (self.size,)
899
    return val
900

    
901
  def Verify(self):
902
    """Checks that this disk is correctly configured.
903

904
    """
905
    all_errors = []
906
    if self.mode not in constants.DISK_ACCESS_SET:
907
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
908
    return all_errors
909

    
910
  def UpgradeConfig(self):
911
    """Fill defaults for missing configuration values.
912

913
    """
914
    if self.children:
915
      for child in self.children:
916
        child.UpgradeConfig()
917

    
918
    if not self.params:
919
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
920
    else:
921
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
922
                             self.params)
923
    # add here config upgrade for this disk
924

    
925
  @staticmethod
926
  def ComputeLDParams(disk_template, disk_params):
927
    """Computes Logical Disk parameters from Disk Template parameters.
928

929
    @type disk_template: string
930
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
931
    @type disk_params: dict
932
    @param disk_params: disk template parameters;
933
                        dict(template_name -> parameters)
934
    @rtype: list(dict)
935
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
936
      contains the LD parameters of the node. The tree is flattened in-order.
937

938
    """
939
    if disk_template not in constants.DISK_TEMPLATES:
940
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
941

    
942
    assert disk_template in disk_params
943

    
944
    result = list()
945
    dt_params = disk_params[disk_template]
946
    if disk_template == constants.DT_DRBD8:
947
      drbd_params = {
948
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
949
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
950
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
951
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
952
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
953
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
954
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
955
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
956
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
957
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
958
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
959
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
960
        }
961

    
962
      drbd_params = \
963
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8],
964
                 drbd_params)
965

    
966
      result.append(drbd_params)
967

    
968
      # data LV
969
      data_params = {
970
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
971
        }
972
      data_params = \
973
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
974
                 data_params)
975
      result.append(data_params)
976

    
977
      # metadata LV
978
      meta_params = {
979
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
980
        }
981
      meta_params = \
982
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
983
                 meta_params)
984
      result.append(meta_params)
985

    
986
    elif (disk_template == constants.DT_FILE or
987
          disk_template == constants.DT_SHARED_FILE):
988
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
989

    
990
    elif disk_template == constants.DT_PLAIN:
991
      params = {
992
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
993
        }
994
      params = \
995
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
996
                 params)
997
      result.append(params)
998

    
999
    elif disk_template == constants.DT_BLOCK:
1000
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
1001

    
1002
    elif disk_template == constants.DT_RBD:
1003
      params = {
1004
        constants.LDP_POOL: dt_params[constants.RBD_POOL]
1005
        }
1006
      params = \
1007
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD],
1008
                 params)
1009
      result.append(params)
1010

    
1011
    return result
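
# Illustrative sketch, not part of the original module: a DRBD8 disk backed by
# two LVs and the node/disk tree it yields; the node names, volume group and
# logical IDs are invented for the example.
def _ExampleDrbdNodeTree():
  """Shows ComputeNodeTree for a DRBD disk on top of LVs."""
  data = Disk(dev_type=constants.LD_LV, size=1024,
              logical_id=("xenvg", "data-lv"))
  meta = Disk(dev_type=constants.LD_LV, size=128,
              logical_id=("xenvg", "meta-lv"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              logical_id=("node1", "node2", 11000, 0, 0, "secret"),
              children=[data, meta], iv_name="disk/0")
  # both DRBD peers carry the top-level device; the LVs do not add extra nodes
  assert sorted(n for (n, _) in drbd.ComputeNodeTree("node1")) == \
    ["node1", "node2"]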
1012

    
1013

    
1014
class InstancePolicy(ConfigObject):
1015
  """Config object representing instance policy limits dictionary.
1016

1017

1018
  Note that this object is not actually used in the config, it's just
1019
  used as a placeholder for a few functions.
1020

1021
  """
1022
  @classmethod
1023
  def CheckParameterSyntax(cls, ipolicy):
1024
    """ Check the instance policy for validity.
1025

1026
    """
1027
    for param in constants.ISPECS_PARAMETERS:
1028
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
1029
    if constants.IPOLICY_DTS in ipolicy:
1030
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
1031
    for key in constants.IPOLICY_PARAMETERS:
1032
      if key in ipolicy:
1033
        InstancePolicy.CheckParameter(key, ipolicy[key])
1034
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1035
    if wrong_keys:
1036
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
1037
                                      utils.CommaJoin(wrong_keys))
1038

    
1039
  @classmethod
1040
  def CheckISpecSyntax(cls, ipolicy, name):
1041
    """Check the instance policy for validity on a given key.
1042

1043
    We check if the instance policy makes sense for a given key, that is
1044
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
1045

1046
    @type ipolicy: dict
1047
    @param ipolicy: dictionary with min, max, std specs
1048
    @type name: string
1049
    @param name: what are the limits for
1050
    @raise errors.ConfigurationError: when specs for given name are not valid
1051

1052
    """
1053
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
1054
    std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
1055
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
1056
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
1057
           (name,
1058
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
1059
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
1060
            ipolicy[constants.ISPECS_STD].get(name, "-")))
1061
    if min_v > std_v or std_v > max_v:
1062
      raise errors.ConfigurationError(err)
1063

    
1064
  @classmethod
1065
  def CheckDiskTemplates(cls, disk_templates):
1066
    """Checks the disk templates for validity.
1067

1068
    """
1069
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1070
    if wrong:
1071
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1072
                                      utils.CommaJoin(wrong))
1073

    
1074
  @classmethod
1075
  def CheckParameter(cls, key, value):
1076
    """Checks a parameter.
1077

1078
    Currently we expect all parameters to be float values.
1079

1080
    """
1081
    try:
1082
      float(value)
1083
    except (TypeError, ValueError), err:
1084
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1085
                                      " '%s', error: %s" % (key, value, err))
1086

    
1087

    
1088
class Instance(TaggableObject):
1089
  """Config object representing an instance."""
1090
  __slots__ = [
1091
    "name",
1092
    "primary_node",
1093
    "os",
1094
    "hypervisor",
1095
    "hvparams",
1096
    "beparams",
1097
    "osparams",
1098
    "admin_state",
1099
    "nics",
1100
    "disks",
1101
    "disk_template",
1102
    "network_port",
1103
    "serial_no",
1104
    ] + _TIMESTAMPS + _UUID
1105

    
1106
  def _ComputeSecondaryNodes(self):
1107
    """Compute the list of secondary nodes.
1108

1109
    This is a simple wrapper over _ComputeAllNodes.
1110

1111
    """
1112
    all_nodes = set(self._ComputeAllNodes())
1113
    all_nodes.discard(self.primary_node)
1114
    return tuple(all_nodes)
1115

    
1116
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1117
                             "List of secondary nodes")
1118

    
1119
  def _ComputeAllNodes(self):
1120
    """Compute the list of all nodes.
1121

1122
    Since the data is already there (in the drbd disks), keeping it as
1123
    a separate normal attribute is redundant and if not properly
1124
    synchronised can cause problems. Thus it's better to compute it
1125
    dynamically.
1126

1127
    """
1128
    def _Helper(nodes, device):
1129
      """Recursively computes nodes given a top device."""
1130
      if device.dev_type in constants.LDS_DRBD:
1131
        nodea, nodeb = device.logical_id[:2]
1132
        nodes.add(nodea)
1133
        nodes.add(nodeb)
1134
      if device.children:
1135
        for child in device.children:
1136
          _Helper(nodes, child)
1137

    
1138
    all_nodes = set()
1139
    all_nodes.add(self.primary_node)
1140
    for device in self.disks:
1141
      _Helper(all_nodes, device)
1142
    return tuple(all_nodes)
1143

    
1144
  all_nodes = property(_ComputeAllNodes, None, None,
1145
                       "List of all nodes of the instance")
1146

    
1147
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1148
    """Provide a mapping of nodes to LVs this instance owns.
1149

1150
    This function figures out what logical volumes should belong on
1151
    which nodes, recursing through a device tree.
1152

1153
    @param lvmap: optional dictionary to receive the
1154
        'node' : ['lv', ...] data.
1155

1156
    @return: None if lvmap arg is given, otherwise, a dictionary of
1157
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1158
        volumeN is of the form "vg_name/lv_name", compatible with
1159
        GetVolumeList()
1160

1161
    """
1162
    if node is None:
1163
      node = self.primary_node
1164

    
1165
    if lvmap is None:
1166
      lvmap = {
1167
        node: [],
1168
        }
1169
      ret = lvmap
1170
    else:
1171
      if not node in lvmap:
1172
        lvmap[node] = []
1173
      ret = None
1174

    
1175
    if not devs:
1176
      devs = self.disks
1177

    
1178
    for dev in devs:
1179
      if dev.dev_type == constants.LD_LV:
1180
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1181

    
1182
      elif dev.dev_type in constants.LDS_DRBD:
1183
        if dev.children:
1184
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1185
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1186

    
1187
      elif dev.children:
1188
        self.MapLVsByNode(lvmap, dev.children, node)
1189

    
1190
    return ret
1191

    
1192
  def FindDisk(self, idx):
1193
    """Find a disk given having a specified index.
1194

1195
    This is just a wrapper that does validation of the index.
1196

1197
    @type idx: int
1198
    @param idx: the disk index
1199
    @rtype: L{Disk}
1200
    @return: the corresponding disk
1201
    @raise errors.OpPrereqError: when the given index is not valid
1202

1203
    """
1204
    try:
1205
      idx = int(idx)
1206
      return self.disks[idx]
1207
    except (TypeError, ValueError), err:
1208
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1209
                                 errors.ECODE_INVAL)
1210
    except IndexError:
1211
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1212
                                 " 0 to %d" % (idx, len(self.disks) - 1),
1213
                                 errors.ECODE_INVAL)
1214

    
1215
  def ToDict(self):
1216
    """Instance-specific conversion to standard python types.
1217

1218
    This replaces the children lists of objects with lists of standard
1219
    python types.
1220

1221
    """
1222
    bo = super(Instance, self).ToDict()
1223

    
1224
    for attr in "nics", "disks":
1225
      alist = bo.get(attr, None)
1226
      if alist:
1227
        nlist = self._ContainerToDicts(alist)
1228
      else:
1229
        nlist = []
1230
      bo[attr] = nlist
1231
    return bo
1232

    
1233
  @classmethod
1234
  def FromDict(cls, val):
1235
    """Custom function for instances.
1236

1237
    """
1238
    if "admin_state" not in val:
1239
      if val.get("admin_up", False):
1240
        val["admin_state"] = constants.ADMINST_UP
1241
      else:
1242
        val["admin_state"] = constants.ADMINST_DOWN
1243
    if "admin_up" in val:
1244
      del val["admin_up"]
1245
    obj = super(Instance, cls).FromDict(val)
1246
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1247
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1248
    return obj
1249

    
1250
  def UpgradeConfig(self):
1251
    """Fill defaults for missing configuration values.
1252

1253
    """
1254
    for nic in self.nics:
1255
      nic.UpgradeConfig()
1256
    for disk in self.disks:
1257
      disk.UpgradeConfig()
1258
    if self.hvparams:
1259
      for key in constants.HVC_GLOBALS:
1260
        try:
1261
          del self.hvparams[key]
1262
        except KeyError:
1263
          pass
1264
    if self.osparams is None:
1265
      self.osparams = {}
1266
    UpgradeBeParams(self.beparams)
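
# Illustrative sketch, not part of the original module: node lists are derived
# from the instance's disk tree rather than stored; all names are invented.
def _ExampleInstanceNodes():
  """Shows primary/secondary node computation from a DRBD disk."""
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              logical_id=("node1", "node2", 11000, 0, 0, "secret"))
  inst = Instance(name="inst1", primary_node="node1", disks=[drbd], nics=[])
  assert set(inst.all_nodes) == set(["node1", "node2"])
  assert inst.secondary_nodes == ("node2",)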
1267

    
1268

    
1269
class OS(ConfigObject):
1270
  """Config object representing an operating system.
1271

1272
  @type supported_parameters: list
1273
  @ivar supported_parameters: a list of (name, description) tuples
1274
      describing the parameters supported by this OS
1275

1276
  @type VARIANT_DELIM: string
1277
  @cvar VARIANT_DELIM: the variant delimiter
1278

1279
  """
1280
  __slots__ = [
1281
    "name",
1282
    "path",
1283
    "api_versions",
1284
    "create_script",
1285
    "export_script",
1286
    "import_script",
1287
    "rename_script",
1288
    "verify_script",
1289
    "supported_variants",
1290
    "supported_parameters",
1291
    ]
1292

    
1293
  VARIANT_DELIM = "+"
1294

    
1295
  @classmethod
1296
  def SplitNameVariant(cls, name):
1297
    """Splits the name into the proper name and variant.
1298

1299
    @param name: the OS (unprocessed) name
1300
    @rtype: list
1301
    @return: a list of two elements; if the original name didn't
1302
        contain a variant, it's returned as an empty string
1303

1304
    """
1305
    nv = name.split(cls.VARIANT_DELIM, 1)
1306
    if len(nv) == 1:
1307
      nv.append("")
1308
    return nv
1309

    
1310
  @classmethod
1311
  def GetName(cls, name):
1312
    """Returns the proper name of the os (without the variant).
1313

1314
    @param name: the OS (unprocessed) name
1315

1316
    """
1317
    return cls.SplitNameVariant(name)[0]
1318

    
1319
  @classmethod
1320
  def GetVariant(cls, name):
1321
    """Returns the variant the os (without the base name).
1322

1323
    @param name: the OS (unprocessed) name
1324

1325
    """
1326
    return cls.SplitNameVariant(name)[1]
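
# Illustrative sketch, not part of the original module: splitting an OS name
# into base name and variant; the OS names are invented.
def _ExampleOsNameSplit():
  """Shows the OS name/variant helpers."""
  assert OS.SplitNameVariant("debootstrap+wheezy") == \
    ["debootstrap", "wheezy"]
  assert OS.GetName("debootstrap+wheezy") == "debootstrap"
  assert OS.GetVariant("debootstrap") == ""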
1327

    
1328

    
1329
class NodeHvState(ConfigObject):
1330
  """Hypvervisor state on a node.
1331

1332
  @ivar mem_total: Total amount of memory
1333
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1334
    available)
1335
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1336
    rounding
1337
  @ivar mem_inst: Memory used by instances living on node
1338
  @ivar cpu_total: Total node CPU core count
1339
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1340

1341
  """
1342
  __slots__ = [
1343
    "mem_total",
1344
    "mem_node",
1345
    "mem_hv",
1346
    "mem_inst",
1347
    "cpu_total",
1348
    "cpu_node",
1349
    ] + _TIMESTAMPS
1350

    
1351

    
1352
class NodeDiskState(ConfigObject):
1353
  """Disk state on a node.
1354

1355
  """
1356
  __slots__ = [
1357
    "total",
1358
    "reserved",
1359
    "overhead",
1360
    ] + _TIMESTAMPS
1361

    
1362

    
1363
class Node(TaggableObject):
1364
  """Config object representing a node.
1365

1366
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1367
  @ivar hv_state_static: Hypervisor state overridden by user
1368
  @ivar disk_state: Disk state (e.g. free space)
1369
  @ivar disk_state_static: Disk state overridden by user
1370

1371
  """
1372
  __slots__ = [
1373
    "name",
1374
    "primary_ip",
1375
    "secondary_ip",
1376
    "serial_no",
1377
    "master_candidate",
1378
    "offline",
1379
    "drained",
1380
    "group",
1381
    "master_capable",
1382
    "vm_capable",
1383
    "ndparams",
1384
    "powered",
1385
    "hv_state",
1386
    "hv_state_static",
1387
    "disk_state",
1388
    "disk_state_static",
1389
    ] + _TIMESTAMPS + _UUID
1390

    
1391
  def UpgradeConfig(self):
1392
    """Fill defaults for missing configuration values.
1393

1394
    """
1395
    # pylint: disable=E0203
1396
    # because these are "defined" via slots, not manually
1397
    if self.master_capable is None:
1398
      self.master_capable = True
1399

    
1400
    if self.vm_capable is None:
1401
      self.vm_capable = True
1402

    
1403
    if self.ndparams is None:
1404
      self.ndparams = {}
1405

    
1406
    if self.powered is None:
1407
      self.powered = True
1408

    
1409
  def ToDict(self):
1410
    """Custom function for serializing.
1411

1412
    """
1413
    data = super(Node, self).ToDict()
1414

    
1415
    hv_state = data.get("hv_state", None)
1416
    if hv_state is not None:
1417
      data["hv_state"] = self._ContainerToDicts(hv_state)
1418

    
1419
    disk_state = data.get("disk_state", None)
1420
    if disk_state is not None:
1421
      data["disk_state"] = \
1422
        dict((key, self._ContainerToDicts(value))
1423
             for (key, value) in disk_state.items())
1424

    
1425
    return data
1426

    
1427
  @classmethod
1428
  def FromDict(cls, val):
1429
    """Custom function for deserializing.
1430

1431
    """
1432
    obj = super(Node, cls).FromDict(val)
1433

    
1434
    if obj.hv_state is not None:
1435
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1436

    
1437
    if obj.disk_state is not None:
1438
      obj.disk_state = \
1439
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1440
             for (key, value) in obj.disk_state.items())
1441

    
1442
    return obj
1443

    
1444

    
1445
class NodeGroup(TaggableObject):
1446
  """Config object representing a node group."""
1447
  __slots__ = [
1448
    "name",
1449
    "members",
1450
    "ndparams",
1451
    "diskparams",
1452
    "ipolicy",
1453
    "serial_no",
1454
    "hv_state_static",
1455
    "disk_state_static",
1456
    "alloc_policy",
1457
    ] + _TIMESTAMPS + _UUID
1458

    
1459
  def ToDict(self):
1460
    """Custom function for nodegroup.
1461

1462
    This discards the members object, which gets recalculated and is only kept
1463
    in memory.
1464

1465
    """
1466
    mydict = super(NodeGroup, self).ToDict()
1467
    del mydict["members"]
1468
    return mydict
1469

    
1470
  @classmethod
1471
  def FromDict(cls, val):
1472
    """Custom function for nodegroup.
1473

1474
    The members slot is initialized to an empty list, upon deserialization.
1475

1476
    """
1477
    obj = super(NodeGroup, cls).FromDict(val)
1478
    obj.members = []
1479
    return obj
1480

    
1481
  def UpgradeConfig(self):
1482
    """Fill defaults for missing configuration values.
1483

1484
    """
1485
    if self.ndparams is None:
1486
      self.ndparams = {}
1487

    
1488
    if self.serial_no is None:
1489
      self.serial_no = 1
1490

    
1491
    if self.alloc_policy is None:
1492
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1493

    
1494
    # We only update mtime, and not ctime, since we would not be able
1495
    # to provide a correct value for creation time.
1496
    if self.mtime is None:
1497
      self.mtime = time.time()
1498

    
1499
    if self.diskparams is None:
1500
      self.diskparams = {}
1501
    if self.ipolicy is None:
1502
      self.ipolicy = MakeEmptyIPolicy()
1503

    
1504
  def FillND(self, node):
1505
    """Return filled out ndparams for L{objects.Node}
1506

1507
    @type node: L{objects.Node}
1508
    @param node: A Node object to fill
1509
    @return: a copy of the node's ndparams with defaults filled
1510

1511
    """
1512
    return self.SimpleFillND(node.ndparams)
1513

    
1514
  def SimpleFillND(self, ndparams):
1515
    """Fill a given ndparams dict with defaults.
1516

1517
    @type ndparams: dict
1518
    @param ndparams: the dict to fill
1519
    @rtype: dict
1520
    @return: a copy of the passed in ndparams with missing keys filled
1521
        from the node group defaults
1522

1523
    """
1524
    return FillDict(self.ndparams, ndparams)
1525

    
1526

    
1527
class Cluster(TaggableObject):
1528
  """Config object representing the cluster."""
1529
  __slots__ = [
1530
    "serial_no",
1531
    "rsahostkeypub",
1532
    "highest_used_port",
1533
    "tcpudp_port_pool",
1534
    "mac_prefix",
1535
    "volume_group_name",
1536
    "reserved_lvs",
1537
    "drbd_usermode_helper",
1538
    "default_bridge",
1539
    "default_hypervisor",
1540
    "master_node",
1541
    "master_ip",
1542
    "master_netdev",
1543
    "master_netmask",
1544
    "use_external_mip_script",
1545
    "cluster_name",
1546
    "file_storage_dir",
1547
    "shared_file_storage_dir",
1548
    "enabled_hypervisors",
1549
    "hvparams",
1550
    "ipolicy",
1551
    "os_hvp",
1552
    "beparams",
1553
    "osparams",
1554
    "nicparams",
1555
    "ndparams",
1556
    "diskparams",
1557
    "candidate_pool_size",
1558
    "modify_etc_hosts",
1559
    "modify_ssh_setup",
1560
    "maintain_node_health",
1561
    "uid_pool",
1562
    "default_iallocator",
1563
    "hidden_os",
1564
    "blacklisted_os",
1565
    "primary_ip_family",
1566
    "prealloc_wipe_disks",
1567
    "hv_state_static",
1568
    "disk_state_static",
1569
    ] + _TIMESTAMPS + _UUID
1570

    
1571
  def UpgradeConfig(self):
1572
    """Fill defaults for missing configuration values.
1573

1574
    """
1575
    # pylint: disable=E0203
1576
    # because these are "defined" via slots, not manually
1577
    if self.hvparams is None:
1578
      self.hvparams = constants.HVC_DEFAULTS
1579
    else:
1580
      for hypervisor in self.hvparams:
1581
        self.hvparams[hypervisor] = FillDict(
1582
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1583

    
1584
    if self.os_hvp is None:
1585
      self.os_hvp = {}
1586

    
1587
    # osparams added before 2.2
1588
    if self.osparams is None:
1589
      self.osparams = {}
1590

    
1591
    self.ndparams = UpgradeNDParams(self.ndparams)
1592

    
1593
    self.beparams = UpgradeGroupedParams(self.beparams,
1594
                                         constants.BEC_DEFAULTS)
1595
    for beparams_group in self.beparams:
1596
      UpgradeBeParams(self.beparams[beparams_group])
1597

    
1598
    migrate_default_bridge = not self.nicparams
1599
    self.nicparams = UpgradeGroupedParams(self.nicparams,
1600
                                          constants.NICC_DEFAULTS)
1601
    if migrate_default_bridge:
1602
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1603
        self.default_bridge
1604

    
1605
    if self.modify_etc_hosts is None:
1606
      self.modify_etc_hosts = True
1607

    
1608
    if self.modify_ssh_setup is None:
1609
      self.modify_ssh_setup = True
1610

    
1611
    # default_bridge is no longer used in 2.1. The slot is left there to
1612
    # support auto-upgrading. It can be removed once we decide to deprecate
1613
    # upgrading straight from 2.0.
1614
    if self.default_bridge is not None:
1615
      self.default_bridge = None
1616

    
1617
    # default_hypervisor is just the first enabled one in 2.1. This slot and
1618
    # code can be removed once upgrading straight from 2.0 is deprecated.
1619
    if self.default_hypervisor is not None:
1620
      self.enabled_hypervisors = ([self.default_hypervisor] +
1621
        [hvname for hvname in self.enabled_hypervisors
1622
         if hvname != self.default_hypervisor])
1623
      self.default_hypervisor = None
1624

    
1625
    # maintain_node_health added after 2.1.1
1626
    if self.maintain_node_health is None:
1627
      self.maintain_node_health = False
1628

    
1629
    if self.uid_pool is None:
1630
      self.uid_pool = []
1631

    
1632
    if self.default_iallocator is None:
1633
      self.default_iallocator = ""
1634

    
1635
    # reserved_lvs added before 2.2
1636
    if self.reserved_lvs is None:
1637
      self.reserved_lvs = []
1638

    
1639
    # hidden and blacklisted operating systems added before 2.2.1
1640
    if self.hidden_os is None:
1641
      self.hidden_os = []
1642

    
1643
    if self.blacklisted_os is None:
1644
      self.blacklisted_os = []
1645

    
1646
    # primary_ip_family added before 2.3
1647
    if self.primary_ip_family is None:
1648
      self.primary_ip_family = AF_INET
1649

    
1650
    if self.master_netmask is None:
1651
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1652
      self.master_netmask = ipcls.iplen
1653

    
1654
    if self.prealloc_wipe_disks is None:
1655
      self.prealloc_wipe_disks = False
1656

    
1657
    # shared_file_storage_dir added before 2.5
1658
    if self.shared_file_storage_dir is None:
1659
      self.shared_file_storage_dir = ""
1660

    
1661
    if self.use_external_mip_script is None:
1662
      self.use_external_mip_script = False
1663

    
1664
    if self.diskparams:
1665
      self.diskparams = UpgradeDiskParams(self.diskparams)
1666
    else:
1667
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1668

    
1669
    # instance policy added before 2.6
1670
    if self.ipolicy is None:
1671
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1672
    else:
1673
      # we can either make sure to upgrade the ipolicy always, or only
1674
      # do it in some corner cases (e.g. missing keys); note that this
1675
      # will break any removal of keys from the ipolicy dict
1676
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1677

    
1678
  @property
1679
  def primary_hypervisor(self):
1680
    """The first hypervisor is the primary.
1681

1682
    Useful, for example, for L{Node}'s hv/disk state.
1683

1684
    """
1685
    return self.enabled_hypervisors[0]
1686

    
1687
  def ToDict(self):
1688
    """Custom function for cluster.
1689

1690
    """
1691
    mydict = super(Cluster, self).ToDict()
1692
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1693
    return mydict
1694

    
1695
  @classmethod
1696
  def FromDict(cls, val):
1697
    """Custom function for cluster.
1698

1699
    """
1700
    obj = super(Cluster, cls).FromDict(val)
1701
    if not isinstance(obj.tcpudp_port_pool, set):
1702
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1703
    return obj
1704

    
1705

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @type diskparams: dict
    @param diskparams: the dict to fill
    @rtype: dict
    @return: a copy of the given diskparams with missing keys filled
        from the cluster defaults

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type hvparams: dict
    @param hvparams: the dict to fill with the cluster (and OS) defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
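
  # Illustrative sketch of the osparams precedence in SimpleFillOS above:
  # parameters for the base OS name are overridden by those for the specific
  # variant, which are in turn overridden by the explicitly passed os_params.
  # OS name and parameter below are hypothetical:
  #
  #   filled = cluster.SimpleFillOS("debootstrap+secure",
  #                                 {"dhcp_timeout": "60"})
  #   # lookup order: osparams["debootstrap"] < osparams["debootstrap+secure"]
  #   # < the explicit {"dhcp_timeout": "60"}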

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
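
  # Illustrative sketch of the ndparams chain used by FillND above: the node
  # is first filled from its node group (nodegroup.FillND(node)) and the
  # result is then filled from the cluster-level ndparams, so the effective
  # precedence is node > node group > cluster.  The object names below are
  # hypothetical:
  #
  #   ndparams = cluster.FillND(node, group)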

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj
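
  # Illustrative sketch of the custom ToDict/FromDict above: "fields" holds
  # QueryFieldDefinition objects, which are (de-)serialized through the
  # container helpers rather than being copied as-is.  Assuming "resp" is an
  # instance of one of the subclasses below (hypothetical name):
  #
  #   data = resp.ToDict()            # fields become plain dicts
  #   restored = resp.__class__.FromDict(data)
  #   assert isinstance(restored.fields[0], QueryFieldDefinition)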


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
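
  # Illustrative sketch of a console object that satisfies Validate() above.
  # All values are hypothetical, and the keyword-argument construction assumes
  # the usual ConfigObject behaviour of setting slots from kwargs:
  #
  #   console = InstanceConsole(instance="inst1.example.com",
  #                             kind=constants.CONS_SSH,
  #                             host="node1.example.com",
  #                             user="root",
  #                             command=["xm", "console",
  #                                      "inst1.example.com"])
  #   console.Validate()
  #
  # An SSH console needs no message, port or display, which is why those
  # asserts allow CONS_SSH.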


class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParser that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
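
  # Illustrative round trip through the two helpers above; the section and
  # option names are hypothetical:
  #
  #   cfg = SerializableConfigParser()
  #   cfg.add_section("instance")
  #   cfg.set("instance", "name", "inst1.example.com")
  #   text = cfg.Dumps()
  #   cfg2 = SerializableConfigParser.Loads(text)
  #   assert cfg2.get("instance", "name") == "inst1.example.com"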