1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Transportable objects for Ganeti.
23

24
This module provides small, mostly data-only objects which are safe to
25
pass to and from external parties.
26

27
"""
28

    
29
# pylint: disable=E0203,W0201,R0902
30

    
31
# E0203: Access to member %r before its definition, since we use
32
# objects.py, which doesn't explicitly initialise its members
33

    
34
# W0201: Attribute '%s' defined outside __init__
35

    
36
# R0902: Allow instances of these objects to have more than 20 attributes
37

    
38
import ConfigParser
39
import re
40
import copy
41
import time
42
from cStringIO import StringIO
43

    
44
from ganeti import errors
45
from ganeti import constants
46
from ganeti import netutils
47
from ganeti import utils
48

    
49
from socket import AF_INET
50

    
51

    
52
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
53
           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54

    
55
_TIMESTAMPS = ["ctime", "mtime"]
56
_UUID = ["uuid"]
57

    
58
# constants used to create InstancePolicy dictionary
59
TISPECS_GROUP_TYPES = {
60
  constants.ISPECS_MIN: constants.VTYPE_INT,
61
  constants.ISPECS_MAX: constants.VTYPE_INT,
62
  }
63

    
64
TISPECS_CLUSTER_TYPES = {
65
  constants.ISPECS_MIN: constants.VTYPE_INT,
66
  constants.ISPECS_MAX: constants.VTYPE_INT,
67
  constants.ISPECS_STD: constants.VTYPE_INT,
68
  }
69

    
70

    
71
def FillDict(defaults_dict, custom_dict, skip_keys=None):
72
  """Basic function to apply settings on top of a default dict.
73

74
  @type defaults_dict: dict
75
  @param defaults_dict: dictionary holding the default values
76
  @type custom_dict: dict
77
  @param custom_dict: dictionary holding customized values
78
  @type skip_keys: list
79
  @param skip_keys: which keys not to fill
80
  @rtype: dict
81
  @return: dict with the 'full' values
82

83
  """
84
  ret_dict = copy.deepcopy(defaults_dict)
85
  ret_dict.update(custom_dict)
86
  if skip_keys:
87
    for k in skip_keys:
88
      try:
89
        del ret_dict[k]
90
      except KeyError:
91
        pass
92
  return ret_dict
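# Illustrative use of FillDict (the values below are made up and are not real
# Ganeti parameters):
#
#   FillDict({"a": 1, "b": 2}, {"b": 3, "c": 4}, skip_keys=["c"])
#
# returns {"a": 1, "b": 3}: the defaults are deep-copied, overridden by the
# custom dict, and any key listed in skip_keys is dropped from the result.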
93

    
94

    
95
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
96
  """Fills an instance policy with defaults.
97

98
  """
99
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
100
  ret_dict = {}
101
  for key in constants.IPOLICY_ISPECS:
102
    ret_dict[key] = FillDict(default_ipolicy[key],
103
                             custom_ipolicy.get(key, {}),
104
                             skip_keys=skip_keys)
105
  # list items
106
  for key in [constants.IPOLICY_DTS]:
107
    ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
108
  # other items which we know we can directly copy (immutables)
109
  for key in constants.IPOLICY_PARAMETERS:
110
    ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
111

    
112
  return ret_dict
113

    
114

    
115
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
116
  """Fills the disk parameter defaults.
117

118
  @see FillDict: For parameters and return value
119

120
  """
121
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
122

    
123
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
124
                             skip_keys=skip_keys))
125
              for dt in constants.DISK_TEMPLATES)
126

    
127

    
128
def UpgradeGroupedParams(target, defaults):
129
  """Update all groups for the target parameter.
130

131
  @type target: dict of dicts
132
  @param target: {group: {parameter: value}}
133
  @type defaults: dict
134
  @param defaults: default parameter values
135

136
  """
137
  if target is None:
138
    target = {constants.PP_DEFAULT: defaults}
139
  else:
140
    for group in target:
141
      target[group] = FillDict(defaults, target[group])
142
  return target
143

    
144

    
145
def UpgradeBeParams(target):
146
  """Update the be parameters dict to the new format.
147

148
  @type target: dict
149
  @param target: "be" parameters dict
150

151
  """
152
  if constants.BE_MEMORY in target:
153
    memory = target[constants.BE_MEMORY]
154
    target[constants.BE_MAXMEM] = memory
155
    target[constants.BE_MINMEM] = memory
156
    del target[constants.BE_MEMORY]
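# Sketch of the upgrade performed above, assuming the usual string values of
# the BE_* constants ("memory", "maxmem", "minmem"):
#
#   {"memory": 128}  is rewritten in place to  {"maxmem": 128, "minmem": 128}
#
# so that configurations written before the maxmem/minmem split keep working.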
157

    
158

    
159
def UpgradeDiskParams(diskparams):
160
  """Upgrade the disk parameters.
161

162
  @type diskparams: dict
163
  @param diskparams: disk parameters to upgrade
164
  @rtype: dict
165
  @return: the upgraded disk parameters dict
166

167
  """
168
  if diskparams is None:
169
    result = constants.DISK_DT_DEFAULTS.copy()
170
  else:
171
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
172

    
173
  return result
174

    
175

    
176
def UpgradeNDParams(ndparams):
177
  """Upgrade ndparams structure.
178

179
  @type ndparams: dict
180
  @param ndparams: node parameters to upgrade
181
  @rtype: dict
182
  @return: the upgraded node parameters dict
183

184
  """
185
  if ndparams is None:
186
    ndparams = {}
187

    
188
  return FillDict(constants.NDC_DEFAULTS, ndparams)
189

    
190

    
191
def MakeEmptyIPolicy():
192
  """Create empty IPolicy dictionary.
193

194
  """
195
  return dict([
196
    (constants.ISPECS_MIN, {}),
197
    (constants.ISPECS_MAX, {}),
198
    (constants.ISPECS_STD, {}),
199
    ])
200

    
201

    
202
def CreateIPolicyFromOpts(ispecs_mem_size=None,
203
                          ispecs_cpu_count=None,
204
                          ispecs_disk_count=None,
205
                          ispecs_disk_size=None,
206
                          ispecs_nic_count=None,
207
                          ipolicy_disk_templates=None,
208
                          ipolicy_vcpu_ratio=None,
209
                          group_ipolicy=False,
210
                          allowed_values=None,
211
                          fill_all=False):
212
  """Creation of instance policy based on command line options.
213

214
  @param fill_all: whether for cluster policies we should ensure that
215
    all values are filled
216

217

218
  """
219
  # prepare ipolicy dict
220
  ipolicy_transposed = {
221
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
222
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
223
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
224
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
225
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
226
    }
227

    
228
  # first, check that the values given are correct
229
  if group_ipolicy:
230
    forced_type = TISPECS_GROUP_TYPES
231
  else:
232
    forced_type = TISPECS_CLUSTER_TYPES
233

    
234
  for specs in ipolicy_transposed.values():
235
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
236

    
237
  # then transpose
238
  ipolicy_out = MakeEmptyIPolicy()
239
  for name, specs in ipolicy_transposed.iteritems():
240
    assert name in constants.ISPECS_PARAMETERS
241
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
242
      ipolicy_out[key][name] = val
243

    
244
  # no filldict for non-dicts
245
  if not group_ipolicy and fill_all:
246
    if ipolicy_disk_templates is None:
247
      ipolicy_disk_templates = constants.DISK_TEMPLATES
248
    if ipolicy_vcpu_ratio is None:
249
      ipolicy_vcpu_ratio = \
250
        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
251
  if ipolicy_disk_templates is not None:
252
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
253
  if ipolicy_vcpu_ratio is not None:
254
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
255

    
256
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
257

    
258
  return ipolicy_out
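# Sketch of the transposition performed above (the spec values are
# hypothetical; the actual option parsing happens in the CLI layer):
#
#   ispecs_mem_size = {constants.ISPECS_MIN: 128, constants.ISPECS_MAX: 1024}
#
# ends up in the returned policy as
#
#   {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#    constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 1024},
#    constants.ISPECS_STD: {}}
#
# Disk templates and the vcpu ratio are only added when the corresponding
# arguments are given (or when fill_all is set for cluster policies).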
259

    
260

    
261
class ConfigObject(object):
262
  """A generic config object.
263

264
  It has the following properties:
265

266
    - provides somewhat safe recursive unpickling and pickling for its classes
267
    - unset attributes which are defined in slots are always returned
268
      as None instead of raising an error
269

270
  Classes derived from this must always declare __slots__ (we use many
271
  config objects and the memory reduction is useful)
272

273
  """
274
  __slots__ = []
275

    
276
  def __init__(self, **kwargs):
277
    for k, v in kwargs.iteritems():
278
      setattr(self, k, v)
279

    
280
  def __getattr__(self, name):
281
    if name not in self._all_slots():
282
      raise AttributeError("Invalid object attribute %s.%s" %
283
                           (type(self).__name__, name))
284
    return None
285

    
286
  def __setstate__(self, state):
287
    slots = self._all_slots()
288
    for name in state:
289
      if name in slots:
290
        setattr(self, name, state[name])
291

    
292
  @classmethod
293
  def _all_slots(cls):
294
    """Compute the list of all declared slots for a class.
295

296
    """
297
    slots = []
298
    for parent in cls.__mro__:
299
      slots.extend(getattr(parent, "__slots__", []))
300
    return slots
301

    
302
  #: Public getter for the defined slots
303
  GetAllSlots = _all_slots
304

    
305
  def ToDict(self):
306
    """Convert to a dict holding only standard python types.
307

308
    The generic routine just dumps all of this object's attributes in
309
    a dict. It does not work if the class has children who are
310
    ConfigObjects themselves (e.g. the nics list in an Instance), in
311
    which case the object should subclass the function in order to
312
    make sure all objects returned are only standard python types.
313

314
    """
315
    result = {}
316
    for name in self._all_slots():
317
      value = getattr(self, name, None)
318
      if value is not None:
319
        result[name] = value
320
    return result
321

    
322
  __getstate__ = ToDict
323

    
324
  @classmethod
325
  def FromDict(cls, val):
326
    """Create an object from a dictionary.
327

328
    This generic routine takes a dict, instantiates a new instance of
329
    the given class, and sets attributes based on the dict content.
330

331
    As for `ToDict`, this does not work if the class has children
332
    who are ConfigObjects themselves (e.g. the nics list in an
333
    Instance), in which case the object should subclass the function
334
    and alter the objects.
335

336
    """
337
    if not isinstance(val, dict):
338
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
339
                                      " expected dict, got %s" % type(val))
340
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
341
    obj = cls(**val_str) # pylint: disable=W0142
342
    return obj
343

    
344
  @staticmethod
345
  def _ContainerToDicts(container):
346
    """Convert the elements of a container to standard python types.
347

348
    This method converts a container with elements derived from
349
    ConfigData to standard python types. If the container is a dict,
350
    we don't touch the keys, only the values.
351

352
    """
353
    if isinstance(container, dict):
354
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
355
    elif isinstance(container, (list, tuple, set, frozenset)):
356
      ret = [elem.ToDict() for elem in container]
357
    else:
358
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
359
                      type(container))
360
    return ret
361

    
362
  @staticmethod
363
  def _ContainerFromDicts(source, c_type, e_type):
364
    """Convert a container from standard python types.
365

366
    This method converts a container with standard python types to
367
    ConfigData objects. If the container is a dict, we don't touch the
368
    keys, only the values.
369

370
    """
371
    if not isinstance(c_type, type):
372
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
373
                      " not a type" % type(c_type))
374
    if source is None:
375
      source = c_type()
376
    if c_type is dict:
377
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
378
    elif c_type in (list, tuple, set, frozenset):
379
      ret = c_type([e_type.FromDict(elem) for elem in source])
380
    else:
381
      raise TypeError("Invalid container type %s passed to"
382
                      " _ContainerFromDicts" % c_type)
383
    return ret
384

    
385
  def Copy(self):
386
    """Makes a deep copy of the current object and its children.
387

388
    """
389
    dict_form = self.ToDict()
390
    clone_obj = self.__class__.FromDict(dict_form)
391
    return clone_obj
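  # Illustrative round trip for a hypothetical subclass (not part of the
  # real configuration schema):
  #
  #   class _Example(ConfigObject):
  #     __slots__ = ["alpha", "beta"]
  #
  #   obj = _Example(alpha=1)
  #   obj.beta                               # None (unset slots read as None)
  #   obj.ToDict()                           # {"alpha": 1} (None values omitted)
  #   _Example.FromDict(obj.ToDict()).alpha  # 1
  #   obj.Copy().alpha                       # 1 (Copy() is ToDict + FromDict)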
392

    
393
  def __repr__(self):
394
    """Implement __repr__ for ConfigObjects."""
395
    return repr(self.ToDict())
396

    
397
  def UpgradeConfig(self):
398
    """Fill defaults for missing configuration values.
399

400
    This method will be called at configuration load time, and its
401
    implementation will be object dependent.
402

403
    """
404
    pass
405

    
406

    
407
class TaggableObject(ConfigObject):
408
  """A generic class supporting tags.
409

410
  """
411
  __slots__ = ["tags"]
412
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
413

    
414
  @classmethod
415
  def ValidateTag(cls, tag):
416
    """Check if a tag is valid.
417

418
    If the tag is invalid, an errors.TagError will be raised. The
419
    function has no return value.
420

421
    """
422
    if not isinstance(tag, basestring):
423
      raise errors.TagError("Invalid tag type (not a string)")
424
    if len(tag) > constants.MAX_TAG_LEN:
425
      raise errors.TagError("Tag too long (>%d characters)" %
426
                            constants.MAX_TAG_LEN)
427
    if not tag:
428
      raise errors.TagError("Tags cannot be empty")
429
    if not cls.VALID_TAG_RE.match(tag):
430
      raise errors.TagError("Tag contains invalid characters")
431

    
432
  def GetTags(self):
433
    """Return the tags list.
434

435
    """
436
    tags = getattr(self, "tags", None)
437
    if tags is None:
438
      tags = self.tags = set()
439
    return tags
440

    
441
  def AddTag(self, tag):
442
    """Add a new tag.
443

444
    """
445
    self.ValidateTag(tag)
446
    tags = self.GetTags()
447
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
448
      raise errors.TagError("Too many tags")
449
    self.GetTags().add(tag)
450

    
451
  def RemoveTag(self, tag):
452
    """Remove a tag.
453

454
    """
455
    self.ValidateTag(tag)
456
    tags = self.GetTags()
457
    try:
458
      tags.remove(tag)
459
    except KeyError:
460
      raise errors.TagError("Tag not found")
461

    
462
  def ToDict(self):
463
    """Taggable-object-specific conversion to standard python types.
464

465
    This replaces the tags set with a list.
466

467
    """
468
    bo = super(TaggableObject, self).ToDict()
469

    
470
    tags = bo.get("tags", None)
471
    if isinstance(tags, set):
472
      bo["tags"] = list(tags)
473
    return bo
474

    
475
  @classmethod
476
  def FromDict(cls, val):
477
    """Custom function for instances.
478

479
    """
480
    obj = super(TaggableObject, cls).FromDict(val)
481
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
482
      obj.tags = set(obj.tags)
483
    return obj
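  # Example of the tag handling above (hypothetical node name; the limits
  # come from constants.MAX_TAG_LEN and constants.MAX_TAGS_PER_OBJ):
  #
  #   node = Node(name="node1.example.com")
  #   node.AddTag("rack:r1")       # accepted by VALID_TAG_RE
  #   node.GetTags()               # set(["rack:r1"])
  #   node.ToDict()["tags"]        # ["rack:r1"] (sets are serialized as lists)
  #   node.AddTag("white space")   # raises errors.TagError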
484

    
485

    
486
class MasterNetworkParameters(ConfigObject):
487
  """Network configuration parameters for the master
488

489
  @ivar name: master name
490
  @ivar ip: master IP
491
  @ivar netmask: master netmask
492
  @ivar netdev: master network device
493
  @ivar ip_family: master IP family
494

495
  """
496
  __slots__ = [
497
    "name",
498
    "ip",
499
    "netmask",
500
    "netdev",
501
    "ip_family"
502
    ]
503

    
504

    
505
class ConfigData(ConfigObject):
506
  """Top-level config object."""
507
  __slots__ = [
508
    "version",
509
    "cluster",
510
    "nodes",
511
    "nodegroups",
512
    "instances",
513
    "serial_no",
514
    ] + _TIMESTAMPS
515

    
516
  def ToDict(self):
517
    """Custom function for top-level config data.
518

519
    This just replaces the list of instances, nodes and the cluster
520
    with standard python types.
521

522
    """
523
    mydict = super(ConfigData, self).ToDict()
524
    mydict["cluster"] = mydict["cluster"].ToDict()
525
    for key in "nodes", "instances", "nodegroups":
526
      mydict[key] = self._ContainerToDicts(mydict[key])
527

    
528
    return mydict
529

    
530
  @classmethod
531
  def FromDict(cls, val):
532
    """Custom function for top-level config data
533

534
    """
535
    obj = super(ConfigData, cls).FromDict(val)
536
    obj.cluster = Cluster.FromDict(obj.cluster)
537
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
538
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
539
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
540
    return obj
541

    
542
  def HasAnyDiskOfType(self, dev_type):
543
    """Check if a disk of the given type exists in the configuration.
544

545
    @type dev_type: L{constants.LDS_BLOCK}
546
    @param dev_type: the type to look for
547
    @rtype: boolean
548
    @return: boolean indicating if a disk of the given type was found or not
549

550
    """
551
    for instance in self.instances.values():
552
      for disk in instance.disks:
553
        if disk.IsBasedOnDiskType(dev_type):
554
          return True
555
    return False
556

    
557
  def UpgradeConfig(self):
558
    """Fill defaults for missing configuration values.
559

560
    """
561
    self.cluster.UpgradeConfig()
562
    for node in self.nodes.values():
563
      node.UpgradeConfig()
564
    for instance in self.instances.values():
565
      instance.UpgradeConfig()
566
    if self.nodegroups is None:
567
      self.nodegroups = {}
568
    for nodegroup in self.nodegroups.values():
569
      nodegroup.UpgradeConfig()
570
    if self.cluster.drbd_usermode_helper is None:
571
      # To decide if we set a helper, let's check if at least one instance has
572
      # a DRBD disk. This does not cover all the possible scenarios but it
573
      # gives a good approximation.
574
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
575
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
576

    
577

    
578
class NIC(ConfigObject):
579
  """Config object representing a network card."""
580
  __slots__ = ["mac", "ip", "nicparams"]
581

    
582
  @classmethod
583
  def CheckParameterSyntax(cls, nicparams):
584
    """Check the given parameters for validity.
585

586
    @type nicparams:  dict
587
    @param nicparams: dictionary with parameter names/value
588
    @raise errors.ConfigurationError: when a parameter is not valid
589

590
    """
591
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
592
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
593
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
594
      raise errors.ConfigurationError(err)
595

    
596
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
597
        not nicparams[constants.NIC_LINK]):
598
      err = "Missing bridged nic link"
599
      raise errors.ConfigurationError(err)
600

    
601

    
602
class Disk(ConfigObject):
603
  """Config object representing a block device."""
604
  __slots__ = ["dev_type", "logical_id", "physical_id",
605
               "children", "iv_name", "size", "mode", "params"]
606

    
607
  def CreateOnSecondary(self):
608
    """Test if this device needs to be created on a secondary node."""
609
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
610

    
611
  def AssembleOnSecondary(self):
612
    """Test if this device needs to be assembled on a secondary node."""
613
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
614

    
615
  def OpenOnSecondary(self):
616
    """Test if this device needs to be opened on a secondary node."""
617
    return self.dev_type in (constants.LD_LV,)
618

    
619
  def StaticDevPath(self):
620
    """Return the device path if this device type has a static one.
621

622
    Some devices (LVM for example) always live at the same /dev/ path,
623
    irrespective of their status. For such devices, we return this
624
    path, for others we return None.
625

626
    @warning: The path returned is not a normalized pathname; callers
627
        should check that it is a valid path.
628

629
    """
630
    if self.dev_type == constants.LD_LV:
631
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
632
    elif self.dev_type == constants.LD_BLOCKDEV:
633
      return self.logical_id[1]
634
    elif self.dev_type == constants.LD_RBD:
635
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
636
    return None
637

    
638
  def ChildrenNeeded(self):
639
    """Compute the needed number of children for activation.
640

641
    This method will return either -1 (all children) or a positive
642
    number denoting the minimum number of children needed for
643
    activation (only mirrored devices will usually return >=0).
644

645
    Currently, only DRBD8 supports diskless activation (therefore we
646
    return 0), for all others we keep the previous semantics and return
647
    -1.
648

649
    """
650
    if self.dev_type == constants.LD_DRBD8:
651
      return 0
652
    return -1
653

    
654
  def IsBasedOnDiskType(self, dev_type):
655
    """Check if the disk or its children are based on the given type.
656

657
    @type dev_type: L{constants.LDS_BLOCK}
658
    @param dev_type: the type to look for
659
    @rtype: boolean
660
    @return: boolean indicating if a device of the given type was found or not
661

662
    """
663
    if self.children:
664
      for child in self.children:
665
        if child.IsBasedOnDiskType(dev_type):
666
          return True
667
    return self.dev_type == dev_type
668

    
669
  def GetNodes(self, node):
670
    """This function returns the nodes this device lives on.
671

672
    Given the node on which the parent of the device lives (or, in
673
    case of a top-level device, the primary node of the device's
674
    instance), this function will return a list of nodes on which this
675
    device needs to (or can) be assembled.
676

677
    """
678
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
679
                         constants.LD_BLOCKDEV, constants.LD_RBD]:
680
      result = [node]
681
    elif self.dev_type in constants.LDS_DRBD:
682
      result = [self.logical_id[0], self.logical_id[1]]
683
      if node not in result:
684
        raise errors.ConfigurationError("DRBD device passed unknown node")
685
    else:
686
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
687
    return result
688

    
689
  def ComputeNodeTree(self, parent_node):
690
    """Compute the node/disk tree for this disk and its children.
691

692
    This method, given the node on which the parent disk lives, will
693
    return the list of all (node, disk) pairs which describe the disk
694
    tree in the most compact way. For example, a drbd/lvm stack
695
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
696
    which represents all the top-level devices on the nodes.
697

698
    """
699
    my_nodes = self.GetNodes(parent_node)
700
    result = [(node, self) for node in my_nodes]
701
    if not self.children:
702
      # leaf device
703
      return result
704
    for node in my_nodes:
705
      for child in self.children:
706
        child_result = child.ComputeNodeTree(node)
707
        if len(child_result) == 1:
708
          # child (and all its descendants) is simple, doesn't split
709
          # over multiple hosts, so we don't need to describe it, our
710
          # own entry for this node describes it completely
711
          continue
712
        else:
713
          # check if child nodes differ from my nodes; note that
714
          # subdisk can differ from the child itself, and be instead
715
          # one of its descendants
716
          for subnode, subdisk in child_result:
717
            if subnode not in my_nodes:
718
              result.append((subnode, subdisk))
719
            # otherwise child is under our own node, so we ignore this
720
            # entry (but probably the other results in the list will
721
            # be different)
722
    return result
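  # Worked example (hypothetical DRBD-over-LVM disk): for a Disk with
  # dev_type=LD_DRBD8, a logical_id starting with ("node1", "node2", ...) and
  # two LV children, ComputeNodeTree("node1") returns
  #
  #   [("node1", <the DRBD disk>), ("node2", <the DRBD disk>)]
  #
  # because each LV child lives on the same node as its parent and is
  # therefore folded into the parent's entry.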
723

    
724
  def ComputeGrowth(self, amount):
725
    """Compute the per-VG growth requirements.
726

727
    This only works for VG-based disks.
728

729
    @type amount: integer
730
    @param amount: the desired increase in (user-visible) disk space
731
    @rtype: dict
732
    @return: a dictionary of volume-groups and the required size
733

734
    """
735
    if self.dev_type == constants.LD_LV:
736
      return {self.logical_id[0]: amount}
737
    elif self.dev_type == constants.LD_DRBD8:
738
      if self.children:
739
        return self.children[0].ComputeGrowth(amount)
740
      else:
741
        return {}
742
    else:
743
      # Other disk types do not require VG space
744
      return {}
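  # Example (hypothetical volume group name): growing a plain LV disk with
  # logical_id=("xenvg", "data-lv") by 1024 MiB returns {"xenvg": 1024}; a
  # DRBD8 disk delegates the computation to its data LV child, and all other
  # disk types return {} since they consume no VG space.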
745

    
746
  def RecordGrow(self, amount):
747
    """Update the size of this disk after growth.
748

749
    This method recurses over the disk's children and updates their
750
    size correspondingly. The method needs to be kept in sync with the
751
    actual algorithms from bdev.
752

753
    """
754
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
755
                         constants.LD_RBD):
756
      self.size += amount
757
    elif self.dev_type == constants.LD_DRBD8:
758
      if self.children:
759
        self.children[0].RecordGrow(amount)
760
      self.size += amount
761
    else:
762
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
763
                                   " disk type %s" % self.dev_type)
764

    
765
  def Update(self, size=None, mode=None):
766
    """Apply changes to size and mode.
767

768
    """
769
    if self.dev_type == constants.LD_DRBD8:
770
      if self.children:
771
        self.children[0].Update(size=size, mode=mode)
772
    else:
773
      assert not self.children
774

    
775
    if size is not None:
776
      self.size = size
777
    if mode is not None:
778
      self.mode = mode
779

    
780
  def UnsetSize(self):
781
    """Recursively sets the size to zero for the disk and its children.
782

783
    """
784
    if self.children:
785
      for child in self.children:
786
        child.UnsetSize()
787
    self.size = 0
788

    
789
  def SetPhysicalID(self, target_node, nodes_ip):
790
    """Convert the logical ID to the physical ID.
791

792
    This is used only for drbd, which needs ip/port configuration.
793

794
    The routine descends down and updates its children also, because
795
    this helps when only the top device is passed to the remote
796
    node.
797

798
    Arguments:
799
      - target_node: the node we wish to configure for
800
      - nodes_ip: a mapping of node name to ip
801

802
    The target_node must exist in nodes_ip, and must be one of the
803
    nodes in the logical ID for each of the DRBD devices encountered
804
    in the disk tree.
805

806
    """
807
    if self.children:
808
      for child in self.children:
809
        child.SetPhysicalID(target_node, nodes_ip)
810

    
811
    if self.logical_id is None and self.physical_id is not None:
812
      return
813
    if self.dev_type in constants.LDS_DRBD:
814
      pnode, snode, port, pminor, sminor, secret = self.logical_id
815
      if target_node not in (pnode, snode):
816
        raise errors.ConfigurationError("DRBD device does not know node %s" %
817
                                        target_node)
818
      pnode_ip = nodes_ip.get(pnode, None)
819
      snode_ip = nodes_ip.get(snode, None)
820
      if pnode_ip is None or snode_ip is None:
821
        raise errors.ConfigurationError("Can't find primary or secondary node"
822
                                        " for %s" % str(self))
823
      p_data = (pnode_ip, port)
824
      s_data = (snode_ip, port)
825
      if pnode == target_node:
826
        self.physical_id = p_data + s_data + (pminor, secret)
827
      else: # it must be secondary, we tested above
828
        self.physical_id = s_data + p_data + (sminor, secret)
829
    else:
830
      self.physical_id = self.logical_id
831
    return
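  # Sketch of the DRBD mapping above, with hypothetical IDs and addresses:
  #
  #   logical_id = ("node1", "node2", 11000, 0, 1, "secret")
  #   nodes_ip   = {"node1": "192.0.2.1", "node2": "192.0.2.2"}
  #
  # After SetPhysicalID("node1", nodes_ip) the physical ID becomes
  #
  #   ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "secret")
  #
  # i.e. (own IP, port, peer IP, port, own minor, secret).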
832

    
833
  def ToDict(self):
834
    """Disk-specific conversion to standard python types.
835

836
    This replaces the children lists of objects with lists of
837
    standard python types.
838

839
    """
840
    bo = super(Disk, self).ToDict()
841

    
842
    for attr in ("children",):
843
      alist = bo.get(attr, None)
844
      if alist:
845
        bo[attr] = self._ContainerToDicts(alist)
846
    return bo
847

    
848
  @classmethod
849
  def FromDict(cls, val):
850
    """Custom function for Disks
851

852
    """
853
    obj = super(Disk, cls).FromDict(val)
854
    if obj.children:
855
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
856
    if obj.logical_id and isinstance(obj.logical_id, list):
857
      obj.logical_id = tuple(obj.logical_id)
858
    if obj.physical_id and isinstance(obj.physical_id, list):
859
      obj.physical_id = tuple(obj.physical_id)
860
    if obj.dev_type in constants.LDS_DRBD:
861
      # we need a tuple of length six here
862
      if len(obj.logical_id) < 6:
863
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
864
    return obj
865

    
866
  def __str__(self):
867
    """Custom str() formatter for disks.
868

869
    """
870
    if self.dev_type == constants.LD_LV:
871
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
872
    elif self.dev_type in constants.LDS_DRBD:
873
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
874
      val = "<DRBD8("
875
      if self.physical_id is None:
876
        phy = "unconfigured"
877
      else:
878
        phy = ("configured as %s:%s %s:%s" %
879
               (self.physical_id[0], self.physical_id[1],
880
                self.physical_id[2], self.physical_id[3]))
881

    
882
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
883
              (node_a, minor_a, node_b, minor_b, port, phy))
884
      if self.children and self.children.count(None) == 0:
885
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
886
      else:
887
        val += "no local storage"
888
    else:
889
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
890
             (self.dev_type, self.logical_id, self.physical_id, self.children))
891
    if self.iv_name is None:
892
      val += ", not visible"
893
    else:
894
      val += ", visible as /dev/%s" % self.iv_name
895
    if isinstance(self.size, int):
896
      val += ", size=%dm)>" % self.size
897
    else:
898
      val += ", size='%s')>" % (self.size,)
899
    return val
900

    
901
  def Verify(self):
902
    """Checks that this disk is correctly configured.
903

904
    """
905
    all_errors = []
906
    if self.mode not in constants.DISK_ACCESS_SET:
907
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
908
    return all_errors
909

    
910
  def UpgradeConfig(self):
911
    """Fill defaults for missing configuration values.
912

913
    """
914
    if self.children:
915
      for child in self.children:
916
        child.UpgradeConfig()
917

    
918
    if not self.params:
919
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
920
    else:
921
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
922
                             self.params)
923
    # add here config upgrade for this disk
924

    
925
  @staticmethod
926
  def ComputeLDParams(disk_template, disk_params):
927
    """Computes Logical Disk parameters from Disk Template parameters.
928

929
    @type disk_template: string
930
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
931
    @type disk_params: dict
932
    @param disk_params: disk template parameters;
933
                        dict(template_name -> parameters)
934
    @rtype: list(dict)
935
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
936
      contains the LD parameters of the node. The tree is flattened in-order.
937

938
    """
939
    if disk_template not in constants.DISK_TEMPLATES:
940
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
941

    
942
    assert disk_template in disk_params
943

    
944
    result = list()
945
    dt_params = disk_params[disk_template]
946
    if disk_template == constants.DT_DRBD8:
947
      drbd_params = {
948
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
949
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
950
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
951
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
952
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
953
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
954
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
955
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
956
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
957
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
958
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
959
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
960
        }
961

    
962
      drbd_params = \
963
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8],
964
                 drbd_params)
965

    
966
      result.append(drbd_params)
967

    
968
      # data LV
969
      data_params = {
970
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
971
        }
972
      data_params = \
973
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
974
                 data_params)
975
      result.append(data_params)
976

    
977
      # metadata LV
978
      meta_params = {
979
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
980
        }
981
      meta_params = \
982
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
983
                 meta_params)
984
      result.append(meta_params)
985

    
986
    elif (disk_template == constants.DT_FILE or
987
          disk_template == constants.DT_SHARED_FILE):
988
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
989

    
990
    elif disk_template == constants.DT_PLAIN:
991
      params = {
992
        constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
993
        }
994
      params = \
995
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
996
                 params)
997
      result.append(params)
998

    
999
    elif disk_template == constants.DT_BLOCK:
1000
      result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
1001

    
1002
    elif disk_template == constants.DT_RBD:
1003
      params = {
1004
        constants.LDP_POOL: dt_params[constants.RBD_POOL]
1005
        }
1006
      params = \
1007
        FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD],
1008
                 params)
1009
      result.append(params)
1010

    
1011
    return result
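  # Shape of the result (the actual values depend on
  # constants.DISK_LD_DEFAULTS and the cluster/group disk parameters): for
  # constants.DT_DRBD8 the list has three dicts, in-order for the flattened
  # disk tree:
  #
  #   [<DRBD8 device params>, <data LV params>, <metadata LV params>]
  #
  # while the non-mirrored templates handled above (plain, file, shared
  # file, blockdev, rbd) yield a single-element list.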
1012

    
1013

    
1014
class InstancePolicy(ConfigObject):
1015
  """Config object representing instance policy limits dictionary.
1016

1017

1018
  Note that this object is not actually used in the config, it's just
1019
  used as a placeholder for a few functions.
1020

1021
  """
1022
  @classmethod
1023
  def CheckParameterSyntax(cls, ipolicy):
1024
    """Check the instance policy for validity.
1025

1026
    """
1027
    for param in constants.ISPECS_PARAMETERS:
1028
      InstancePolicy.CheckISpecSyntax(ipolicy, param)
1029
    if constants.IPOLICY_DTS in ipolicy:
1030
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
1031
    for key in constants.IPOLICY_PARAMETERS:
1032
      if key in ipolicy:
1033
        InstancePolicy.CheckParameter(key, ipolicy[key])
1034
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1035
    if wrong_keys:
1036
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
1037
                                      utils.CommaJoin(wrong_keys))
1038

    
1039
  @classmethod
1040
  def CheckISpecSyntax(cls, ipolicy, name):
1041
    """Check the instance policy for validity on a given key.
1042

1043
    We check if the instance policy makes sense for a given key, that is
1044
    if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
1045

1046
    @type ipolicy: dict
1047
    @param ipolicy: dictionary with min, max, std specs
1048
    @type name: string
1049
    @param name: what the limits are for
1050
    @raise errors.ConfigurationError: when specs for the given name are not valid
1051

1052
    """
1053
    min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
1054
    std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
1055
    max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
1056
    err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
1057
           (name,
1058
            ipolicy[constants.ISPECS_MIN].get(name, "-"),
1059
            ipolicy[constants.ISPECS_MAX].get(name, "-"),
1060
            ipolicy[constants.ISPECS_STD].get(name, "-")))
1061
    if min_v > std_v or std_v > max_v:
1062
      raise errors.ConfigurationError(err)
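  # Example: for any spec name, min=1, std=2, max=4 passes, while min=2,
  # std=1 (or std=5, max=4) raises ConfigurationError. Missing values
  # default to min=0, std=min and max=std, so a partially specified policy
  # is still accepted.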
1063

    
1064
  @classmethod
1065
  def CheckDiskTemplates(cls, disk_templates):
1066
    """Checks the disk templates for validity.
1067

1068
    """
1069
    wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1070
    if wrong:
1071
      raise errors.ConfigurationError("Invalid disk template(s) %s" %
1072
                                      utils.CommaJoin(wrong))
1073

    
1074
  @classmethod
1075
  def CheckParameter(cls, key, value):
1076
    """Checks a parameter.
1077

1078
    Currently we expect all parameters to be float values.
1079

1080
    """
1081
    try:
1082
      float(value)
1083
    except (TypeError, ValueError), err:
1084
      raise errors.ConfigurationError("Invalid value for key" " '%s':"
1085
                                      " '%s', error: %s" % (key, value, err))
1086

    
1087

    
1088
class Instance(TaggableObject):
1089
  """Config object representing an instance."""
1090
  __slots__ = [
1091
    "name",
1092
    "primary_node",
1093
    "os",
1094
    "hypervisor",
1095
    "hvparams",
1096
    "beparams",
1097
    "osparams",
1098
    "admin_state",
1099
    "nics",
1100
    "disks",
1101
    "disk_template",
1102
    "network_port",
1103
    "serial_no",
1104
    ] + _TIMESTAMPS + _UUID
1105

    
1106
  def _ComputeSecondaryNodes(self):
1107
    """Compute the list of secondary nodes.
1108

1109
    This is a simple wrapper over _ComputeAllNodes.
1110

1111
    """
1112
    all_nodes = set(self._ComputeAllNodes())
1113
    all_nodes.discard(self.primary_node)
1114
    return tuple(all_nodes)
1115

    
1116
  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1117
                             "List of secondary nodes")
1118

    
1119
  def _ComputeAllNodes(self):
1120
    """Compute the list of all nodes.
1121

1122
    Since the data is already there (in the drbd disks), keeping it as
1123
    a separate normal attribute is redundant and if not properly
1124
    synchronised can cause problems. Thus it's better to compute it
1125
    dynamically.
1126

1127
    """
1128
    def _Helper(nodes, device):
1129
      """Recursively computes nodes given a top device."""
1130
      if device.dev_type in constants.LDS_DRBD:
1131
        nodea, nodeb = device.logical_id[:2]
1132
        nodes.add(nodea)
1133
        nodes.add(nodeb)
1134
      if device.children:
1135
        for child in device.children:
1136
          _Helper(nodes, child)
1137

    
1138
    all_nodes = set()
1139
    all_nodes.add(self.primary_node)
1140
    for device in self.disks:
1141
      _Helper(all_nodes, device)
1142
    return tuple(all_nodes)
1143

    
1144
  all_nodes = property(_ComputeAllNodes, None, None,
1145
                       "List of all nodes of the instance")
1146

    
1147
  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1148
    """Provide a mapping of nodes to LVs this instance owns.
1149

1150
    This function figures out what logical volumes should belong on
1151
    which nodes, recursing through a device tree.
1152

1153
    @param lvmap: optional dictionary to receive the
1154
        'node' : ['lv', ...] data.
1155

1156
    @return: None if lvmap arg is given, otherwise, a dictionary of
1157
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1158
        volumeN is of the form "vg_name/lv_name", compatible with
1159
        GetVolumeList()
1160

1161
    """
1162
    if node is None:
1163
      node = self.primary_node
1164

    
1165
    if lvmap is None:
1166
      lvmap = {
1167
        node: [],
1168
        }
1169
      ret = lvmap
1170
    else:
1171
      if node not in lvmap:
1172
        lvmap[node] = []
1173
      ret = None
1174

    
1175
    if not devs:
1176
      devs = self.disks
1177

    
1178
    for dev in devs:
1179
      if dev.dev_type == constants.LD_LV:
1180
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1181

    
1182
      elif dev.dev_type in constants.LDS_DRBD:
1183
        if dev.children:
1184
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1185
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1186

    
1187
      elif dev.children:
1188
        self.MapLVsByNode(lvmap, dev.children, node)
1189

    
1190
    return ret
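  # Example of the returned mapping for a hypothetical instance with a
  # single DRBD disk backed by data/meta LVs on "node1" and "node2" (the
  # LV names below are illustrative only):
  #
  #   {"node1": ["xenvg/disk0_data", "xenvg/disk0_meta"],
  #    "node2": ["xenvg/disk0_data", "xenvg/disk0_meta"]}
  #
  # Each entry has the "vg_name/lv_name" form described in the docstring.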
1191

    
1192
  def FindDisk(self, idx):
1193
    """Find a disk given its index.
1194

1195
    This is just a wrapper that does validation of the index.
1196

1197
    @type idx: int
1198
    @param idx: the disk index
1199
    @rtype: L{Disk}
1200
    @return: the corresponding disk
1201
    @raise errors.OpPrereqError: when the given index is not valid
1202

1203
    """
1204
    try:
1205
      idx = int(idx)
1206
      return self.disks[idx]
1207
    except (TypeError, ValueError), err:
1208
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1209
                                 errors.ECODE_INVAL)
1210
    except IndexError:
1211
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1212
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
1213
                                 errors.ECODE_INVAL)
1214

    
1215
  def ToDict(self):
1216
    """Instance-specific conversion to standard python types.
1217

1218
    This replaces the children lists of objects with lists of standard
1219
    python types.
1220

1221
    """
1222
    bo = super(Instance, self).ToDict()
1223

    
1224
    for attr in "nics", "disks":
1225
      alist = bo.get(attr, None)
1226
      if alist:
1227
        nlist = self._ContainerToDicts(alist)
1228
      else:
1229
        nlist = []
1230
      bo[attr] = nlist
1231
    return bo
1232

    
1233
  @classmethod
1234
  def FromDict(cls, val):
1235
    """Custom function for instances.
1236

1237
    """
1238
    if "admin_state" not in val:
1239
      if val.get("admin_up", False):
1240
        val["admin_state"] = constants.ADMINST_UP
1241
      else:
1242
        val["admin_state"] = constants.ADMINST_DOWN
1243
    if "admin_up" in val:
1244
      del val["admin_up"]
1245
    obj = super(Instance, cls).FromDict(val)
1246
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
1247
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
1248
    return obj
1249

    
1250
  def UpgradeConfig(self):
1251
    """Fill defaults for missing configuration values.
1252

1253
    """
1254
    for nic in self.nics:
1255
      nic.UpgradeConfig()
1256
    for disk in self.disks:
1257
      disk.UpgradeConfig()
1258
    if self.hvparams:
1259
      for key in constants.HVC_GLOBALS:
1260
        try:
1261
          del self.hvparams[key]
1262
        except KeyError:
1263
          pass
1264
    if self.osparams is None:
1265
      self.osparams = {}
1266
    UpgradeBeParams(self.beparams)
1267

    
1268

    
1269
class OS(ConfigObject):
1270
  """Config object representing an operating system.
1271

1272
  @type supported_parameters: list
1273
  @ivar supported_parameters: a list of tuples, name and description,
1274
      containing the parameters supported by this OS
1275

1276
  @type VARIANT_DELIM: string
1277
  @cvar VARIANT_DELIM: the variant delimiter
1278

1279
  """
1280
  __slots__ = [
1281
    "name",
1282
    "path",
1283
    "api_versions",
1284
    "create_script",
1285
    "export_script",
1286
    "import_script",
1287
    "rename_script",
1288
    "verify_script",
1289
    "supported_variants",
1290
    "supported_parameters",
1291
    ]
1292

    
1293
  VARIANT_DELIM = "+"
1294

    
1295
  @classmethod
1296
  def SplitNameVariant(cls, name):
1297
    """Splits the name into the proper name and variant.
1298

1299
    @param name: the OS (unprocessed) name
1300
    @rtype: list
1301
    @return: a list of two elements; if the original name didn't
1302
        contain a variant, it's returned as an empty string
1303

1304
    """
1305
    nv = name.split(cls.VARIANT_DELIM, 1)
1306
    if len(nv) == 1:
1307
      nv.append("")
1308
    return nv
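  # Examples (illustrative OS names):
  #
  #   OS.SplitNameVariant("debian+squeeze")  # ["debian", "squeeze"]
  #   OS.SplitNameVariant("debian")          # ["debian", ""]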
1309

    
1310
  @classmethod
1311
  def GetName(cls, name):
1312
    """Returns the proper name of the os (without the variant).
1313

1314
    @param name: the OS (unprocessed) name
1315

1316
    """
1317
    return cls.SplitNameVariant(name)[0]
1318

    
1319
  @classmethod
1320
  def GetVariant(cls, name):
1321
    """Returns the variant of the os (without the base name).
1322

1323
    @param name: the OS (unprocessed) name
1324

1325
    """
1326
    return cls.SplitNameVariant(name)[1]
1327

    
1328

    
1329
class NodeHvState(ConfigObject):
1330
  """Hypervisor state on a node.
1331

1332
  @ivar mem_total: Total amount of memory
1333
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1334
    available)
1335
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1336
    rounding
1337
  @ivar mem_inst: Memory used by instances living on node
1338
  @ivar cpu_total: Total node CPU core count
1339
  @ivar cpu_node: Number of CPU cores reserved for the node itself
1340

1341
  """
1342
  __slots__ = [
1343
    "mem_total",
1344
    "mem_node",
1345
    "mem_hv",
1346
    "mem_inst",
1347
    "cpu_total",
1348
    "cpu_node",
1349
    ] + _TIMESTAMPS
1350

    
1351

    
1352
class NodeDiskState(ConfigObject):
1353
  """Disk state on a node.
1354

1355
  """
1356
  __slots__ = [
1357
    "total",
1358
    "reserved",
1359
    "overhead",
1360
    ] + _TIMESTAMPS
1361

    
1362

    
1363
class Node(TaggableObject):
1364
  """Config object representing a node.
1365

1366
  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1367
  @ivar hv_state_static: Hypervisor state overridden by user
1368
  @ivar disk_state: Disk state (e.g. free space)
1369
  @ivar disk_state_static: Disk state overridden by user
1370

1371
  """
1372
  __slots__ = [
1373
    "name",
1374
    "primary_ip",
1375
    "secondary_ip",
1376
    "serial_no",
1377
    "master_candidate",
1378
    "offline",
1379
    "drained",
1380
    "group",
1381
    "master_capable",
1382
    "vm_capable",
1383
    "ndparams",
1384
    "powered",
1385
    "hv_state",
1386
    "hv_state_static",
1387
    "disk_state",
1388
    "disk_state_static",
1389
    ] + _TIMESTAMPS + _UUID
1390

    
1391
  def UpgradeConfig(self):
1392
    """Fill defaults for missing configuration values.
1393

1394
    """
1395
    # pylint: disable=E0203
1396
    # because these are "defined" via slots, not manually
1397
    if self.master_capable is None:
1398
      self.master_capable = True
1399

    
1400
    if self.vm_capable is None:
1401
      self.vm_capable = True
1402

    
1403
    if self.ndparams is None:
1404
      self.ndparams = {}
1405

    
1406
    if self.powered is None:
1407
      self.powered = True
1408

    
1409
  def ToDict(self):
1410
    """Custom function for serializing.
1411

1412
    """
1413
    data = super(Node, self).ToDict()
1414

    
1415
    hv_state = data.get("hv_state", None)
1416
    if hv_state is not None:
1417
      data["hv_state"] = self._ContainerToDicts(hv_state)
1418

    
1419
    disk_state = data.get("disk_state", None)
1420
    if disk_state is not None:
1421
      data["disk_state"] = \
1422
        dict((key, self._ContainerToDicts(value))
1423
             for (key, value) in disk_state.items())
1424

    
1425
    return data
1426

    
1427
  @classmethod
1428
  def FromDict(cls, val):
1429
    """Custom function for deserializing.
1430

1431
    """
1432
    obj = super(Node, cls).FromDict(val)
1433

    
1434
    if obj.hv_state is not None:
1435
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1436

    
1437
    if obj.disk_state is not None:
1438
      obj.disk_state = \
1439
        dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
1440
             for (key, value) in obj.disk_state.items())
1441

    
1442
    return obj
1443

    
1444

    
1445
class NodeGroup(TaggableObject):
1446
  """Config object representing a node group."""
1447
  __slots__ = [
1448
    "name",
1449
    "members",
1450
    "ndparams",
1451
    "diskparams",
1452
    "ipolicy",
1453
    "serial_no",
1454
    "hv_state_static",
1455
    "disk_state_static",
1456
    "alloc_policy",
1457
    ] + _TIMESTAMPS + _UUID
1458

    
1459
  def ToDict(self):
1460
    """Custom function for nodegroup.
1461

1462
    This discards the members object, which gets recalculated and is only kept
1463
    in memory.
1464

1465
    """
1466
    mydict = super(NodeGroup, self).ToDict()
1467
    del mydict["members"]
1468
    return mydict
1469

    
1470
  @classmethod
1471
  def FromDict(cls, val):
1472
    """Custom function for nodegroup.
1473

1474
    The members slot is initialized to an empty list upon deserialization.
1475

1476
    """
1477
    obj = super(NodeGroup, cls).FromDict(val)
1478
    obj.members = []
1479
    return obj
1480

    
1481
  def UpgradeConfig(self):
1482
    """Fill defaults for missing configuration values.
1483

1484
    """
1485
    if self.ndparams is None:
1486
      self.ndparams = {}
1487

    
1488
    if self.serial_no is None:
1489
      self.serial_no = 1
1490

    
1491
    if self.alloc_policy is None:
1492
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1493

    
1494
    # We only update mtime, and not ctime, since we would not be able
1495
    # to provide a correct value for creation time.
1496
    if self.mtime is None:
1497
      self.mtime = time.time()
1498

    
1499
    self.diskparams = UpgradeDiskParams(self.diskparams)
1500
    if self.ipolicy is None:
1501
      self.ipolicy = MakeEmptyIPolicy()
1502

    
1503
  def FillND(self, node):
1504
    """Return filled out ndparams for L{objects.Node}
1505

1506
    @type node: L{objects.Node}
1507
    @param node: A Node object to fill
1508
    @return: a copy of the node's ndparams with defaults filled
1509

1510
    """
1511
    return self.SimpleFillND(node.ndparams)
1512

    
1513
  def SimpleFillND(self, ndparams):
1514
    """Fill a given ndparams dict with defaults.
1515

1516
    @type ndparams: dict
1517
    @param ndparams: the dict to fill
1518
    @rtype: dict
1519
    @return: a copy of the passed in ndparams with missing keys filled
1520
        from the node group defaults
1521

1522
    """
1523
    return FillDict(self.ndparams, ndparams)
1524

    
1525

    
1526
class Cluster(TaggableObject):
1527
  """Config object representing the cluster."""
1528
  __slots__ = [
1529
    "serial_no",
1530
    "rsahostkeypub",
1531
    "highest_used_port",
1532
    "tcpudp_port_pool",
1533
    "mac_prefix",
1534
    "volume_group_name",
1535
    "reserved_lvs",
1536
    "drbd_usermode_helper",
1537
    "default_bridge",
1538
    "default_hypervisor",
1539
    "master_node",
1540
    "master_ip",
1541
    "master_netdev",
1542
    "master_netmask",
1543
    "use_external_mip_script",
1544
    "cluster_name",
1545
    "file_storage_dir",
1546
    "shared_file_storage_dir",
1547
    "enabled_hypervisors",
1548
    "hvparams",
1549
    "ipolicy",
1550
    "os_hvp",
1551
    "beparams",
1552
    "osparams",
1553
    "nicparams",
1554
    "ndparams",
1555
    "diskparams",
1556
    "candidate_pool_size",
1557
    "modify_etc_hosts",
1558
    "modify_ssh_setup",
1559
    "maintain_node_health",
1560
    "uid_pool",
1561
    "default_iallocator",
1562
    "hidden_os",
1563
    "blacklisted_os",
1564
    "primary_ip_family",
1565
    "prealloc_wipe_disks",
1566
    "hv_state_static",
1567
    "disk_state_static",
1568
    ] + _TIMESTAMPS + _UUID
1569

    
1570
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    self.diskparams = UpgradeDiskParams(self.diskparams)

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
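
  # Illustrative note (added for this review, not upstream code): the method
  # above is meant to be run when an older configuration is loaded, e.g.
  # (hedged sketch, "old_data" is a made-up name):
  #   cluster = Cluster.FromDict(old_data["cluster"])
  #   cluster.UpgradeConfig()   # fills hvparams, ipolicy, ... with defaults
  # It only fills slots that are still None/missing, so re-running it on an
  # already current object is expected to be idempotent.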

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj
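
  # Hedged round-trip sketch (illustration only, not upstream code):
  #   data = cluster.ToDict()          # tcpudp_port_pool serialized as a list
  #   clone = Cluster.FromDict(data)   # ...and restored as a set
  #   assert isinstance(clone.tcpudp_port_pool, set)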

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict
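
  # Hedged example of the fill order above (all values are made up):
  #   self.hvparams = {"kvm": {"kernel_path": "/vmlinuz", "acpi": True}}
  #   self.os_hvp = {"debian": {"kvm": {"acpi": False}}}
  #   self.GetHVDefaults("kvm", os_name="debian")
  #     => {"kernel_path": "/vmlinuz", "acpi": False}
  # i.e. per-OS hypervisor overrides (os_hvp) win over cluster-wide hvparams.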

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)
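
  # Hedged usage sketch (values are made up): with skip_globals=True the keys
  # listed in constants.HVC_GLOBALS are excluded from the returned dict:
  #   filled = cluster.SimpleFillHV("kvm", "debian", {"acpi": False},
  #                                 skip_globals=True)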

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
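
  # Hedged example (made-up values): for a variant OS name such as
  # "debian+testing", parameters are layered base OS -> variant -> caller:
  #   self.osparams = {"debian": {"mirror": "a"},
  #                    "debian+testing": {"mirror": "b"}}
  #   self.SimpleFillOS("debian+testing", {"extra_pkgs": "vim"})
  #     => {"mirror": "b", "extra_pkgs": "vim"}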

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill a disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))
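
  # Hedged note (illustration; relies on NodeGroup.FillND defined elsewhere in
  # this module): the effective precedence is node ndparams over nodegroup
  # ndparams over cluster ndparams, since each fill step only adds keys that
  # are still missing.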

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
      the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)


class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]


class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS


class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]


class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]


class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]


class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]


class _QueryResponseBase(ConfigObject):
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj


class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]


class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]


class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = [
    ]


class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]


class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
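
  # Hedged example (values are made up; relies on ConfigObject's keyword
  # constructor): a minimal CONS_SSH console passing the assertions above
  # needs instance, kind, host, user and command, while message, port and
  # display may stay unset:
  #   cons = InstanceConsole(instance="inst1.example.com",
  #                          kind=constants.CONS_SSH,
  #                          host="node1.example.com",
  #                          user="root",
  #                          command=["ssh", "-t", "node1.example.com"])
  #   assert cons.Validate()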
2082

    
2083

    
2084
class SerializableConfigParser(ConfigParser.SafeConfigParser):
2085
  """Simple wrapper over ConfigParse that allows serialization.
2086

2087
  This class is basically ConfigParser.SafeConfigParser with two
2088
  additional methods that allow it to serialize/unserialize to/from a
2089
  buffer.
2090

2091
  """
2092
  def Dumps(self):
2093
    """Dump this instance and return the string representation."""
2094
    buf = StringIO()
2095
    self.write(buf)
2096
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
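
  # Hedged round-trip sketch (illustration only, standard ConfigParser API):
  #   scp = SerializableConfigParser()
  #   scp.add_section("node")
  #   scp.set("node", "name", "node1.example.com")
  #   text = scp.Dumps()
  #   clone = SerializableConfigParser.Loads(text)
  #   assert clone.get("node", "name") == "node1.example.com"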