4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable-msg=E0203,W0201
31 # E0203: Access to member %r before its definition, since we use
32 # objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
39 from cStringIO import StringIO
41 from ganeti import errors
42 from ganeti import constants
45 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
46 "OS", "Node", "Cluster", "FillDict"]
48 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # start from a deep copy so nested default values are never shared
  # with (or mutated through) the returned dictionary
  filled = copy.deepcopy(defaults_dict)
  filled.update(custom_dict)
  for key in skip_keys or []:
    try:
      del filled[key]
    except KeyError:
      pass
  return filled
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the filled (or newly created) group dictionary

  """
  if target is None:
    # nothing customised yet: seed with one default group
    target = {constants.PP_DEFAULT: defaults}
  else:
    # fill each existing group on top of the defaults
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    # slot-declared attributes which were never assigned read as None;
    # anything outside __slots__ is a programming error
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    # only restore keys which are valid attributes for this class
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    mydict = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        mydict[name] = value
    return mydict

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keyword argument names must be plain strings, not unicode
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      return dict([(k, v.ToDict()) for k, v in container.iteritems()])
    if isinstance(container, (list, tuple, set, frozenset)):
      return [elem.ToDict() for elem in container]
    raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                    type(container))

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      return dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    if c_type in (list, tuple, set, frozenset):
      return c_type([e_type.FromDict(elem) for elem in source])
    raise TypeError("Invalid container type %s passed to"
                    " _ContainerFromDicts" % c_type)

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    # round-trip through the dict form to clone recursively
    return self.__class__.FromDict(self.ToDict())

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    # create the set lazily on first access
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    tags.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()
    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    # the serialized form stores tags as a list; restore the set
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    data = super(ConfigData, self).ToDict()
    data["cluster"] = data["cluster"].ToDict()
    for key in "nodes", "instances":
      data[key] = self._ContainerToDicts(data[key])
    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # delegate the upgrade to the cluster and each node/instance
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    # bridged mode requires a link (the bridge name)
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        # migrate the pre-2.1 per-nic bridge into the parameter dict
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      # logical_id is (vg_name, lv_name) for LVs
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      # local devices live only where their parent lives
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device, the entries above describe it completely
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        # check if child nodes differ from my nodes; note that
        # subdisk can differ from the child itself, and be instead
        # one of its descendants
        for subnode, subdisk in child_result:
          if subnode not in my_nodes:
            result.append((subnode, subdisk))
          # otherwise child is under our own node, so we ignore this
          # entry (but probably the other results in the list will
          # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        # only the data child (index 0) grows, not the metadata one
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
636 class Instance(TaggableObject):
637 """Config object representing an instance."""
638 __slots__ = TaggableObject.__slots__ + [
651 ] + _TIMESTAMPS + _UUID
653 def _ComputeSecondaryNodes(self):
654 """Compute the list of secondary nodes.
656 This is a simple wrapper over _ComputeAllNodes.
659 all_nodes = set(self._ComputeAllNodes())
660 all_nodes.discard(self.primary_node)
661 return tuple(all_nodes)
663 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
664 "List of secondary nodes")
666 def _ComputeAllNodes(self):
667 """Compute the list of all nodes.
669 Since the data is already there (in the drbd disks), keeping it as
670 a separate normal attribute is redundant and if not properly
671 synchronised can cause problems. Thus it's better to compute it
675 def _Helper(nodes, device):
676 """Recursively computes nodes given a top device."""
677 if device.dev_type in constants.LDS_DRBD:
678 nodea, nodeb = device.logical_id[:2]
682 for child in device.children:
683 _Helper(nodes, child)
686 all_nodes.add(self.primary_node)
687 for device in self.disks:
688 _Helper(all_nodes, device)
689 return tuple(all_nodes)
691 all_nodes = property(_ComputeAllNodes, None, None,
692 "List of all nodes of the instance")
694 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
695 """Provide a mapping of nodes to LVs this instance owns.
697 This function figures out what logical volumes should belong on
698 which nodes, recursing through a device tree.
700 @param lvmap: optional dictionary to receive the
701 'node' : ['lv', ...] data.
703 @return: None if lvmap arg is given, otherwise, a dictionary
704 of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
708 node = self.primary_node
711 lvmap = { node : [] }
714 if not node in lvmap:
722 if dev.dev_type == constants.LD_LV:
723 lvmap[node].append(dev.logical_id[1])
725 elif dev.dev_type in constants.LDS_DRBD:
727 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
728 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
731 self.MapLVsByNode(lvmap, dev.children, node)
735 def FindDisk(self, idx):
736 """Find a disk given having a specified index.
738 This is just a wrapper that does validation of the index.
741 @param idx: the disk index
743 @return: the corresponding disk
744 @raise errors.OpPrereqError: when the given index is not valid
749 return self.disks[idx]
750 except ValueError, err:
751 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
754 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
755 " 0 to %d" % (idx, len(self.disks)),
759 """Instance-specific conversion to standard python types.
761 This replaces the children lists of objects with lists of standard
765 bo = super(Instance, self).ToDict()
767 for attr in "nics", "disks":
768 alist = bo.get(attr, None)
770 nlist = self._ContainerToDicts(alist)
777 def FromDict(cls, val):
778 """Custom function for instances.
781 obj = super(Instance, cls).FromDict(val)
782 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
783 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
786 def UpgradeConfig(self):
787 """Fill defaults for missing configuration values.
790 for nic in self.nics:
792 for disk in self.disks:
795 for key in constants.HVC_GLOBALS:
797 del self.hvparams[key]
class OS(ConfigObject):
  """Config object representing an operating system."""
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "supported_variants",
    ]
class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    # remember whether nicparams were missing before the upgrade, so
    # that the old cluster-wide default_bridge can be migrated below
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    # the port pool is a set, which does not serialize; store a list
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []
    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams, skip_keys=skip_keys)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
993 class SerializableConfigParser(ConfigParser.SafeConfigParser):
994 """Simple wrapper over ConfigParse that allows serialization.
996 This class is basically ConfigParser.SafeConfigParser with two
997 additional methods that allow it to serialize/unserialize to/from a
1002 """Dump this instance and return the string representation."""
1005 return buf.getvalue()
1009 """Load data from a string."""
1010 buf = StringIO(data)
1011 cfp = SerializableConfigParser()