4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable-msg=E0203
31 # E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
38 from cStringIO import StringIO
40 from ganeti import errors
41 from ganeti import constants
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

# Slot names for creation/modification timestamps, shared by several of the
# config object classes below (appended to their __slots__ lists).
_TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep copy so nested default containers are never shared with (or
  # mutated through) the returned dict
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  # skip_keys defaults to None rather than a mutable [] so the default
  # object cannot be accidentally mutated across calls
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the filled (or newly created) target dict

  """
  if target is None: # no parameters at all: emulate a defaults-only dict
    target = {constants.PP_DEFAULT: defaults}
  else:
    # fill every existing group on top of the defaults
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    # .items() instead of .iteritems(): identical behaviour, and keeps the
    # class importable under Python 3 as well
    for k, v in kwargs.items():
      setattr(self, k, v)

  def __getattr__(self, name):
    # called only for attributes not found normally: slot members that were
    # never set read as None, anything else is a real error
    if name not in self.__slots__:
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    # restore only known slots; silently drop unknown keys from the state
    for name in state:
      if name in self.__slots__:
        setattr(self, name, state[name])

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.__slots__:
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keyword arguments must be plain strings
    val_str = dict([(str(k), v) for k, v in val.items()])
    obj = cls(**val_str)
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.items()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.items()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    # round-trip through the dict form, so children are deep-copied too
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ConfigObject.__slots__ + ["tags"]
  # raw string so the \w character class is not subject to
  # string-escape interpretation
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      # lazily initialise the set, so objects created without tags work
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    @raise errors.TagError: if the tag is invalid or the object already
        holds the maximum number of tags

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    # reuse the set we already fetched instead of a second GetTags() call
    tags.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    @raise errors.TagError: if the tag is invalid or not present

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    # serialized form stores tags as a list; convert back to a set
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # recurse into the cluster and all contained objects
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    # a bridged NIC must name the bridge it is attached to
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        # migrate the pre-2.1 per-NIC bridge into the nicparams model
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but will be removed in 2.2
    if self.bridge is not None:
      self.bridge = None
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    """
    if self.dev_type == constants.LD_LV:
      # logical_id is (vg_name, lv_name) for LVs
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      # for DRBD the logical_id starts with the two node names
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      # only the data child (index 0) grows, not the metadata device
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      # the physical ID always starts with our own (ip, port) pair
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    # IDs are stored as lists in the serialized form; restore the tuples
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
class Instance(TaggableObject):
  """Config object representing an instance."""
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      lvmap = { node : [] }
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])

      elif dev.dev_type in constants.LDS_DRBD:
        # DRBD children live on both nodes of the logical_id
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given having a specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    # also catch TypeError (e.g. idx=None), which int() raises as well
    except (TypeError, ValueError) as err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      # message fixed: "instace" typo, valid range is 0..len-1, balanced parens
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    # global hypervisor parameters must not be kept per-instance
    if self.hvparams:
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
class OS(ConfigObject):
  """Config object representing an operating system."""
  # NOTE(review): only "supported_variants" is visible in this extraction;
  # the remaining slot names were restored from upstream — confirm against
  # this tree
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "supported_variants",
    ]
class Node(TaggableObject):
  """Config object representing a node."""
  # NOTE(review): the slot names between the bracket lines are not visible
  # in this extraction; restored from upstream — confirm against this tree
  __slots__ = TaggableObject.__slots__ + [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = TaggableObject.__slots__ + [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    # if nicparams was missing, this is a pre-2.1 config: remember so we
    # can migrate the old default_bridge value below
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading, but will be removed in 2.2
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    # sets are not JSON-serializable; store the port pool as a list
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    return FillDict(self.hvparams.get(instance.hypervisor, {}),
                    instance.hvparams, skip_keys=skip_keys)

  def FillBE(self, instance):
    """Fill an instance's beparams dict.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}),
                    instance.beparams)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  # NOTE(review): slot names are not visible in this extraction; restored
  # from upstream — confirm against this tree
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
989 class SerializableConfigParser(ConfigParser.SafeConfigParser):
990 """Simple wrapper over ConfigParse that allows serialization.
992 This class is basically ConfigParser.SafeConfigParser with two
993 additional methods that allow it to serialize/unserialize to/from a
998 """Dump this instance and return the string representation."""
1001 return buf.getvalue()
1005 """Load data from a string."""
1006 buf = StringIO(data)
1007 cfp = SerializableConfigParser()