4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable-msg=E0203,W0201
31 # E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
39 from cStringIO import StringIO
41 from ganeti import errors
42 from ganeti import constants
44 from socket import AF_INET
47 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
48 "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
50 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep copy so that callers can mutate the result without
  # affecting the shared defaults
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated (or newly created) group dict

  """
  if target is None:
    # no grouped params yet: create the default group from scratch
    target = {constants.PP_DEFAULT: defaults}
  else:
    # fill every existing group with the missing defaults
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    # only called for attributes not found normally; declared-but-unset
    # slots read as None instead of raising
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    # restore only attributes that are declared slots; ignore the rest
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keyword arguments must be str, not unicode
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      # lazily create the tags set on first access
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
class ConfigData(ConfigObject):
  """Top-level config object."""
  # NOTE(review): slot list reconstructed from upstream — TODO confirm
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "instances",
    "nodegroups",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if in there is at disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set an helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used it 2.1. The slot is left there to support
    # upgrading, but can be removed once upgrades to the current version
    # straight from 2.0 are deprecated.
    if self.bridge is not None:
      self.bridge = None
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
703 class Instance(TaggableObject):
704 """Config object representing an instance."""
719 ] + _TIMESTAMPS + _UUID
721 def _ComputeSecondaryNodes(self):
722 """Compute the list of secondary nodes.
724 This is a simple wrapper over _ComputeAllNodes.
727 all_nodes = set(self._ComputeAllNodes())
728 all_nodes.discard(self.primary_node)
729 return tuple(all_nodes)
731 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
732 "List of secondary nodes")
734 def _ComputeAllNodes(self):
735 """Compute the list of all nodes.
737 Since the data is already there (in the drbd disks), keeping it as
738 a separate normal attribute is redundant and if not properly
739 synchronised can cause problems. Thus it's better to compute it
743 def _Helper(nodes, device):
744 """Recursively computes nodes given a top device."""
745 if device.dev_type in constants.LDS_DRBD:
746 nodea, nodeb = device.logical_id[:2]
750 for child in device.children:
751 _Helper(nodes, child)
754 all_nodes.add(self.primary_node)
755 for device in self.disks:
756 _Helper(all_nodes, device)
757 return tuple(all_nodes)
759 all_nodes = property(_ComputeAllNodes, None, None,
760 "List of all nodes of the instance")
762 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
763 """Provide a mapping of nodes to LVs this instance owns.
765 This function figures out what logical volumes should belong on
766 which nodes, recursing through a device tree.
768 @param lvmap: optional dictionary to receive the
769 'node' : ['lv', ...] data.
771 @return: None if lvmap arg is given, otherwise, a dictionary
772 of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
776 node = self.primary_node
779 lvmap = { node : [] }
782 if not node in lvmap:
790 if dev.dev_type == constants.LD_LV:
791 lvmap[node].append(dev.logical_id[1])
793 elif dev.dev_type in constants.LDS_DRBD:
795 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
796 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
799 self.MapLVsByNode(lvmap, dev.children, node)
803 def FindDisk(self, idx):
804 """Find a disk given having a specified index.
806 This is just a wrapper that does validation of the index.
809 @param idx: the disk index
811 @return: the corresponding disk
812 @raise errors.OpPrereqError: when the given index is not valid
817 return self.disks[idx]
818 except (TypeError, ValueError), err:
819 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
822 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
823 " 0 to %d" % (idx, len(self.disks)),
827 """Instance-specific conversion to standard python types.
829 This replaces the children lists of objects with lists of standard
833 bo = super(Instance, self).ToDict()
835 for attr in "nics", "disks":
836 alist = bo.get(attr, None)
838 nlist = self._ContainerToDicts(alist)
845 def FromDict(cls, val):
846 """Custom function for instances.
849 obj = super(Instance, cls).FromDict(val)
850 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
851 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
854 def UpgradeConfig(self):
855 """Fill defaults for missing configuration values.
858 for nic in self.nics:
860 for disk in self.disks:
863 for key in constants.HVC_GLOBALS:
865 del self.hvparams[key]
868 if self.osparams is None:
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  # NOTE(review): slot list partially reconstructed — TODO confirm
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
class Node(TaggableObject):
  """Config object representing a node."""
  # NOTE(review): slot list reconstructed from upstream — TODO confirm
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True
class NodeGroup(ConfigObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  # NOTE(review): slot list heavily reconstructed from upstream; only the
  # names visible in the reviewed snapshot are certain — TODO confirm
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used it 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  # NOTE(review): slot list reconstructed from upstream — TODO confirm
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  # NOTE(review): slot list reconstructed from upstream (only
  # "progress_throughput" is visible in the reviewed snapshot) — TODO confirm
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "connect_timeout",
    ]
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
1321 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1322 """Simple wrapper over ConfigParse that allows serialization.
1324 This class is basically ConfigParser.SafeConfigParser with two
1325 additional methods that allow it to serialize/unserialize to/from a
1330 """Dump this instance and return the string representation."""
1333 return buf.getvalue()
1336 def Loads(cls, data):
1337 """Load data from a string."""
1338 buf = StringIO(data)