4 # Copyright (C) 2006, 2007, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable-msg=E0203,W0201
31 # E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
import ConfigParser
import copy
import re
from cStringIO import StringIO
from socket import AF_INET

from ganeti import errors
from ganeti import constants
47 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
48 "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
50 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep copy so that later mutation of the result cannot leak back
  # into the shared defaults dictionary
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    # remove keys the caller explicitly does not want filled; missing
    # keys are silently ignored
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated (or newly created) target dict

  """
  if target is None:
    # no parameter groups at all: create just the default group
    target = {constants.PP_DEFAULT: defaults}
  else:
    # fill every existing group on top of the defaults
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    # every keyword becomes an attribute; __getattr__ rejects names
    # that are not declared in the slots of the class hierarchy
    for k, v in kwargs.items():
      setattr(self, k, v)

  def __getattr__(self, name):
    # called only for attributes not found the normal way: declared but
    # unset slots read as None, everything else is an error
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keys may arrive as unicode from JSON; force plain str for **kwargs
    val_str = dict([(str(k), v) for k, v in val.items()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.items()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.items()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    # lazily create the set so unpickled objects without tags work too
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if in there is at disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set an helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    # a bridged NIC must name the bridge it is attached to
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      # migrate the pre-2.1 "bridge" slot into the nicparams dict
      if self.bridge is not None:
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used in 2.1. The slot is left there to support
    # upgrading, but can be removed once upgrades to the current version
    # straight from 2.0 are deprecated.
    if self.bridge is not None:
      self.bridge = None
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # logical_id is (vg_name, lv_name) for LVs
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      # logical_id starts with (primary_node, secondary_node, ...)
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      # only the data child (index 0) grows; the meta device stays fixed
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      # local endpoint first, remote endpoint second
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    @rtype: list
    @return: a list of error messages (empty if the disk is valid)

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
class Instance(TaggableObject):
  """Config object representing an instance."""
  # NOTE(review): slot list reconstructed from the 2.x config format;
  # confirm against the canonical objects.py for this release.
  __slots__ = [
    "name",
    "primary_node",
    "os",
    "hypervisor",
    "hvparams",
    "beparams",
    "osparams",
    "admin_up",
    "nics",
    "disks",
    "disk_template",
    "network_port",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        nodes.add(nodea)
        nodes.add(nodeb)
      if device.children:
        for child in device.children:
          _Helper(nodes, child)

    all_nodes = set()
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary
        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }

    """
    if node is None:
      node = self.primary_node

    if lvmap is None:
      # top-level call: create the map and remember to return it
      lvmap = {node: []}
      ret = lvmap
    else:
      if not node in lvmap:
        lvmap[node] = []
      ret = None

    if not devs:
      devs = self.disks

    for dev in devs:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[1])
      elif dev.dev_type in constants.LDS_DRBD:
        # children of a DRBD disk live on both of its nodes
        if dev.children:
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
      elif dev.children:
        self.MapLVsByNode(lvmap, dev.children, node)

    return ret

  def FindDisk(self, idx):
    """Find a disk given having a specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError) as err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      # valid indices are 0..len-1, hence the -1 in the message
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)

  def ToDict(self):
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      if alist:
        nlist = self._ContainerToDicts(alist)
      else:
        nlist = []
      bo[attr] = nlist
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      nic.UpgradeConfig()
    for disk in self.disks:
      disk.UpgradeConfig()
    if self.hvparams:
      # cluster-global hypervisor parameters must not be stored per-instance
      for key in constants.HVC_GLOBALS:
        try:
          del self.hvparams[key]
        except KeyError:
          pass
    if self.osparams is None:
      self.osparams = {}
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  """
  # NOTE(review): slot list reconstructed from the 2.x OS API;
  # confirm against the canonical objects.py for this release.
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]
class Node(TaggableObject):
  """Config object representing a node."""
  # NOTE(review): slot list reconstructed from the 2.x config format;
  # confirm against the canonical objects.py for this release.
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    ] + _TIMESTAMPS + _UUID
class NodeGroup(ConfigObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is
    only kept in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  # NOTE(review): slot list reconstructed from the visible fragments and
  # the 2.x config format; confirm against the canonical objects.py.
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "osparams",
    "beparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "reserved_lvs",
    "primary_ip_family",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      # carry the old cluster-wide default bridge into the nic params
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    # cluster-wide defaults first, then the per-OS overrides on top
    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    # "os+variant" entries override plain "os" entries
    name_only = os_name.split("+", 1)[0]
    result = self.osparams.get(name_only, {})
    result = FillDict(result, self.osparams.get(os_name, {}))
    return FillDict(result, os_params)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  # NOTE(review): slot list reconstructed; confirm against the
  # canonical objects.py for this release.
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  # NOTE(review): slot list reconstructed; confirm against the
  # canonical objects.py for this release.
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_percent",
    "progress_eta",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
1239 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1240 """Simple wrapper over ConfigParse that allows serialization.
1242 This class is basically ConfigParser.SafeConfigParser with two
1243 additional methods that allow it to serialize/unserialize to/from a
1248 """Dump this instance and return the string representation."""
1251 return buf.getvalue()
1254 def Loads(cls, data):
1255 """Load data from a string."""
1256 buf = StringIO(data)