4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Transportable objects for Ganeti.

This module provides small, mostly data-only objects which are safe to
pass to and from external parties.

"""
29 # pylint: disable-msg=E0203,W0201
31 # E0203: Access to member %r before its definition, since we use
32 # objects.py which doesn't explicitely initialise its members
34 # W0201: Attribute '%s' defined outside __init__
import ConfigParser
import re
import copy
from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
# Public API of this module; anything else is implementation detail.
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
           "OS", "Node", "Cluster", "FillDict"]

# Slot names shared by all objects that record creation/modification times.
_TIMESTAMPS = ["ctime", "mtime"]
# Slot name shared by all objects that carry a UUID.
_UUID = ["uuid"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep copy so that later mutation of the result cannot leak back
  # into the shared defaults dictionary
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        # key absent from both dicts; nothing to remove
        pass
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated (or newly created) group dictionary

  """
  if target is None:
    # no group defined at all, initialise with the defaults only
    target = {constants.PP_DEFAULT: defaults}
  else:
    # fill each existing group with the missing default values
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __init__(self, **kwargs):
    # accept any slot name as a keyword; unknown names will be caught
    # by __setattr__ via the __slots__ mechanism
    for k, v in kwargs.iteritems():
      setattr(self, k, v)

  def __getattr__(self, name):
    # only invoked for attributes not found the normal way: declared
    # but unset slots read as None, everything else is an error
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    # restore only the attributes that are still declared as slots,
    # silently dropping state for slots that no longer exist
    slots = self._all_slots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  @classmethod
  def _all_slots(cls):
    """Compute the list of all declared slots for a class.

    """
    slots = []
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))
    return slots

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keyword arguments must be str, not unicode, under Python 2
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable-msg=W0142
    return obj

  @staticmethod
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    else:
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
                      type(container))
    return ret

  @staticmethod
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    if c_type is dict:
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    else:
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    return ret

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    # round-trip through the dict form to get a fully independent clone
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
class TaggableObject(ConfigObject):
  """An generic class supporting tags.

  """
  __slots__ = ["tags"]
  # allowed characters for a single tag
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      # lazily initialise the tag set on first access
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    @raise errors.TagError: if the tag is invalid or there are too many

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    @raise errors.TagError: if the tag is invalid or not present

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    # tags are serialized as a list; restore the set form
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
               _TIMESTAMPS)

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances":
      mydict[key] = self._ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if in there is at disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set an helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "bridge", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    # a bridged NIC is meaningless without a link (bridge) to attach to
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.nicparams is None:
      self.nicparams = {}
      if self.bridge is not None:
        # migrate the pre-2.1 "bridge" attribute into nicparams
        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
        self.nicparams[constants.NIC_LINK] = self.bridge
    # bridge is no longer used it 2.1. The slot is left there to support
    # upgrading, but can be removed once upgrades to the current version
    # straight from 2.0 are deprecated.
    if self.bridge is not None:
      self.bridge = None
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # logical_id for LVs is (vg_name, lv_name)
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
    elif self.dev_type in constants.LDS_DRBD:
      # logical_id starts with (primary_node, secondary_node, ...)
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device, the list of own nodes describes it completely
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      # the data child grows together with the DRBD device itself
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      # physical ID already computed and no logical ID to derive from
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node)
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = self._ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    # IDs are serialized as lists; restore the tuple form
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()
    # add here config upgrade for this disk
688 class Instance(TaggableObject):
689 """Config object representing an instance."""
704 ] + _TIMESTAMPS + _UUID
706 def _ComputeSecondaryNodes(self):
707 """Compute the list of secondary nodes.
709 This is a simple wrapper over _ComputeAllNodes.
712 all_nodes = set(self._ComputeAllNodes())
713 all_nodes.discard(self.primary_node)
714 return tuple(all_nodes)
716 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
717 "List of secondary nodes")
719 def _ComputeAllNodes(self):
720 """Compute the list of all nodes.
722 Since the data is already there (in the drbd disks), keeping it as
723 a separate normal attribute is redundant and if not properly
724 synchronised can cause problems. Thus it's better to compute it
728 def _Helper(nodes, device):
729 """Recursively computes nodes given a top device."""
730 if device.dev_type in constants.LDS_DRBD:
731 nodea, nodeb = device.logical_id[:2]
735 for child in device.children:
736 _Helper(nodes, child)
739 all_nodes.add(self.primary_node)
740 for device in self.disks:
741 _Helper(all_nodes, device)
742 return tuple(all_nodes)
744 all_nodes = property(_ComputeAllNodes, None, None,
745 "List of all nodes of the instance")
747 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
748 """Provide a mapping of nodes to LVs this instance owns.
750 This function figures out what logical volumes should belong on
751 which nodes, recursing through a device tree.
753 @param lvmap: optional dictionary to receive the
754 'node' : ['lv', ...] data.
756 @return: None if lvmap arg is given, otherwise, a dictionary
757 of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
761 node = self.primary_node
764 lvmap = { node : [] }
767 if not node in lvmap:
775 if dev.dev_type == constants.LD_LV:
776 lvmap[node].append(dev.logical_id[1])
778 elif dev.dev_type in constants.LDS_DRBD:
780 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
781 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
784 self.MapLVsByNode(lvmap, dev.children, node)
788 def FindDisk(self, idx):
789 """Find a disk given having a specified index.
791 This is just a wrapper that does validation of the index.
794 @param idx: the disk index
796 @return: the corresponding disk
797 @raise errors.OpPrereqError: when the given index is not valid
802 return self.disks[idx]
803 except (TypeError, ValueError), err:
804 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
807 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
808 " 0 to %d" % (idx, len(self.disks)),
812 """Instance-specific conversion to standard python types.
814 This replaces the children lists of objects with lists of standard
818 bo = super(Instance, self).ToDict()
820 for attr in "nics", "disks":
821 alist = bo.get(attr, None)
823 nlist = self._ContainerToDicts(alist)
830 def FromDict(cls, val):
831 """Custom function for instances.
834 obj = super(Instance, cls).FromDict(val)
835 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
836 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
839 def UpgradeConfig(self):
840 """Fill defaults for missing configuration values.
843 for nic in self.nics:
845 for disk in self.disks:
848 for key in constants.HVC_GLOBALS:
850 del self.hvparams[key]
853 if self.osparams is None:
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  VARIANT_DELIM = "+"

  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      # no variant given, normalise to a two-element list anyway
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
class Node(TaggableObject):
  """Config object representing a node."""
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    ] + _TIMESTAMPS + _UUID
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "cluster_name",
    "file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable-msg=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in self.hvparams:
        self.hvparams[hypervisor] = FillDict(
            constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used it 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
        [hvname for hvname in self.enabled_hypervisors
         if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

  def ToDict(self):
    """Custom function for cluster.

    """
    mydict = super(Cluster, self).ToDict()
    # the port pool is a set, which is not serializable
    mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    """
    obj = super(Cluster, cls).FromDict(val)
    if not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
    return obj

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    # cluster-wide defaults first, then OS-specific overrides on top
    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    ]
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
1250 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1251 """Simple wrapper over ConfigParse that allows serialization.
1253 This class is basically ConfigParser.SafeConfigParser with two
1254 additional methods that allow it to serialize/unserialize to/from a
1259 """Dump this instance and return the string representation."""
1262 return buf.getvalue()
1265 def Loads(cls, data):
1266 """Load data from a string."""
1267 buf = StringIO(data)