4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable=E0203,W0201,R0902
31 # E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
36 # R0902: Allow instances of these objects to have more than 20 attributes
import copy
import re
import time

from cStringIO import StringIO

from ganeti import errors
from ganeti import constants
from ganeti import netutils

from socket import AF_INET
51 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
52 "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # Deep-copy the defaults so that mutating the returned dict (or nested
  # values inside it) can never leak back into the shared defaults.
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    # Drop keys the caller does not want filled; keys absent from the
    # result are ignored on purpose.
    for k in skip_keys:
      ret_dict.pop(k, None)
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated (or newly created) target dict

  """
  if target is None:
    # No grouped parameters at all yet: create the default group only.
    target = {constants.PP_DEFAULT: defaults}
  else:
    # Fill every existing group with the defaults.
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  # The legacy single "memory" setting is split into maxmem/minmem, both
  # initialised to the old value; the legacy key is removed afterwards.
  if constants.BE_MEMORY in target:
    memory = target.pop(constants.BE_MEMORY)
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if diskparams is None:
    # Nothing configured yet: take the defaults wholesale.
    result = constants.DISK_DT_DEFAULTS.copy()
  else:
    result = {}
    # Update the disk parameter values for each disk template.
    # The code iterates over constants.DISK_TEMPLATES because new templates
    # might have been added.
    for template in constants.DISK_TEMPLATES:
      if template not in diskparams:
        result[template] = constants.DISK_DT_DEFAULTS[template].copy()
      else:
        result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
                                    diskparams[template])
  return result
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its
      classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  # NOTE(review): this listing elides scattered lines of the class body;
  # the affected spots are flagged below. Confirm against the full source.

  def __init__(self, **kwargs):
    # Every keyword argument becomes an attribute; the names must appear
    # in the declared __slots__ of this class or a parent.
    for k, v in kwargs.iteritems():
      # NOTE(review): the loop body (attribute assignment) is elided here

  def __getattr__(self, name):
    # Only invoked for attributes not found normally: reject names that
    # are not declared slots, so typos fail loudly instead of returning None.
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    # NOTE(review): the fall-through (returning None for declared but
    # unset slots, per the class docstring) is elided here

  def __setstate__(self, state):
    # Unpickling support: restore only keys that are declared slots.
    slots = self._all_slots()
    # NOTE(review): the loop header over state's keys is elided here
        setattr(self, name, state[name])

  # NOTE(review): the classmethod header for "_all_slots(cls)" and the
  # initialisation of "slots" are elided here
    """Compute the list of all declared slots for a class.

    """
    for parent in cls.__mro__:
      # Collect __slots__ from the whole MRO, not just this class.
      slots.extend(getattr(parent, "__slots__", []))
    # NOTE(review): the return of "slots" is elided here

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:
        # NOTE(review): the dict assignment and the return are elided here

  # Pickling uses the same representation as ToDict
  __getstate__ = ToDict

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # Force keys to str so they can be passed as keyword arguments.
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    # NOTE(review): "return obj" is elided here

  # NOTE(review): the @staticmethod decorator is elided here
  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
    # NOTE(review): the "else:" header is elided here
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %
    # NOTE(review): the format-argument continuation and "return ret"
    # are elided here

  # NOTE(review): the @staticmethod decorator is elided here
  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
    # NOTE(review): the "if c_type is dict:" branch header is elided here
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
    # NOTE(review): the "else:" header is elided here
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)
    # NOTE(review): "return ret" is elided here

  # NOTE(review): the "Copy(self)" method header is elided here
    """Makes a deep copy of the current object and its children.

    """
    # Round-trip through the dict form to obtain a deep copy.
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    # NOTE(review): "return clone_obj" is elided here

  # NOTE(review): the "__repr__(self)" method header is elided here
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    # Intentionally a no-op in the base class; subclasses override it.
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  # NOTE(review): this listing elides scattered lines of the class body;
  # the affected spots are flagged below.

  # Pattern every individual tag must match (also length-limited below).
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  # NOTE(review): the @classmethod decorator is elided here
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    # NOTE(review): an "if not tag:" guard header is elided here
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  # NOTE(review): the "GetTags(self)" method header is elided here
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    # Lazily initialise the attribute to an empty set on first access.
    # NOTE(review): the "if tags is None:" guard header and the return of
    # "tags" are elided here
      tags = self.tags = set()

  def AddTag(self, tag):
    """Add a new tag, after validating it.

    @raise errors.TagError: if the tag is invalid or the per-object
        tag limit is reached

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag, raising TagError if it is not present.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    # NOTE(review): the try/remove/except-KeyError structure around the
    # next statement is elided here
      raise errors.TagError("Tag not found")

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    # NOTE(review): "return bo" is elided here

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    # NOTE(review): "return obj" is elided here
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  # NOTE(review): the __slots__ declaration (and, apparently, an
  # "@ivar ip" docstring line) are elided in this listing.
class ConfigData(ConfigObject):
  """Top-level config object."""
  # NOTE(review): this listing elides the __slots__ declaration and
  # scattered lines of the class body; the spots are flagged below.

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])
    # NOTE(review): "return mydict" is elided here

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    # NOTE(review): "return obj" is elided here

  def HasAnyDiskOfType(self, dev_type):
    """Check whether there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          # NOTE(review): "return True" is elided here
    # NOTE(review): the final "return False" is elided here

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      # NOTE(review): the loop body (node upgrade call) is elided here
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      # NOTE(review): the default initialisation is elided here
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set an helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    # The mode must be a known one (or the special "auto" placeholder).
    if mode not in constants.NIC_VALID_MODES and mode != constants.VALUE_AUTO:
      raise errors.ConfigurationError("Invalid nic mode: %s" % mode)

    # Bridged NICs cannot work without a link (the bridge name).
    if mode == constants.NIC_MODE_BRIDGED and not nicparams[constants.NIC_LINK]:
      raise errors.ConfigurationError("Missing bridged nic link")
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode", "params"]
  # NOTE(review): this listing elides scattered lines of the class body;
  # the affected spots are flagged below.

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # For LVs the logical_id holds (vg_name, lv_name).
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    # NOTE(review): a trailing "return None" appears to be elided here

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      # NOTE(review): both return statements of this method are elided here

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    # NOTE(review): an "if self.children:" guard appears to be elided here
    for child in self.children:
      if child.IsBasedOnDiskType(dev_type):
        # NOTE(review): "return True" is elided here
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV]:
      # NOTE(review): the branch body (single-node result) is elided here
    elif self.dev_type in constants.LDS_DRBD:
      # For DRBD both data nodes are stored in the logical_id.
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    # NOTE(review): an "else:" header and the final "return result" are
    # elided around the next statement
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # NOTE(review): the leaf-device "return result" is elided here
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          # NOTE(review): a "continue" and an "else:" header appear to be
          # elided here; the loop below belongs to the multi-host case
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    # NOTE(review): the final "return result" is elided here

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      # NOTE(review): an "if self.children:" guard appears to be elided here
      return self.children[0].ComputeGrowth(amount)
    # NOTE(review): the else branches returning an empty dict are elided
      # Other disk types do not require VG space

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
      # NOTE(review): the size update is elided here
    elif self.dev_type == constants.LD_DRBD8:
      # NOTE(review): a children guard appears to be elided here
      self.children[0].RecordGrow(amount)
      # NOTE(review): the own-size update and "else:" header are elided
      # around the next statement
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  # NOTE(review): the "UnsetSize(self)" method header is elided here
    """Sets recursively the size to zero for the disk and its children.

    """
    for child in self.children:
      # NOTE(review): the recursive call and own-size reset are elided here

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    # NOTE(review): a children guard appears to be elided here
    for child in self.children:
      child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      # NOTE(review): an early "return" is elided here
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
        # NOTE(review): the format-argument continuation is elided here
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      # Physical id layout: (local_ip, port, remote_ip, port, minor, secret),
      # ordered from the point of view of target_node.
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    # NOTE(review): an "else:" header is elided before the next statement
      self.physical_id = self.logical_id

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      # NOTE(review): a guard on "alist" appears to be elided here
      bo[attr] = self._ContainerToDicts(alist)
    # NOTE(review): "return bo" is elided here

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    # NOTE(review): a guard on "obj.children" appears to be elided here
    obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    # NOTE(review): "return obj" is elided here

  # NOTE(review): the "__str__(self)" method header is elided here
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      # NOTE(review): the initial "val" assignment and the unconfigured
      # branch of the next conditional are elided here
      if self.physical_id is None:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      # NOTE(review): an "else:" header is elided before the next statement
        val += "no local storage"
    # NOTE(review): an "else:" header is elided before the next statement
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    # NOTE(review): an "else:" header is elided before the next statement
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    # NOTE(review): an "else:" header is elided before the next statement
      val += ", size='%s')>" % (self.size,)
    # NOTE(review): "return val" is elided here

  # NOTE(review): the "Verify(self)" method header and the initialisation
  # of "all_errors" are elided here
    """Checks that this disk is correctly configured.

    """
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    # NOTE(review): "return all_errors" is elided here

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # NOTE(review): a children guard appears to be elided here
    for child in self.children:
      child.UpgradeConfig()
    # NOTE(review): the "if self.params is None:" / "else:" structure
    # around the next two assignments is elided here
      self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
      self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
      # NOTE(review): the second argument continuation is elided here
    # add here config upgrade for this disk
class Instance(TaggableObject):
  """Config object representing an instance."""
  # NOTE(review): the body of the __slots__ list is elided in this listing;
  # only its closing line survives below.
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it
    dynamically.

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        nodea, nodeb = device.logical_id[:2]
        # NOTE(review): the additions of nodea/nodeb to "nodes" are elided
      # NOTE(review): a children guard appears to be elided here
        for child in device.children:
          _Helper(nodes, child)

    # NOTE(review): the initialisation of "all_nodes" is elided here
    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.
    @param devs: optional list of devices to recurse into (passed as
        dev.children on the recursive calls below)
    @param node: optional node name to account the volumes to
    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    # NOTE(review): this listing elides the default handling for
    # lvmap/devs, the per-device loop header, and the return bookkeeping.
    node = self.primary_node
    if not node in lvmap:
      # NOTE(review): first-visit initialisation is elided here
    if dev.dev_type == constants.LD_LV:
      # Account "vg_name/lv_name" to the current node.
      lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
    elif dev.dev_type in constants.LDS_DRBD:
      # Descend into the children on both DRBD data nodes.
      self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
      self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
    # NOTE(review): an "elif dev.children:" header appears to be elided
    # before the next statement
      self.MapLVsByNode(lvmap, dev.children, node)

  def FindDisk(self, idx):
    """Find a disk given having a specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    # NOTE(review): the "try:" header and index conversion are elided here
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
    # NOTE(review): the error-code argument and the IndexError branch
    # header are elided around the next statement
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
                                 " 0 to %d" % (idx, len(self.disks) - 1),
    # NOTE(review): the error-code argument is elided here

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    python types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
      # NOTE(review): a guard on "alist" appears to be elided here
      nlist = self._ContainerToDicts(alist)
      # NOTE(review): the assignment of nlist into bo and "return bo"
      # are elided here

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Custom function for instances.

    """
    # Convert the legacy boolean "admin_up" into the newer "admin_state".
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = constants.ADMINST_UP
      # NOTE(review): an "else:" header is elided before the next statement
        val["admin_state"] = constants.ADMINST_DOWN
    if "admin_up" in val:
      # NOTE(review): the removal of the legacy key is elided here
    obj = super(Instance, cls).FromDict(val)
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
    # NOTE(review): "return obj" is elided here

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
      # NOTE(review): the loop body (per-nic upgrade) is elided here
    for disk in self.disks:
      # NOTE(review): the loop body (per-disk upgrade) is elided here
    # NOTE(review): a guard on "self.hvparams" appears to be elided here
    for key in constants.HVC_GLOBALS:
      # NOTE(review): a "try:" header is elided here
        del self.hvparams[key]
      # NOTE(review): the KeyError handler is elided here
    if self.osparams is None:
      # NOTE(review): the default initialisation is elided here
    UpgradeBeParams(self.beparams)
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  # NOTE(review): the VARIANT_DELIM assignment and the first part of the
  # __slots__ list are elided in this listing; only two entries survive.
    "supported_variants",
    "supported_parameters",

  # NOTE(review): the @classmethod decorator is elided here
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    # NOTE(review): the padding of a missing variant and the return of
    # "nv" are elided here

  # NOTE(review): the @classmethod decorator is elided here
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  # NOTE(review): the @classmethod decorator is elided here
  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
      available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
      rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  # NOTE(review): the __slots__ declaration is elided in this listing
class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  # NOTE(review): the @ivar docstring lines and the __slots__ declaration
  # are elided in this listing
class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  # NOTE(review): most of the __slots__ list is elided in this listing;
  # only the trailing entries survive below.
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      # NOTE(review): the default initialisation is elided here

    if self.powered is None:
      # NOTE(review): the default initialisation is elided here

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = self._ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, self._ContainerToDicts(value))
             for (key, value) in disk_state.items())
    # NOTE(review): "return data" is elided here

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      # NOTE(review): the "obj.disk_state =" assignment header is elided
      # before the next expression
      dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
           for (key, value) in obj.disk_state.items())
    # NOTE(review): "return obj" is elided here
class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  # NOTE(review): the body of the __slots__ list is elided in this listing;
  # only its closing line survives below.
    ] + _TIMESTAMPS + _UUID

  # NOTE(review): the "ToDict(self)" method header is elided here
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    # NOTE(review): "return mydict" is elided here

  # NOTE(review): the @classmethod decorator is elided here
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    # NOTE(review): the members initialisation and "return obj" are
    # elided here

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      # NOTE(review): the default initialisation is elided here

    if self.serial_no is None:
      # NOTE(review): the default initialisation is elided here

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able to provide
    # a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    self.diskparams = UpgradeDiskParams(self.diskparams)

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
1216 class Cluster(TaggableObject):
1217 """Config object representing the cluster."""
1221 "highest_used_port",
1224 "volume_group_name",
1226 "drbd_usermode_helper",
1228 "default_hypervisor",
1233 "use_external_mip_script",
1236 "shared_file_storage_dir",
1237 "enabled_hypervisors",
1245 "candidate_pool_size",
1248 "maintain_node_health",
1250 "default_iallocator",
1253 "primary_ip_family",
1254 "prealloc_wipe_disks",
1255 ] + _TIMESTAMPS + _UUID
1257 def UpgradeConfig(self):
1258 """Fill defaults for missing configuration values.
1261 # pylint: disable=E0203
1262 # because these are "defined" via slots, not manually
1263 if self.hvparams is None:
1264 self.hvparams = constants.HVC_DEFAULTS
1266 for hypervisor in self.hvparams:
1267 self.hvparams[hypervisor] = FillDict(
1268 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1270 if self.os_hvp is None:
1273 # osparams added before 2.2
1274 if self.osparams is None:
1277 if self.ndparams is None:
1278 self.ndparams = constants.NDC_DEFAULTS
1280 self.beparams = UpgradeGroupedParams(self.beparams,
1281 constants.BEC_DEFAULTS)
1282 for beparams_group in self.beparams:
1283 UpgradeBeParams(self.beparams[beparams_group])
1285 migrate_default_bridge = not self.nicparams
1286 self.nicparams = UpgradeGroupedParams(self.nicparams,
1287 constants.NICC_DEFAULTS)
1288 if migrate_default_bridge:
1289 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1292 if self.modify_etc_hosts is None:
1293 self.modify_etc_hosts = True
1295 if self.modify_ssh_setup is None:
1296 self.modify_ssh_setup = True
1298 # default_bridge is no longer used in 2.1. The slot is left there to
1299 # support auto-upgrading. It can be removed once we decide to deprecate
1300 # upgrading straight from 2.0.
1301 if self.default_bridge is not None:
1302 self.default_bridge = None
1304 # default_hypervisor is just the first enabled one in 2.1. This slot and
1305 # code can be removed once upgrading straight from 2.0 is deprecated.
1306 if self.default_hypervisor is not None:
1307 self.enabled_hypervisors = ([self.default_hypervisor] +
1308 [hvname for hvname in self.enabled_hypervisors
1309 if hvname != self.default_hypervisor])
1310 self.default_hypervisor = None
1312 # maintain_node_health added after 2.1.1
1313 if self.maintain_node_health is None:
1314 self.maintain_node_health = False
1316 if self.uid_pool is None:
1319 if self.default_iallocator is None:
1320 self.default_iallocator = ""
1322 # reserved_lvs added before 2.2
1323 if self.reserved_lvs is None:
1324 self.reserved_lvs = []
1326 # hidden and blacklisted operating systems added before 2.2.1
1327 if self.hidden_os is None:
1330 if self.blacklisted_os is None:
1331 self.blacklisted_os = []
1333 # primary_ip_family added before 2.3
1334 if self.primary_ip_family is None:
1335 self.primary_ip_family = AF_INET
1337 if self.master_netmask is None:
1338 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1339 self.master_netmask = ipcls.iplen
1341 if self.prealloc_wipe_disks is None:
1342 self.prealloc_wipe_disks = False
1344 # shared_file_storage_dir added before 2.5
1345 if self.shared_file_storage_dir is None:
1346 self.shared_file_storage_dir = ""
1348 if self.use_external_mip_script is None:
1349 self.use_external_mip_script = False
1351 self.diskparams = UpgradeDiskParams(self.diskparams)
1354 def primary_hypervisor(self):
1355 """The first hypervisor is the primary.
1357 Useful, for example, for L{Node}'s hv/disk state.
1360 return self.enabled_hypervisors[0]
1363 """Custom function for cluster.
1366 mydict = super(Cluster, self).ToDict()
1367 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1371 def FromDict(cls, val):
1372 """Custom function for cluster.
1375 obj = super(Cluster, cls).FromDict(val)
1376 if not isinstance(obj.tcpudp_port_pool, set):
1377 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1380 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1381 """Get the default hypervisor parameters for the cluster.
1383 @param hypervisor: the hypervisor name
1384 @param os_name: if specified, we'll also update the defaults for this OS
1385 @param skip_keys: if passed, list of keys not to use
1386 @return: the defaults dict
1389 if skip_keys is None:
1392 fill_stack = [self.hvparams.get(hypervisor, {})]
1393 if os_name is not None:
1394 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1395 fill_stack.append(os_hvp)
1398 for o_dict in fill_stack:
1399 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1403 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1404 """Fill a given hvparams dict with cluster defaults.
1406 @type hv_name: string
1407 @param hv_name: the hypervisor to use
1408 @type os_name: string
1409 @param os_name: the OS to use for overriding the hypervisor defaults
1410 @type skip_globals: boolean
1411 @param skip_globals: if True, the global hypervisor parameters will
1414 @return: a copy of the given hvparams with missing keys filled from
1415 the cluster defaults
1419 skip_keys = constants.HVC_GLOBALS
1423 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1424 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1426 def FillHV(self, instance, skip_globals=False):
1427 """Fill an instance's hvparams dict with cluster defaults.
1429 @type instance: L{objects.Instance}
1430 @param instance: the instance parameter to fill
1431 @type skip_globals: boolean
1432 @param skip_globals: if True, the global hypervisor parameters will
1435 @return: a copy of the instance's hvparams with missing keys filled from
1436 the cluster defaults
1439 return self.SimpleFillHV(instance.hypervisor, instance.os,
1440 instance.hvparams, skip_globals)
1442 def SimpleFillBE(self, beparams):
1443 """Fill a given beparams dict with cluster defaults.
1445 @type beparams: dict
1446 @param beparams: the dict to fill
1448 @return: a copy of the passed in beparams with missing keys filled
1449 from the cluster defaults
1452 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1454 def FillBE(self, instance):
1455 """Fill an instance's beparams dict with cluster defaults.
1457 @type instance: L{objects.Instance}
1458 @param instance: the instance parameter to fill
1460 @return: a copy of the instance's beparams with missing keys filled from
1461 the cluster defaults
1464 return self.SimpleFillBE(instance.beparams)
1466 def SimpleFillNIC(self, nicparams):
1467 """Fill a given nicparams dict with cluster defaults.
1469 @type nicparams: dict
1470 @param nicparams: the dict to fill
1472 @return: a copy of the passed in nicparams with missing keys filled
1473 from the cluster defaults
1476 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1478 def SimpleFillOS(self, os_name, os_params):
1479 """Fill an instance's osparams dict with cluster defaults.
1481 @type os_name: string
1482 @param os_name: the OS name to use
1483 @type os_params: dict
1484 @param os_params: the dict to fill with default values
1486 @return: a copy of the instance's osparams with missing keys filled from
1487 the cluster defaults
1490 name_only = os_name.split("+", 1)[0]
1492 result = self.osparams.get(name_only, {})
1494 result = FillDict(result, self.osparams.get(os_name, {}))
1496 return FillDict(result, os_params)
1498 def FillND(self, node, nodegroup):
1499 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1501 @type node: L{objects.Node}
1502 @param node: A Node object to fill
1503 @type nodegroup: L{objects.NodeGroup}
1504 @param nodegroup: A Node object to fill
1505 @return a copy of the node's ndparams with defaults filled
1508 return self.SimpleFillND(nodegroup.FillND(node))
1510 def SimpleFillND(self, ndparams):
1511 """Fill a given ndparams dict with defaults.
1513 @type ndparams: dict
1514 @param ndparams: the dict to fill
1516 @return: a copy of the passed in ndparams with missing keys filled
1517 from the cluster defaults
1520 return FillDict(self.ndparams, ndparams)
1523 class BlockDevStatus(ConfigObject):
1524 """Config object representing the status of a block device."""
1536 class ImportExportStatus(ConfigObject):
1537 """Config object representing the status of an import or export."""
1543 "progress_throughput",
1551 class ImportExportOptions(ConfigObject):
1552 """Options for import/export daemon
1554 @ivar key_name: X509 key name (None for cluster certificate)
1555 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1556 @ivar compress: Compression method (one of L{constants.IEC_ALL})
1557 @ivar magic: Used to ensure the connection goes to the right disk
1558 @ivar ipv6: Whether to use IPv6
1559 @ivar connect_timeout: Number of seconds for establishing connection
1572 class ConfdRequest(ConfigObject):
1573 """Object holding a confd request.
1575 @ivar protocol: confd protocol version
1576 @ivar type: confd query type
1577 @ivar query: query request
1578 @ivar rsalt: requested reply salt
1589 class ConfdReply(ConfigObject):
1590 """Object holding a confd reply.
1592 @ivar protocol: confd protocol version
1593 @ivar status: reply status code (ok, error)
1594 @ivar answer: confd query reply
1595 @ivar serial: configuration serial number
1606 class QueryFieldDefinition(ConfigObject):
1607 """Object holding a query field definition.
1609 @ivar name: Field name
1610 @ivar title: Human-readable title
1611 @ivar kind: Field type
1612 @ivar doc: Human-readable description
1623 class _QueryResponseBase(ConfigObject):
1629 """Custom function for serializing.
1632 mydict = super(_QueryResponseBase, self).ToDict()
1633 mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1637 def FromDict(cls, val):
1638 """Custom function for de-serializing.
1641 obj = super(_QueryResponseBase, cls).FromDict(val)
1642 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1646 class QueryRequest(ConfigObject):
1647 """Object holding a query request.
1657 class QueryResponse(_QueryResponseBase):
1658 """Object holding the response to a query.
1660 @ivar fields: List of L{QueryFieldDefinition} objects
1661 @ivar data: Requested data
1669 class QueryFieldsRequest(ConfigObject):
1670 """Object holding a request for querying available fields.
1679 class QueryFieldsResponse(_QueryResponseBase):
1680 """Object holding the response to a query for fields.
1682 @ivar fields: List of L{QueryFieldDefinition} objects
1689 class MigrationStatus(ConfigObject):
1690 """Object holding the status of a migration.
1700 class InstanceConsole(ConfigObject):
1701 """Object describing how to access the console of an instance.
1716 """Validates contents of this object.
1719 assert self.kind in constants.CONS_ALL, "Unknown console type"
1720 assert self.instance, "Missing instance name"
1721 assert self.message or self.kind in [constants.CONS_SSH,
1722 constants.CONS_SPICE,
1724 assert self.host or self.kind == constants.CONS_MESSAGE
1725 assert self.port or self.kind in [constants.CONS_MESSAGE,
1727 assert self.user or self.kind in [constants.CONS_MESSAGE,
1728 constants.CONS_SPICE,
1730 assert self.command or self.kind in [constants.CONS_MESSAGE,
1731 constants.CONS_SPICE,
1733 assert self.display or self.kind in [constants.CONS_MESSAGE,
1734 constants.CONS_SPICE,
1739 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1740 """Simple wrapper over ConfigParse that allows serialization.
1742 This class is basically ConfigParser.SafeConfigParser with two
1743 additional methods that allow it to serialize/unserialize to/from a
1748 """Dump this instance and return the string representation."""
1751 return buf.getvalue()
1754 def Loads(cls, data):
1755 """Load data from a string."""
1756 buf = StringIO(data)