4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable=E0203,W0201,R0902
31 # E0203: Access to member %r before its definition, since we use
32 # objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
36 # R0902: Allow instances of these objects to have more than 20 attributes
42 from cStringIO import StringIO
44 from ganeti import errors
45 from ganeti import constants
46 from ganeti import netutils
48 from socket import AF_INET
51 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
52 "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
54 _TIMESTAMPS = ["ctime", "mtime"]
58 def FillDict(defaults_dict, custom_dict, skip_keys=None):
59 """Basic function to apply settings on top a default dict.
61 @type defaults_dict: dict
62 @param defaults_dict: dictionary holding the default values
63 @type custom_dict: dict
64 @param custom_dict: dictionary holding customized value
66 @param skip_keys: which keys not to fill
68 @return: dict with the 'full' values
71 ret_dict = copy.deepcopy(defaults_dict)
72 ret_dict.update(custom_dict)
82 def UpgradeGroupedParams(target, defaults):
83 """Update all groups for the target parameter.
85 @type target: dict of dicts
86 @param target: {group: {parameter: value}}
88 @param defaults: default parameter values
92 target = {constants.PP_DEFAULT: defaults}
95 target[group] = FillDict(defaults, target[group])
99 class ConfigObject(object):
100 """A generic config object.
102 It has the following properties:
104 - provides somewhat safe recursive unpickling and pickling for its classes
105 - unset attributes which are defined in slots are always returned
106 as None instead of raising an error
108 Classes derived from this must always declare __slots__ (we use many
109 config objects and the memory reduction is useful)
114 def __init__(self, **kwargs):
115 for k, v in kwargs.iteritems():
118 def __getattr__(self, name):
119 if name not in self._all_slots():
120 raise AttributeError("Invalid object attribute %s.%s" %
121 (type(self).__name__, name))
124 def __setstate__(self, state):
125 slots = self._all_slots()
128 setattr(self, name, state[name])
132 """Compute the list of all declared slots for a class.
136 for parent in cls.__mro__:
137 slots.extend(getattr(parent, "__slots__", []))
141 """Convert to a dict holding only standard python types.
143 The generic routine just dumps all of this object's attributes in
144 a dict. It does not work if the class has children who are
145 ConfigObjects themselves (e.g. the nics list in an Instance), in
146 which case the object should subclass the function in order to
147 make sure all objects returned are only standard python types.
151 for name in self._all_slots():
152 value = getattr(self, name, None)
153 if value is not None:
157 __getstate__ = ToDict
160 def FromDict(cls, val):
161 """Create an object from a dictionary.
163 This generic routine takes a dict, instantiates a new instance of
164 the given class, and sets attributes based on the dict content.
166 As for `ToDict`, this does not work if the class has children
167 who are ConfigObjects themselves (e.g. the nics list in an
168 Instance), in which case the object should subclass the function
169 and alter the objects.
172 if not isinstance(val, dict):
173 raise errors.ConfigurationError("Invalid object passed to FromDict:"
174 " expected dict, got %s" % type(val))
175 val_str = dict([(str(k), v) for k, v in val.iteritems()])
176 obj = cls(**val_str) # pylint: disable=W0142
180 def _ContainerToDicts(container):
181 """Convert the elements of a container to standard python types.
183 This method converts a container with elements derived from
184 ConfigData to standard python types. If the container is a dict,
185 we don't touch the keys, only the values.
188 if isinstance(container, dict):
189 ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
190 elif isinstance(container, (list, tuple, set, frozenset)):
191 ret = [elem.ToDict() for elem in container]
193 raise TypeError("Invalid type %s passed to _ContainerToDicts" %
198 def _ContainerFromDicts(source, c_type, e_type):
199 """Convert a container from standard python types.
201 This method converts a container with standard python types to
202 ConfigData objects. If the container is a dict, we don't touch the
203 keys, only the values.
206 if not isinstance(c_type, type):
207 raise TypeError("Container type %s passed to _ContainerFromDicts is"
208 " not a type" % type(c_type))
212 ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
213 elif c_type in (list, tuple, set, frozenset):
214 ret = c_type([e_type.FromDict(elem) for elem in source])
216 raise TypeError("Invalid container type %s passed to"
217 " _ContainerFromDicts" % c_type)
221 """Makes a deep copy of the current object and its children.
224 dict_form = self.ToDict()
225 clone_obj = self.__class__.FromDict(dict_form)
229 """Implement __repr__ for ConfigObjects."""
230 return repr(self.ToDict())
232 def UpgradeConfig(self):
233 """Fill defaults for missing configuration values.
235 This method will be called at configuration load time, and its
236 implementation will be object dependent.
242 class TaggableObject(ConfigObject):
243 """An generic class supporting tags.
247 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
250 def ValidateTag(cls, tag):
251 """Check if a tag is valid.
253 If the tag is invalid, an errors.TagError will be raised. The
254 function has no return value.
257 if not isinstance(tag, basestring):
258 raise errors.TagError("Invalid tag type (not a string)")
259 if len(tag) > constants.MAX_TAG_LEN:
260 raise errors.TagError("Tag too long (>%d characters)" %
261 constants.MAX_TAG_LEN)
263 raise errors.TagError("Tags cannot be empty")
264 if not cls.VALID_TAG_RE.match(tag):
265 raise errors.TagError("Tag contains invalid characters")
268 """Return the tags list.
271 tags = getattr(self, "tags", None)
273 tags = self.tags = set()
276 def AddTag(self, tag):
280 self.ValidateTag(tag)
281 tags = self.GetTags()
282 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
283 raise errors.TagError("Too many tags")
284 self.GetTags().add(tag)
286 def RemoveTag(self, tag):
290 self.ValidateTag(tag)
291 tags = self.GetTags()
295 raise errors.TagError("Tag not found")
298 """Taggable-object-specific conversion to standard python types.
300 This replaces the tags set with a list.
303 bo = super(TaggableObject, self).ToDict()
305 tags = bo.get("tags", None)
306 if isinstance(tags, set):
307 bo["tags"] = list(tags)
311 def FromDict(cls, val):
312 """Custom function for instances.
315 obj = super(TaggableObject, cls).FromDict(val)
316 if hasattr(obj, "tags") and isinstance(obj.tags, list):
317 obj.tags = set(obj.tags)
321 class MasterNetworkParameters(ConfigObject):
322 """Network configuration parameters for the master
324 @ivar name: master name
326 @ivar netmask: master netmask
327 @ivar netdev: master network device
328 @ivar ip_family: master IP family
340 class ConfigData(ConfigObject):
341 """Top-level config object."""
352 """Custom function for top-level config data.
354 This just replaces the list of instances, nodes and the cluster
355 with standard python types.
358 mydict = super(ConfigData, self).ToDict()
359 mydict["cluster"] = mydict["cluster"].ToDict()
360 for key in "nodes", "instances", "nodegroups":
361 mydict[key] = self._ContainerToDicts(mydict[key])
366 def FromDict(cls, val):
367 """Custom function for top-level config data
370 obj = super(ConfigData, cls).FromDict(val)
371 obj.cluster = Cluster.FromDict(obj.cluster)
372 obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
373 obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
374 obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
377 def HasAnyDiskOfType(self, dev_type):
378 """Check if in there is at disk of the given type in the configuration.
380 @type dev_type: L{constants.LDS_BLOCK}
381 @param dev_type: the type to look for
383 @return: boolean indicating if a disk of the given type was found or not
386 for instance in self.instances.values():
387 for disk in instance.disks:
388 if disk.IsBasedOnDiskType(dev_type):
392 def UpgradeConfig(self):
393 """Fill defaults for missing configuration values.
396 self.cluster.UpgradeConfig()
397 for node in self.nodes.values():
399 for instance in self.instances.values():
400 instance.UpgradeConfig()
401 if self.nodegroups is None:
403 for nodegroup in self.nodegroups.values():
404 nodegroup.UpgradeConfig()
405 if self.cluster.drbd_usermode_helper is None:
406 # To decide if we set an helper let's check if at least one instance has
407 # a DRBD disk. This does not cover all the possible scenarios but it
408 # gives a good approximation.
409 if self.HasAnyDiskOfType(constants.LD_DRBD8):
410 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
413 class NIC(ConfigObject):
414 """Config object representing a network card."""
415 __slots__ = ["mac", "ip", "nicparams"]
418 def CheckParameterSyntax(cls, nicparams):
419 """Check the given parameters for validity.
421 @type nicparams: dict
422 @param nicparams: dictionary with parameter names/value
423 @raise errors.ConfigurationError: when a parameter is not valid
426 if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
427 nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
428 err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
429 raise errors.ConfigurationError(err)
431 if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
432 not nicparams[constants.NIC_LINK]):
433 err = "Missing bridged nic link"
434 raise errors.ConfigurationError(err)
437 class Disk(ConfigObject):
438 """Config object representing a block device."""
439 __slots__ = ["dev_type", "logical_id", "physical_id",
440 "children", "iv_name", "size", "mode"]
442 def CreateOnSecondary(self):
443 """Test if this device needs to be created on a secondary node."""
444 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
446 def AssembleOnSecondary(self):
447 """Test if this device needs to be assembled on a secondary node."""
448 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
450 def OpenOnSecondary(self):
451 """Test if this device needs to be opened on a secondary node."""
452 return self.dev_type in (constants.LD_LV,)
454 def StaticDevPath(self):
455 """Return the device path if this device type has a static one.
457 Some devices (LVM for example) live always at the same /dev/ path,
458 irrespective of their status. For such devices, we return this
459 path, for others we return None.
461 @warning: The path returned is not a normalized pathname; callers
462 should check that it is a valid path.
465 if self.dev_type == constants.LD_LV:
466 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
467 elif self.dev_type == constants.LD_BLOCKDEV:
468 return self.logical_id[1]
471 def ChildrenNeeded(self):
472 """Compute the needed number of children for activation.
474 This method will return either -1 (all children) or a positive
475 number denoting the minimum number of children needed for
476 activation (only mirrored devices will usually return >=0).
478 Currently, only DRBD8 supports diskless activation (therefore we
479 return 0), for all other we keep the previous semantics and return
483 if self.dev_type == constants.LD_DRBD8:
487 def IsBasedOnDiskType(self, dev_type):
488 """Check if the disk or its children are based on the given type.
490 @type dev_type: L{constants.LDS_BLOCK}
491 @param dev_type: the type to look for
493 @return: boolean indicating if a device of the given type was found or not
497 for child in self.children:
498 if child.IsBasedOnDiskType(dev_type):
500 return self.dev_type == dev_type
502 def GetNodes(self, node):
503 """This function returns the nodes this device lives on.
505 Given the node on which the parent of the device lives on (or, in
506 case of a top-level device, the primary node of the devices'
507 instance), this function will return a list of nodes on which this
508 devices needs to (or can) be assembled.
511 if self.dev_type in [constants.LD_LV, constants.LD_FILE,
512 constants.LD_BLOCKDEV]:
514 elif self.dev_type in constants.LDS_DRBD:
515 result = [self.logical_id[0], self.logical_id[1]]
516 if node not in result:
517 raise errors.ConfigurationError("DRBD device passed unknown node")
519 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
522 def ComputeNodeTree(self, parent_node):
523 """Compute the node/disk tree for this disk and its children.
525 This method, given the node on which the parent disk lives, will
526 return the list of all (node, disk) pairs which describe the disk
527 tree in the most compact way. For example, a drbd/lvm stack
528 will be returned as (primary_node, drbd) and (secondary_node, drbd)
529 which represents all the top-level devices on the nodes.
532 my_nodes = self.GetNodes(parent_node)
533 result = [(node, self) for node in my_nodes]
534 if not self.children:
537 for node in my_nodes:
538 for child in self.children:
539 child_result = child.ComputeNodeTree(node)
540 if len(child_result) == 1:
541 # child (and all its descendants) is simple, doesn't split
542 # over multiple hosts, so we don't need to describe it, our
543 # own entry for this node describes it completely
546 # check if child nodes differ from my nodes; note that
547 # subdisk can differ from the child itself, and be instead
548 # one of its descendants
549 for subnode, subdisk in child_result:
550 if subnode not in my_nodes:
551 result.append((subnode, subdisk))
552 # otherwise child is under our own node, so we ignore this
553 # entry (but probably the other results in the list will
557 def ComputeGrowth(self, amount):
558 """Compute the per-VG growth requirements.
560 This only works for VG-based disks.
562 @type amount: integer
563 @param amount: the desired increase in (user-visible) disk space
565 @return: a dictionary of volume-groups and the required size
568 if self.dev_type == constants.LD_LV:
569 return {self.logical_id[0]: amount}
570 elif self.dev_type == constants.LD_DRBD8:
572 return self.children[0].ComputeGrowth(amount)
576 # Other disk types do not require VG space
579 def RecordGrow(self, amount):
580 """Update the size of this disk after growth.
582 This method recurses over the disks's children and updates their
583 size correspondigly. The method needs to be kept in sync with the
584 actual algorithms from bdev.
587 if self.dev_type in (constants.LD_LV, constants.LD_FILE):
589 elif self.dev_type == constants.LD_DRBD8:
591 self.children[0].RecordGrow(amount)
594 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
595 " disk type %s" % self.dev_type)
598 """Sets recursively the size to zero for the disk and its children.
602 for child in self.children:
606 def SetPhysicalID(self, target_node, nodes_ip):
607 """Convert the logical ID to the physical ID.
609 This is used only for drbd, which needs ip/port configuration.
611 The routine descends down and updates its children also, because
612 this helps when the only the top device is passed to the remote
616 - target_node: the node we wish to configure for
617 - nodes_ip: a mapping of node name to ip
619 The target_node must exist in in nodes_ip, and must be one of the
620 nodes in the logical ID for each of the DRBD devices encountered
625 for child in self.children:
626 child.SetPhysicalID(target_node, nodes_ip)
628 if self.logical_id is None and self.physical_id is not None:
630 if self.dev_type in constants.LDS_DRBD:
631 pnode, snode, port, pminor, sminor, secret = self.logical_id
632 if target_node not in (pnode, snode):
633 raise errors.ConfigurationError("DRBD device not knowing node %s" %
635 pnode_ip = nodes_ip.get(pnode, None)
636 snode_ip = nodes_ip.get(snode, None)
637 if pnode_ip is None or snode_ip is None:
638 raise errors.ConfigurationError("Can't find primary or secondary node"
639 " for %s" % str(self))
640 p_data = (pnode_ip, port)
641 s_data = (snode_ip, port)
642 if pnode == target_node:
643 self.physical_id = p_data + s_data + (pminor, secret)
644 else: # it must be secondary, we tested above
645 self.physical_id = s_data + p_data + (sminor, secret)
647 self.physical_id = self.logical_id
651 """Disk-specific conversion to standard python types.
653 This replaces the children lists of objects with lists of
654 standard python types.
657 bo = super(Disk, self).ToDict()
659 for attr in ("children",):
660 alist = bo.get(attr, None)
662 bo[attr] = self._ContainerToDicts(alist)
666 def FromDict(cls, val):
667 """Custom function for Disks
670 obj = super(Disk, cls).FromDict(val)
672 obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
673 if obj.logical_id and isinstance(obj.logical_id, list):
674 obj.logical_id = tuple(obj.logical_id)
675 if obj.physical_id and isinstance(obj.physical_id, list):
676 obj.physical_id = tuple(obj.physical_id)
677 if obj.dev_type in constants.LDS_DRBD:
678 # we need a tuple of length six here
679 if len(obj.logical_id) < 6:
680 obj.logical_id += (None,) * (6 - len(obj.logical_id))
684 """Custom str() formatter for disks.
687 if self.dev_type == constants.LD_LV:
688 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
689 elif self.dev_type in constants.LDS_DRBD:
690 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
692 if self.physical_id is None:
695 phy = ("configured as %s:%s %s:%s" %
696 (self.physical_id[0], self.physical_id[1],
697 self.physical_id[2], self.physical_id[3]))
699 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
700 (node_a, minor_a, node_b, minor_b, port, phy))
701 if self.children and self.children.count(None) == 0:
702 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
704 val += "no local storage"
706 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
707 (self.dev_type, self.logical_id, self.physical_id, self.children))
708 if self.iv_name is None:
709 val += ", not visible"
711 val += ", visible as /dev/%s" % self.iv_name
712 if isinstance(self.size, int):
713 val += ", size=%dm)>" % self.size
715 val += ", size='%s')>" % (self.size,)
719 """Checks that this disk is correctly configured.
723 if self.mode not in constants.DISK_ACCESS_SET:
724 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
727 def UpgradeConfig(self):
728 """Fill defaults for missing configuration values.
732 for child in self.children:
733 child.UpgradeConfig()
734 # add here config upgrade for this disk
737 class Instance(TaggableObject):
738 """Config object representing an instance."""
753 ] + _TIMESTAMPS + _UUID
755 def _ComputeSecondaryNodes(self):
756 """Compute the list of secondary nodes.
758 This is a simple wrapper over _ComputeAllNodes.
761 all_nodes = set(self._ComputeAllNodes())
762 all_nodes.discard(self.primary_node)
763 return tuple(all_nodes)
765 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
766 "List of secondary nodes")
768 def _ComputeAllNodes(self):
769 """Compute the list of all nodes.
771 Since the data is already there (in the drbd disks), keeping it as
772 a separate normal attribute is redundant and if not properly
773 synchronised can cause problems. Thus it's better to compute it
777 def _Helper(nodes, device):
778 """Recursively computes nodes given a top device."""
779 if device.dev_type in constants.LDS_DRBD:
780 nodea, nodeb = device.logical_id[:2]
784 for child in device.children:
785 _Helper(nodes, child)
788 all_nodes.add(self.primary_node)
789 for device in self.disks:
790 _Helper(all_nodes, device)
791 return tuple(all_nodes)
793 all_nodes = property(_ComputeAllNodes, None, None,
794 "List of all nodes of the instance")
796 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
797 """Provide a mapping of nodes to LVs this instance owns.
799 This function figures out what logical volumes should belong on
800 which nodes, recursing through a device tree.
802 @param lvmap: optional dictionary to receive the
803 'node' : ['lv', ...] data.
805 @return: None if lvmap arg is given, otherwise, a dictionary of
806 the form { 'nodename' : ['volume1', 'volume2', ...], ... };
807 volumeN is of the form "vg_name/lv_name", compatible with
812 node = self.primary_node
820 if not node in lvmap:
828 if dev.dev_type == constants.LD_LV:
829 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
831 elif dev.dev_type in constants.LDS_DRBD:
833 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
834 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
837 self.MapLVsByNode(lvmap, dev.children, node)
841 def FindDisk(self, idx):
842 """Find a disk given having a specified index.
844 This is just a wrapper that does validation of the index.
847 @param idx: the disk index
849 @return: the corresponding disk
850 @raise errors.OpPrereqError: when the given index is not valid
855 return self.disks[idx]
856 except (TypeError, ValueError), err:
857 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
860 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
861 " 0 to %d" % (idx, len(self.disks) - 1),
865 """Instance-specific conversion to standard python types.
867 This replaces the children lists of objects with lists of standard
871 bo = super(Instance, self).ToDict()
873 for attr in "nics", "disks":
874 alist = bo.get(attr, None)
876 nlist = self._ContainerToDicts(alist)
883 def FromDict(cls, val):
884 """Custom function for instances.
887 obj = super(Instance, cls).FromDict(val)
888 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
889 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
892 def UpgradeConfig(self):
893 """Fill defaults for missing configuration values.
896 for nic in self.nics:
898 for disk in self.disks:
901 for key in constants.HVC_GLOBALS:
903 del self.hvparams[key]
906 if self.osparams is None:
910 class OS(ConfigObject):
911 """Config object representing an operating system.
913 @type supported_parameters: list
914 @ivar supported_parameters: a list of tuples, name and description,
915 containing the supported parameters by this OS
917 @type VARIANT_DELIM: string
918 @cvar VARIANT_DELIM: the variant delimiter
930 "supported_variants",
931 "supported_parameters",
937 def SplitNameVariant(cls, name):
938 """Splits the name into the proper name and variant.
940 @param name: the OS (unprocessed) name
942 @return: a list of two elements; if the original name didn't
943 contain a variant, it's returned as an empty string
946 nv = name.split(cls.VARIANT_DELIM, 1)
952 def GetName(cls, name):
953 """Returns the proper name of the os (without the variant).
955 @param name: the OS (unprocessed) name
958 return cls.SplitNameVariant(name)[0]
961 def GetVariant(cls, name):
962 """Returns the variant the os (without the base name).
964 @param name: the OS (unprocessed) name
967 return cls.SplitNameVariant(name)[1]
970 class Node(TaggableObject):
971 """Config object representing a node."""
987 ] + _TIMESTAMPS + _UUID
989 def UpgradeConfig(self):
990 """Fill defaults for missing configuration values.
993 # pylint: disable=E0203
994 # because these are "defined" via slots, not manually
995 if self.master_capable is None:
996 self.master_capable = True
998 if self.vm_capable is None:
999 self.vm_capable = True
1001 if self.ndparams is None:
1004 if self.powered is None:
1008 class NodeGroup(TaggableObject):
1009 """Config object representing a node group."""
1016 ] + _TIMESTAMPS + _UUID
1019 """Custom function for nodegroup.
1021 This discards the members object, which gets recalculated and is only kept
1025 mydict = super(NodeGroup, self).ToDict()
1026 del mydict["members"]
1030 def FromDict(cls, val):
1031 """Custom function for nodegroup.
1033 The members slot is initialized to an empty list, upon deserialization.
1036 obj = super(NodeGroup, cls).FromDict(val)
1040 def UpgradeConfig(self):
1041 """Fill defaults for missing configuration values.
1044 if self.ndparams is None:
1047 if self.serial_no is None:
1050 if self.alloc_policy is None:
1051 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1053 # We only update mtime, and not ctime, since we would not be able to provide
1054 # a correct value for creation time.
1055 if self.mtime is None:
1056 self.mtime = time.time()
1058 def FillND(self, node):
1059 """Return filled out ndparams for L{objects.Node}
1061 @type node: L{objects.Node}
1062 @param node: A Node object to fill
1063 @return a copy of the node's ndparams with defaults filled
1066 return self.SimpleFillND(node.ndparams)
1068 def SimpleFillND(self, ndparams):
1069 """Fill a given ndparams dict with defaults.
1071 @type ndparams: dict
1072 @param ndparams: the dict to fill
1074 @return: a copy of the passed in ndparams with missing keys filled
1075 from the node group defaults
1078 return FillDict(self.ndparams, ndparams)
1081 class Cluster(TaggableObject):
1082 """Config object representing the cluster."""
1086 "highest_used_port",
1089 "volume_group_name",
1091 "drbd_usermode_helper",
1093 "default_hypervisor",
1098 "use_external_mip_script",
1101 "shared_file_storage_dir",
1102 "enabled_hypervisors",
1109 "candidate_pool_size",
1112 "maintain_node_health",
1114 "default_iallocator",
1117 "primary_ip_family",
1118 "prealloc_wipe_disks",
1119 ] + _TIMESTAMPS + _UUID
1121 def UpgradeConfig(self):
1122 """Fill defaults for missing configuration values.
1125 # pylint: disable=E0203
1126 # because these are "defined" via slots, not manually
1127 if self.hvparams is None:
1128 self.hvparams = constants.HVC_DEFAULTS
1130 for hypervisor in self.hvparams:
1131 self.hvparams[hypervisor] = FillDict(
1132 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1134 if self.os_hvp is None:
1137 # osparams added before 2.2
1138 if self.osparams is None:
1141 if self.ndparams is None:
1142 self.ndparams = constants.NDC_DEFAULTS
1144 self.beparams = UpgradeGroupedParams(self.beparams,
1145 constants.BEC_DEFAULTS)
1146 migrate_default_bridge = not self.nicparams
1147 self.nicparams = UpgradeGroupedParams(self.nicparams,
1148 constants.NICC_DEFAULTS)
1149 if migrate_default_bridge:
1150 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1153 if self.modify_etc_hosts is None:
1154 self.modify_etc_hosts = True
1156 if self.modify_ssh_setup is None:
1157 self.modify_ssh_setup = True
1159 # default_bridge is no longer used in 2.1. The slot is left there to
1160 # support auto-upgrading. It can be removed once we decide to deprecate
1161 # upgrading straight from 2.0.
1162 if self.default_bridge is not None:
1163 self.default_bridge = None
1165 # default_hypervisor is just the first enabled one in 2.1. This slot and
1166 # code can be removed once upgrading straight from 2.0 is deprecated.
1167 if self.default_hypervisor is not None:
1168 self.enabled_hypervisors = ([self.default_hypervisor] +
1169 [hvname for hvname in self.enabled_hypervisors
1170 if hvname != self.default_hypervisor])
1171 self.default_hypervisor = None
1173 # maintain_node_health added after 2.1.1
1174 if self.maintain_node_health is None:
1175 self.maintain_node_health = False
1177 if self.uid_pool is None:
1180 if self.default_iallocator is None:
1181 self.default_iallocator = ""
1183 # reserved_lvs added before 2.2
1184 if self.reserved_lvs is None:
1185 self.reserved_lvs = []
1187 # hidden and blacklisted operating systems added before 2.2.1
1188 if self.hidden_os is None:
1191 if self.blacklisted_os is None:
1192 self.blacklisted_os = []
1194 # primary_ip_family added before 2.3
1195 if self.primary_ip_family is None:
1196 self.primary_ip_family = AF_INET
1198 if self.master_netmask is None:
1199 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1200 self.master_netmask = ipcls.iplen
1202 if self.prealloc_wipe_disks is None:
1203 self.prealloc_wipe_disks = False
1205 # shared_file_storage_dir added before 2.5
1206 if self.shared_file_storage_dir is None:
1207 self.shared_file_storage_dir = ""
1209 if self.use_external_mip_script is None:
1210 self.use_external_mip_script = False
1213 """Custom function for cluster.
1216 mydict = super(Cluster, self).ToDict()
1217 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1221 def FromDict(cls, val):
1222 """Custom function for cluster.
1225 obj = super(Cluster, cls).FromDict(val)
1226 if not isinstance(obj.tcpudp_port_pool, set):
1227 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1230 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1231 """Get the default hypervisor parameters for the cluster.
1233 @param hypervisor: the hypervisor name
1234 @param os_name: if specified, we'll also update the defaults for this OS
1235 @param skip_keys: if passed, list of keys not to use
1236 @return: the defaults dict
1239 if skip_keys is None:
1242 fill_stack = [self.hvparams.get(hypervisor, {})]
1243 if os_name is not None:
1244 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1245 fill_stack.append(os_hvp)
1248 for o_dict in fill_stack:
1249 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1253 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1254 """Fill a given hvparams dict with cluster defaults.
1256 @type hv_name: string
1257 @param hv_name: the hypervisor to use
1258 @type os_name: string
1259 @param os_name: the OS to use for overriding the hypervisor defaults
1260 @type skip_globals: boolean
1261 @param skip_globals: if True, the global hypervisor parameters will
1264 @return: a copy of the given hvparams with missing keys filled from
1265 the cluster defaults
1269 skip_keys = constants.HVC_GLOBALS
1273 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1274 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1276 def FillHV(self, instance, skip_globals=False):
1277 """Fill an instance's hvparams dict with cluster defaults.
1279 @type instance: L{objects.Instance}
1280 @param instance: the instance parameter to fill
1281 @type skip_globals: boolean
1282 @param skip_globals: if True, the global hypervisor parameters will
1285 @return: a copy of the instance's hvparams with missing keys filled from
1286 the cluster defaults
1289 return self.SimpleFillHV(instance.hypervisor, instance.os,
1290 instance.hvparams, skip_globals)
1292 def SimpleFillBE(self, beparams):
1293 """Fill a given beparams dict with cluster defaults.
1295 @type beparams: dict
1296 @param beparams: the dict to fill
1298 @return: a copy of the passed in beparams with missing keys filled
1299 from the cluster defaults
1302 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1304 def FillBE(self, instance):
1305 """Fill an instance's beparams dict with cluster defaults.
1307 @type instance: L{objects.Instance}
1308 @param instance: the instance parameter to fill
1310 @return: a copy of the instance's beparams with missing keys filled from
1311 the cluster defaults
1314 return self.SimpleFillBE(instance.beparams)
1316 def SimpleFillNIC(self, nicparams):
1317 """Fill a given nicparams dict with cluster defaults.
1319 @type nicparams: dict
1320 @param nicparams: the dict to fill
1322 @return: a copy of the passed in nicparams with missing keys filled
1323 from the cluster defaults
1326 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1328 def SimpleFillOS(self, os_name, os_params):
1329 """Fill an instance's osparams dict with cluster defaults.
1331 @type os_name: string
1332 @param os_name: the OS name to use
1333 @type os_params: dict
1334 @param os_params: the dict to fill with default values
1336 @return: a copy of the instance's osparams with missing keys filled from
1337 the cluster defaults
1340 name_only = os_name.split("+", 1)[0]
1342 result = self.osparams.get(name_only, {})
1344 result = FillDict(result, self.osparams.get(os_name, {}))
1346 return FillDict(result, os_params)
1348 def FillND(self, node, nodegroup):
1349 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1351 @type node: L{objects.Node}
1352 @param node: A Node object to fill
1353 @type nodegroup: L{objects.NodeGroup}
1354 @param nodegroup: A Node object to fill
1355 @return a copy of the node's ndparams with defaults filled
1358 return self.SimpleFillND(nodegroup.FillND(node))
1360 def SimpleFillND(self, ndparams):
1361 """Fill a given ndparams dict with defaults.
1363 @type ndparams: dict
1364 @param ndparams: the dict to fill
1366 @return: a copy of the passed in ndparams with missing keys filled
1367 from the cluster defaults
1370 return FillDict(self.ndparams, ndparams)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  # NOTE(review): the attribute (__slots__) list of this class is not
  # visible in this extract -- presumably truncated; verify against the
  # full file before relying on it.
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  # NOTE(review): the surrounding __slots__ list appears truncated in this
  # extract; "progress_throughput" looks like one of its entries -- confirm
  # the remaining attributes against the full file.
  "progress_throughput",
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  # NOTE(review): the __slots__ list is not visible in this extract --
  # verify against the full file.
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  # NOTE(review): the __slots__ list is not visible in this extract --
  # verify against the full file.
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  # NOTE(review): the __slots__ list is not visible in this extract --
  # verify against the full file.
class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  # NOTE(review): the __slots__ list is not visible in this extract --
  # verify against the full file.
class _QueryResponseBase(ConfigObject):
  """Base class for query responses carrying field definitions."""

  # FromDict assigns obj.fields, so the "fields" slot must exist; the
  # original attribute list is truncated in this extract -- verify.
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    The "fields" attribute holds L{QueryFieldDefinition} objects which
    must themselves be converted to plain dicts.

    @rtype: dict
    @return: serialized form of this object

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
    # FIX: as extracted, the computed dict was never returned
    return mydict

  # FIX: as extracted, the @classmethod decorator was missing although the
  # first parameter is "cls" and the method is used as a constructor
  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    @type val: dict
    @param val: serialized form as produced by L{ToDict}
    @return: a new instance with "fields" rebuilt as
        L{QueryFieldDefinition} objects

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    # FIX: as extracted, the de-serialized object was never returned
    return obj
class QueryRequest(ConfigObject):
  """Object holding a query request.

  """
  # NOTE(review): the attribute (__slots__) list is not visible in this
  # extract -- verify against the full file.
class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  # NOTE(review): the __slots__ list is not visible in this extract --
  # verify against the full file.
class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  # NOTE(review): the attribute (__slots__) list is not visible in this
  # extract -- verify against the full file.
class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  # NOTE(review): the __slots__ list is not visible in this extract --
  # verify against the full file.
class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  # NOTE(review): the attribute (__slots__) list is not visible in this
  # extract -- verify against the full file.
class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance."""

  # Attributes read by Validate below; list order reconstructed from the
  # assertions -- verify against the full file.
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    Checks that, for each console kind, the attributes that kind requires
    are set.  Uses C{assert}, so the checks disappear when Python runs
    with C{-O}; do not rely on this for untrusted input.

    """
    # FIX: as extracted, the "def Validate(self):" header and several
    # list closers were missing, leaving the assertions dangling at class
    # level; trailing list entries are restored from the per-kind
    # requirements (TODO confirm against the full file).
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
    return True
1589 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1590 """Simple wrapper over ConfigParse that allows serialization.
1592 This class is basically ConfigParser.SafeConfigParser with two
1593 additional methods that allow it to serialize/unserialize to/from a
1598 """Dump this instance and return the string representation."""
1601 return buf.getvalue()
1604 def Loads(cls, data):
1605 """Load data from a string."""
1606 buf = StringIO(data)