4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable=E0203,W0201
31 # E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
40 from cStringIO import StringIO
42 from ganeti import errors
43 from ganeti import constants
45 from socket import AF_INET
48 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
49 "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
51 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep-copy so neither input dict is ever mutated by the caller
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      # a key missing from the merged dict is not an error
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated (or newly created) target dict

  """
  if target is None:
    # nothing configured yet: initialise the "default" parameter
    # group with the full set of defaults
    target = {constants.PP_DEFAULT: defaults}
  else:
    # fill each existing group with the missing default values
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
class ConfigObject(object):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """

  def __init__(self, **kwargs):
    # store every keyword argument as an attribute; valid names are
    # constrained by the __slots__ declared on the subclasses
    for k, v in kwargs.iteritems():

  def __getattr__(self, name):
    # only invoked when normal lookup fails: declared-but-unset slots
    # fall through here; anything not in the slots is an error
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))

  def __setstate__(self, state):
    # custom unpickling: restore only keys that are declared slots
    slots = self._all_slots()
      setattr(self, name, state[name])

    """Compute the list of all declared slots for a class.

    """
    # walk the MRO so slots declared on ancestor classes are included
    for parent in cls.__mro__:
      slots.extend(getattr(parent, "__slots__", []))

    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    # unset (None) slots are omitted from the resulting dict
    for name in self._all_slots():
      value = getattr(self, name, None)
      if value is not None:

  # pickling uses the same representation as ToDict
  __getstate__ = ToDict

  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # force keys to plain str so they can be used as keyword arguments
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142

  def _ContainerToDicts(container):
    """Convert the elements of a container to standard python types.

    This method converts a container with elements derived from
    ConfigData to standard python types. If the container is a dict,
    we don't touch the keys, only the values.

    """
    if isinstance(container, dict):
      ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
    elif isinstance(container, (list, tuple, set, frozenset)):
      ret = [elem.ToDict() for elem in container]
      raise TypeError("Invalid type %s passed to _ContainerToDicts" %

  def _ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types.

    This method converts a container with standard python types to
    ConfigData objects. If the container is a dict, we don't touch the
    keys, only the values.

    """
    # c_type must be the container *type* itself, not an instance
    if not isinstance(c_type, type):
      raise TypeError("Container type %s passed to _ContainerFromDicts is"
                      " not a type" % type(c_type))
      ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
    elif c_type in (list, tuple, set, frozenset):
      ret = c_type([e_type.FromDict(elem) for elem in source])
      raise TypeError("Invalid container type %s passed to"
                      " _ContainerFromDicts" % c_type)

    """Makes a deep copy of the current object and its children.

    """
    # round-trip through the dict representation to obtain a deep copy
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)

    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  # set of characters a tag may legally contain
  VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")

  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

    """Return the tags list.

    """
    # the tags set is created lazily on first access
    tags = getattr(self, "tags", None)
      tags = self.tags = set()

  def AddTag(self, tag):
    # validate first, then enforce the per-object tag limit
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    self.ValidateTag(tag)
    tags = self.GetTags()
      raise errors.TagError("Tag not found")

    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    # sets are not serializable, so export the tags as a plain list
    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)

  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    # convert the tags back from list (serialized form) to set
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar name: master name
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
class ConfigData(ConfigObject):
  """Top-level config object."""

    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups":
      mydict[key] = self._ContainerToDicts(mydict[key])

  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    # rebuild the child ConfigObjects from their dict representation
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the
    configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # upgrade all child objects recursively
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      # To decide if we set a helper let's check if at least one instance has
      # a DRBD disk. This does not cover all the possible scenarios but it
      # gives a good approximation.
      if self.HasAnyDiskOfType(constants.LD_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["mac", "ip", "nicparams"]

  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    # the mode must be one of the statically valid modes, or the
    # special "auto" value
    if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
        nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
      raise errors.ConfigurationError(err)

    # bridged NICs must name the bridge (link) they connect to
    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      err = "Missing bridged nic link"
      raise errors.ConfigurationError(err)
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = ["dev_type", "logical_id", "physical_id",
               "children", "iv_name", "size", "mode"]

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # logical_id is (vg_name, lv_name) for logical volumes
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return

    """
    if self.dev_type == constants.LD_DRBD8:

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for

    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    # recurse into the children before testing this device itself
    for child in self.children:
      if child.IsBasedOnDiskType(dev_type):
    return self.dev_type == dev_type

  def GetNodes(self, node):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV]:
    elif self.dev_type in constants.LDS_DRBD:
      # for DRBD the logical_id starts with (primary, secondary) nodes
      result = [self.logical_id[0], self.logical_id[1]]
      if node not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)

  def ComputeNodeTree(self, parent_node):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node)
    result = [(node, self) for node in my_nodes]
    if not self.children:
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space

    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      # DRBD growth requirements come from its data child
      return self.children[0].ComputeGrowth(amount)
      # Other disk types do not require VG space

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
    elif self.dev_type == constants.LD_DRBD8:
      self.children[0].RecordGrow(amount)
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

    """Sets recursively the size to zero for the disk and its children.

    """
    for child in self.children:

  def SetPhysicalID(self, target_node, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node: the node we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered

    """
    # update the children first, so the top-level physical_id is built
    # from already-updated children
    for child in self.children:
      child.SetPhysicalID(target_node, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
    if self.dev_type in constants.LDS_DRBD:
      pnode, snode, port, pminor, sminor, secret = self.logical_id
      if target_node not in (pnode, snode):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
      pnode_ip = nodes_ip.get(pnode, None)
      snode_ip = nodes_ip.get(snode, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode == target_node:
        # the physical_id is ordered local-endpoint-first
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
      # non-DRBD devices: physical and logical IDs are identical
      self.physical_id = self.logical_id

    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
        bo[attr] = self._ContainerToDicts(alist)

  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
      obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
    # IDs are serialized as lists; convert them back to tuples
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))

    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      if self.physical_id is None:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))
      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
        val += "no local storage"
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
      val += ", visible as /dev/%s" % self.iv_name
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
      val += ", size='%s')>" % (self.size,)

    """Checks that this disk is correctly configured.

    """
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for child in self.children:
      child.UpgradeConfig()
    # add here config upgrade for this disk
class Instance(TaggableObject):
  """Config object representing an instance."""
    ] + _TIMESTAMPS + _UUID

  def _ComputeSecondaryNodes(self):
    """Compute the list of secondary nodes.

    This is a simple wrapper over _ComputeAllNodes.

    """
    all_nodes = set(self._ComputeAllNodes())
    all_nodes.discard(self.primary_node)
    return tuple(all_nodes)

  secondary_nodes = property(_ComputeSecondaryNodes, None, None,
                             "List of secondary nodes")

  def _ComputeAllNodes(self):
    """Compute the list of all nodes.

    Since the data is already there (in the drbd disks), keeping it as
    a separate normal attribute is redundant and if not properly
    synchronised can cause problems. Thus it's better to compute it

    """
    def _Helper(nodes, device):
      """Recursively computes nodes given a top device."""
      if device.dev_type in constants.LDS_DRBD:
        # DRBD logical_id starts with the pair of nodes it lives on
        nodea, nodeb = device.logical_id[:2]
      for child in device.children:
        _Helper(nodes, child)

    all_nodes.add(self.primary_node)
    for device in self.disks:
      _Helper(all_nodes, device)
    return tuple(all_nodes)

  all_nodes = property(_ComputeAllNodes, None, None,
                       "List of all nodes of the instance")

  def MapLVsByNode(self, lvmap=None, devs=None, node=None):
    """Provide a mapping of nodes to LVs this instance owns.

    This function figures out what logical volumes should belong on
    which nodes, recursing through a device tree.

    @param lvmap: optional dictionary to receive the
        'node' : ['lv', ...] data.

    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with

    """
      node = self.primary_node
    if not node in lvmap:
      if dev.dev_type == constants.LD_LV:
        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
      elif dev.dev_type in constants.LDS_DRBD:
        # recurse into both DRBD peer nodes
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
          self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
        self.MapLVsByNode(lvmap, dev.children, node)

  def FindDisk(self, idx):
    """Find a disk given having a specified index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index

    @rtype: L{Disk}
    @return: the corresponding disk

    @raise errors.OpPrereqError: when the given index is not valid

    """
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
      # NOTE(review): "instace" typo and missing ")" in this runtime
      # error message are kept as-is (behavior-affecting string)
      raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
                                 " 0 to %d" % (idx, len(self.disks) - 1),

    """Instance-specific conversion to standard python types.

    This replaces the children lists of objects with lists of standard
    types.

    """
    bo = super(Instance, self).ToDict()

    for attr in "nics", "disks":
      alist = bo.get(attr, None)
        nlist = self._ContainerToDicts(alist)

  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(Instance, cls).FromDict(val)
    # rebuild the NIC and Disk child objects from their dict forms
    obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
    obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    for nic in self.nics:
    for disk in self.disks:
    # globally-held hypervisor parameters must not live per-instance
    for key in constants.HVC_GLOBALS:
        del self.hvparams[key]
    if self.osparams is None:
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
    "supported_variants",
    "supported_parameters",

  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    # split only on the first delimiter occurrence
    nv = name.split(cls.VARIANT_DELIM, 1)

  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
class Node(TaggableObject):
  """Config object representing a node."""
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:

    if self.powered is None:
class NodeGroup(TaggableObject):
  """Config object representing a node group."""
    ] + _TIMESTAMPS + _UUID

    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]

  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:

    if self.serial_no is None:

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able to provide
    # a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill

    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
1076 class Cluster(TaggableObject):
1077 """Config object representing the cluster."""
1081 "highest_used_port",
1084 "volume_group_name",
1086 "drbd_usermode_helper",
1088 "default_hypervisor",
1095 "shared_file_storage_dir",
1096 "enabled_hypervisors",
1103 "candidate_pool_size",
1106 "maintain_node_health",
1108 "default_iallocator",
1111 "primary_ip_family",
1112 "prealloc_wipe_disks",
1113 ] + _TIMESTAMPS + _UUID
1115 def UpgradeConfig(self):
1116 """Fill defaults for missing configuration values.
1119 # pylint: disable=E0203
1120 # because these are "defined" via slots, not manually
1121 if self.hvparams is None:
1122 self.hvparams = constants.HVC_DEFAULTS
1124 for hypervisor in self.hvparams:
1125 self.hvparams[hypervisor] = FillDict(
1126 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1128 if self.os_hvp is None:
1131 # osparams added before 2.2
1132 if self.osparams is None:
1135 if self.ndparams is None:
1136 self.ndparams = constants.NDC_DEFAULTS
1138 self.beparams = UpgradeGroupedParams(self.beparams,
1139 constants.BEC_DEFAULTS)
1140 migrate_default_bridge = not self.nicparams
1141 self.nicparams = UpgradeGroupedParams(self.nicparams,
1142 constants.NICC_DEFAULTS)
1143 if migrate_default_bridge:
1144 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1147 if self.modify_etc_hosts is None:
1148 self.modify_etc_hosts = True
1150 if self.modify_ssh_setup is None:
1151 self.modify_ssh_setup = True
1153 # default_bridge is no longer used in 2.1. The slot is left there to
1154 # support auto-upgrading. It can be removed once we decide to deprecate
1155 # upgrading straight from 2.0.
1156 if self.default_bridge is not None:
1157 self.default_bridge = None
1159 # default_hypervisor is just the first enabled one in 2.1. This slot and
1160 # code can be removed once upgrading straight from 2.0 is deprecated.
1161 if self.default_hypervisor is not None:
1162 self.enabled_hypervisors = ([self.default_hypervisor] +
1163 [hvname for hvname in self.enabled_hypervisors
1164 if hvname != self.default_hypervisor])
1165 self.default_hypervisor = None
1167 # maintain_node_health added after 2.1.1
1168 if self.maintain_node_health is None:
1169 self.maintain_node_health = False
1171 if self.uid_pool is None:
1174 if self.default_iallocator is None:
1175 self.default_iallocator = ""
1177 # reserved_lvs added before 2.2
1178 if self.reserved_lvs is None:
1179 self.reserved_lvs = []
1181 # hidden and blacklisted operating systems added before 2.2.1
1182 if self.hidden_os is None:
1185 if self.blacklisted_os is None:
1186 self.blacklisted_os = []
1188 # primary_ip_family added before 2.3
1189 if self.primary_ip_family is None:
1190 self.primary_ip_family = AF_INET
1192 if self.prealloc_wipe_disks is None:
1193 self.prealloc_wipe_disks = False
1195 # shared_file_storage_dir added before 2.5
1196 if self.shared_file_storage_dir is None:
1197 self.shared_file_storage_dir = ""
1200 """Custom function for cluster.
1203 mydict = super(Cluster, self).ToDict()
1204 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1208 def FromDict(cls, val):
1209 """Custom function for cluster.
1212 obj = super(Cluster, cls).FromDict(val)
1213 if not isinstance(obj.tcpudp_port_pool, set):
1214 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1217 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1218 """Get the default hypervisor parameters for the cluster.
1220 @param hypervisor: the hypervisor name
1221 @param os_name: if specified, we'll also update the defaults for this OS
1222 @param skip_keys: if passed, list of keys not to use
1223 @return: the defaults dict
1226 if skip_keys is None:
1229 fill_stack = [self.hvparams.get(hypervisor, {})]
1230 if os_name is not None:
1231 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1232 fill_stack.append(os_hvp)
1235 for o_dict in fill_stack:
1236 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1240 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1241 """Fill a given hvparams dict with cluster defaults.
1243 @type hv_name: string
1244 @param hv_name: the hypervisor to use
1245 @type os_name: string
1246 @param os_name: the OS to use for overriding the hypervisor defaults
1247 @type skip_globals: boolean
1248 @param skip_globals: if True, the global hypervisor parameters will
1251 @return: a copy of the given hvparams with missing keys filled from
1252 the cluster defaults
1256 skip_keys = constants.HVC_GLOBALS
1260 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1261 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1263 def FillHV(self, instance, skip_globals=False):
1264 """Fill an instance's hvparams dict with cluster defaults.
1266 @type instance: L{objects.Instance}
1267 @param instance: the instance parameter to fill
1268 @type skip_globals: boolean
1269 @param skip_globals: if True, the global hypervisor parameters will
1272 @return: a copy of the instance's hvparams with missing keys filled from
1273 the cluster defaults
1276 return self.SimpleFillHV(instance.hypervisor, instance.os,
1277 instance.hvparams, skip_globals)
1279 def SimpleFillBE(self, beparams):
1280 """Fill a given beparams dict with cluster defaults.
1282 @type beparams: dict
1283 @param beparams: the dict to fill
1285 @return: a copy of the passed in beparams with missing keys filled
1286 from the cluster defaults
1289 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1291 def FillBE(self, instance):
1292 """Fill an instance's beparams dict with cluster defaults.
1294 @type instance: L{objects.Instance}
1295 @param instance: the instance parameter to fill
1297 @return: a copy of the instance's beparams with missing keys filled from
1298 the cluster defaults
1301 return self.SimpleFillBE(instance.beparams)
1303 def SimpleFillNIC(self, nicparams):
1304 """Fill a given nicparams dict with cluster defaults.
1306 @type nicparams: dict
1307 @param nicparams: the dict to fill
1309 @return: a copy of the passed in nicparams with missing keys filled
1310 from the cluster defaults
1313 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
def SimpleFillOS(self, os_name, os_params):
  """Fill an instance's osparams dict with cluster defaults.

  @type os_name: string
  @param os_name: the OS name to use
  @type os_params: dict
  @param os_params: the dict to fill with default values
  @return: a copy of the instance's osparams with missing keys filled from
      the cluster defaults

  """
  # strip a "+variant" suffix, if any, to get the base OS name
  base_name = os_name.split("+", 1)[0]
  # start from the variant-less OS defaults
  merged = self.osparams.get(base_name, {})
  # overlay the variant-specific defaults (no-op when os_name has no variant)
  merged = FillDict(merged, self.osparams.get(os_name, {}))
  # finally overlay the explicitly-given parameters
  return FillDict(merged, os_params)
def FillND(self, node, nodegroup):
  """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}.

  The node's own parameters are first filled from its node group's
  defaults, and the result is then filled from the cluster defaults.

  @type node: L{objects.Node}
  @param node: A Node object to fill
  @type nodegroup: L{objects.NodeGroup}
  @param nodegroup: A NodeGroup object to fill
  @return: a copy of the node's ndparams with defaults filled

  """
  return self.SimpleFillND(nodegroup.FillND(node))
def SimpleFillND(self, ndparams):
  """Fill a given ndparams dict with defaults.

  @type ndparams: dict
  @param ndparams: the dict to fill
  @return: a copy of the passed in ndparams with missing keys filled
      from the cluster defaults

  """
  cluster_ndparams = self.ndparams
  return FillDict(cluster_ndparams, ndparams)
1360 class BlockDevStatus(ConfigObject):
1361 """Config object representing the status of a block device."""
1373 class ImportExportStatus(ConfigObject):
1374 """Config object representing the status of an import or export."""
1380 "progress_throughput",
1388 class ImportExportOptions(ConfigObject):
1389 """Options for import/export daemon
1391 @ivar key_name: X509 key name (None for cluster certificate)
1392 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1393 @ivar compress: Compression method (one of L{constants.IEC_ALL})
1394 @ivar magic: Used to ensure the connection goes to the right disk
1395 @ivar ipv6: Whether to use IPv6
1396 @ivar connect_timeout: Number of seconds for establishing connection
1409 class ConfdRequest(ConfigObject):
1410 """Object holding a confd request.
1412 @ivar protocol: confd protocol version
1413 @ivar type: confd query type
1414 @ivar query: query request
1415 @ivar rsalt: requested reply salt
1426 class ConfdReply(ConfigObject):
1427 """Object holding a confd reply.
1429 @ivar protocol: confd protocol version
1430 @ivar status: reply status code (ok, error)
1431 @ivar answer: confd query reply
1432 @ivar serial: configuration serial number
1443 class QueryFieldDefinition(ConfigObject):
1444 """Object holding a query field definition.
1446 @ivar name: Field name
1447 @ivar title: Human-readable title
1448 @ivar kind: Field type
1449 @ivar doc: Human-readable description
1460 class _QueryResponseBase(ConfigObject):
1466 """Custom function for serializing.
1469 mydict = super(_QueryResponseBase, self).ToDict()
1470 mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1474 def FromDict(cls, val):
1475 """Custom function for de-serializing.
1478 obj = super(_QueryResponseBase, cls).FromDict(val)
1479 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1483 class QueryRequest(ConfigObject):
1484 """Object holding a query request.
1494 class QueryResponse(_QueryResponseBase):
1495 """Object holding the response to a query.
1497 @ivar fields: List of L{QueryFieldDefinition} objects
1498 @ivar data: Requested data
1506 class QueryFieldsRequest(ConfigObject):
1507 """Object holding a request for querying available fields.
1516 class QueryFieldsResponse(_QueryResponseBase):
1517 """Object holding the response to a query for fields.
1519 @ivar fields: List of L{QueryFieldDefinition} objects
1526 class MigrationStatus(ConfigObject):
1527 """Object holding the status of a migration.
1537 class InstanceConsole(ConfigObject):
1538 """Object describing how to access the console of an instance.
1553 """Validates contents of this object.
1556 assert self.kind in constants.CONS_ALL, "Unknown console type"
1557 assert self.instance, "Missing instance name"
1558 assert self.message or self.kind in [constants.CONS_SSH,
1559 constants.CONS_SPICE,
1561 assert self.host or self.kind == constants.CONS_MESSAGE
1562 assert self.port or self.kind in [constants.CONS_MESSAGE,
1564 assert self.user or self.kind in [constants.CONS_MESSAGE,
1565 constants.CONS_SPICE,
1567 assert self.command or self.kind in [constants.CONS_MESSAGE,
1568 constants.CONS_SPICE,
1570 assert self.display or self.kind in [constants.CONS_MESSAGE,
1571 constants.CONS_SPICE,
1576 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1577 """Simple wrapper over ConfigParse that allows serialization.
1579 This class is basically ConfigParser.SafeConfigParser with two
1580 additional methods that allow it to serialize/unserialize to/from a
1585 """Dump this instance and return the string representation."""
1588 return buf.getvalue()
1591 def Loads(cls, data):
1592 """Load data from a string."""
1593 buf = StringIO(data)