4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable-msg=E0203,W0201
31 # E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
40 from cStringIO import StringIO
42 from ganeti import errors
43 from ganeti import constants
45 from socket import AF_INET
48 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
49 "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
51 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # Deep-copy so callers cannot mutate the shared defaults (or nested
  # values inside them) through the returned dict
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      # A skipped key may legitimately be absent from both dicts
      ret_dict.pop(k, None)
  return ret_dict
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @return: the filled-in grouped parameters (a new dict when target
      was None, otherwise the same, updated, object)

  """
  if target is None:
    # Nothing configured yet: start with only the default group
    target = {constants.PP_DEFAULT: defaults}
  else:
    # Fill each existing group on top of the defaults
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
96 class ConfigObject(object):
97 """A generic config object.
99 It has the following properties:
101 - provides somewhat safe recursive unpickling and pickling for its classes
102 - unset attributes which are defined in slots are always returned
103 as None instead of raising an error
105 Classes derived from this must always declare __slots__ (we use many
106 config objects and the memory reduction is useful)
111 def __init__(self, **kwargs):
112 for k, v in kwargs.iteritems():
  def __getattr__(self, name):
    """Reject access to attributes not declared in the class' slots.

    Python only calls this hook when normal attribute lookup fails,
    i.e. for attributes that were never set; for names that are valid
    slots the method falls through and the caller gets None.

    @raise AttributeError: if the name is not a declared slot

    """
    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    # NOTE(review): valid-but-unset slots fall through here, which
    # yields None -- the documented "unset attributes return None"
121 def __setstate__(self, state):
122 slots = self._all_slots()
125 setattr(self, name, state[name])
129 """Compute the list of all declared slots for a class.
133 for parent in cls.__mro__:
134 slots.extend(getattr(parent, "__slots__", []))
138 """Convert to a dict holding only standard python types.
140 The generic routine just dumps all of this object's attributes in
141 a dict. It does not work if the class has children who are
142 ConfigObjects themselves (e.g. the nics list in an Instance), in
143 which case the object should subclass the function in order to
144 make sure all objects returned are only standard python types.
148 for name in self._all_slots():
149 value = getattr(self, name, None)
150 if value is not None:
154 __getstate__ = ToDict
157 def FromDict(cls, val):
158 """Create an object from a dictionary.
160 This generic routine takes a dict, instantiates a new instance of
161 the given class, and sets attributes based on the dict content.
163 As for `ToDict`, this does not work if the class has children
164 who are ConfigObjects themselves (e.g. the nics list in an
165 Instance), in which case the object should subclass the function
166 and alter the objects.
169 if not isinstance(val, dict):
170 raise errors.ConfigurationError("Invalid object passed to FromDict:"
171 " expected dict, got %s" % type(val))
172 val_str = dict([(str(k), v) for k, v in val.iteritems()])
173 obj = cls(**val_str) # pylint: disable-msg=W0142
177 def _ContainerToDicts(container):
178 """Convert the elements of a container to standard python types.
180 This method converts a container with elements derived from
181 ConfigData to standard python types. If the container is a dict,
182 we don't touch the keys, only the values.
185 if isinstance(container, dict):
186 ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
187 elif isinstance(container, (list, tuple, set, frozenset)):
188 ret = [elem.ToDict() for elem in container]
190 raise TypeError("Invalid type %s passed to _ContainerToDicts" %
195 def _ContainerFromDicts(source, c_type, e_type):
196 """Convert a container from standard python types.
198 This method converts a container with standard python types to
199 ConfigData objects. If the container is a dict, we don't touch the
200 keys, only the values.
203 if not isinstance(c_type, type):
204 raise TypeError("Container type %s passed to _ContainerFromDicts is"
205 " not a type" % type(c_type))
209 ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
210 elif c_type in (list, tuple, set, frozenset):
211 ret = c_type([e_type.FromDict(elem) for elem in source])
213 raise TypeError("Invalid container type %s passed to"
214 " _ContainerFromDicts" % c_type)
218 """Makes a deep copy of the current object and its children.
221 dict_form = self.ToDict()
222 clone_obj = self.__class__.FromDict(dict_form)
226 """Implement __repr__ for ConfigObjects."""
227 return repr(self.ToDict())
229 def UpgradeConfig(self):
230 """Fill defaults for missing configuration values.
232 This method will be called at configuration load time, and its
233 implementation will be object dependent.
239 class TaggableObject(ConfigObject):
240 """An generic class supporting tags.
244 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
247 def ValidateTag(cls, tag):
248 """Check if a tag is valid.
250 If the tag is invalid, an errors.TagError will be raised. The
251 function has no return value.
254 if not isinstance(tag, basestring):
255 raise errors.TagError("Invalid tag type (not a string)")
256 if len(tag) > constants.MAX_TAG_LEN:
257 raise errors.TagError("Tag too long (>%d characters)" %
258 constants.MAX_TAG_LEN)
260 raise errors.TagError("Tags cannot be empty")
261 if not cls.VALID_TAG_RE.match(tag):
262 raise errors.TagError("Tag contains invalid characters")
265 """Return the tags list.
268 tags = getattr(self, "tags", None)
270 tags = self.tags = set()
273 def AddTag(self, tag):
277 self.ValidateTag(tag)
278 tags = self.GetTags()
279 if len(tags) >= constants.MAX_TAGS_PER_OBJ:
280 raise errors.TagError("Too many tags")
281 self.GetTags().add(tag)
283 def RemoveTag(self, tag):
287 self.ValidateTag(tag)
288 tags = self.GetTags()
292 raise errors.TagError("Tag not found")
295 """Taggable-object-specific conversion to standard python types.
297 This replaces the tags set with a list.
300 bo = super(TaggableObject, self).ToDict()
302 tags = bo.get("tags", None)
303 if isinstance(tags, set):
304 bo["tags"] = list(tags)
308 def FromDict(cls, val):
309 """Custom function for instances.
312 obj = super(TaggableObject, cls).FromDict(val)
313 if hasattr(obj, "tags") and isinstance(obj.tags, list):
314 obj.tags = set(obj.tags)
318 class ConfigData(ConfigObject):
319 """Top-level config object."""
330 """Custom function for top-level config data.
332 This just replaces the list of instances, nodes and the cluster
333 with standard python types.
336 mydict = super(ConfigData, self).ToDict()
337 mydict["cluster"] = mydict["cluster"].ToDict()
338 for key in "nodes", "instances", "nodegroups":
339 mydict[key] = self._ContainerToDicts(mydict[key])
344 def FromDict(cls, val):
345 """Custom function for top-level config data
348 obj = super(ConfigData, cls).FromDict(val)
349 obj.cluster = Cluster.FromDict(obj.cluster)
350 obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
351 obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
352 obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
355 def HasAnyDiskOfType(self, dev_type):
356 """Check if in there is at disk of the given type in the configuration.
358 @type dev_type: L{constants.LDS_BLOCK}
359 @param dev_type: the type to look for
361 @return: boolean indicating if a disk of the given type was found or not
364 for instance in self.instances.values():
365 for disk in instance.disks:
366 if disk.IsBasedOnDiskType(dev_type):
370 def UpgradeConfig(self):
371 """Fill defaults for missing configuration values.
374 self.cluster.UpgradeConfig()
375 for node in self.nodes.values():
377 for instance in self.instances.values():
378 instance.UpgradeConfig()
379 if self.nodegroups is None:
381 for nodegroup in self.nodegroups.values():
382 nodegroup.UpgradeConfig()
383 if self.cluster.drbd_usermode_helper is None:
384 # To decide if we set an helper let's check if at least one instance has
385 # a DRBD disk. This does not cover all the possible scenarios but it
386 # gives a good approximation.
387 if self.HasAnyDiskOfType(constants.LD_DRBD8):
388 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
391 class NIC(ConfigObject):
392 """Config object representing a network card."""
393 __slots__ = ["mac", "ip", "nicparams"]
396 def CheckParameterSyntax(cls, nicparams):
397 """Check the given parameters for validity.
399 @type nicparams: dict
400 @param nicparams: dictionary with parameter names/value
401 @raise errors.ConfigurationError: when a parameter is not valid
404 if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
405 nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
406 err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
407 raise errors.ConfigurationError(err)
409 if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
410 not nicparams[constants.NIC_LINK]):
411 err = "Missing bridged nic link"
412 raise errors.ConfigurationError(err)
415 class Disk(ConfigObject):
416 """Config object representing a block device."""
417 __slots__ = ["dev_type", "logical_id", "physical_id",
418 "children", "iv_name", "size", "mode"]
420 def CreateOnSecondary(self):
421 """Test if this device needs to be created on a secondary node."""
422 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
424 def AssembleOnSecondary(self):
425 """Test if this device needs to be assembled on a secondary node."""
426 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
428 def OpenOnSecondary(self):
429 """Test if this device needs to be opened on a secondary node."""
430 return self.dev_type in (constants.LD_LV,)
  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      # presumably logical_id is (vg_name, lv_name) here -- the path is
      # built as /dev/<vg>/<lv>; TODO confirm against disk creation code
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    # all other device types fall through: no static path (None)
449 def ChildrenNeeded(self):
450 """Compute the needed number of children for activation.
452 This method will return either -1 (all children) or a positive
453 number denoting the minimum number of children needed for
454 activation (only mirrored devices will usually return >=0).
456 Currently, only DRBD8 supports diskless activation (therefore we
457 return 0), for all other we keep the previous semantics and return
461 if self.dev_type == constants.LD_DRBD8:
465 def IsBasedOnDiskType(self, dev_type):
466 """Check if the disk or its children are based on the given type.
468 @type dev_type: L{constants.LDS_BLOCK}
469 @param dev_type: the type to look for
471 @return: boolean indicating if a device of the given type was found or not
475 for child in self.children:
476 if child.IsBasedOnDiskType(dev_type):
478 return self.dev_type == dev_type
480 def GetNodes(self, node):
481 """This function returns the nodes this device lives on.
483 Given the node on which the parent of the device lives on (or, in
484 case of a top-level device, the primary node of the devices'
485 instance), this function will return a list of nodes on which this
486 devices needs to (or can) be assembled.
489 if self.dev_type in [constants.LD_LV, constants.LD_FILE,
490 constants.LD_BLOCKDEV]:
492 elif self.dev_type in constants.LDS_DRBD:
493 result = [self.logical_id[0], self.logical_id[1]]
494 if node not in result:
495 raise errors.ConfigurationError("DRBD device passed unknown node")
497 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
500 def ComputeNodeTree(self, parent_node):
501 """Compute the node/disk tree for this disk and its children.
503 This method, given the node on which the parent disk lives, will
504 return the list of all (node, disk) pairs which describe the disk
505 tree in the most compact way. For example, a drbd/lvm stack
506 will be returned as (primary_node, drbd) and (secondary_node, drbd)
507 which represents all the top-level devices on the nodes.
510 my_nodes = self.GetNodes(parent_node)
511 result = [(node, self) for node in my_nodes]
512 if not self.children:
515 for node in my_nodes:
516 for child in self.children:
517 child_result = child.ComputeNodeTree(node)
518 if len(child_result) == 1:
519 # child (and all its descendants) is simple, doesn't split
520 # over multiple hosts, so we don't need to describe it, our
521 # own entry for this node describes it completely
524 # check if child nodes differ from my nodes; note that
525 # subdisk can differ from the child itself, and be instead
526 # one of its descendants
527 for subnode, subdisk in child_result:
528 if subnode not in my_nodes:
529 result.append((subnode, subdisk))
530 # otherwise child is under our own node, so we ignore this
531 # entry (but probably the other results in the list will
535 def ComputeGrowth(self, amount):
536 """Compute the per-VG growth requirements.
538 This only works for VG-based disks.
540 @type amount: integer
541 @param amount: the desired increase in (user-visible) disk space
543 @return: a dictionary of volume-groups and the required size
546 if self.dev_type == constants.LD_LV:
547 return {self.logical_id[0]: amount}
548 elif self.dev_type == constants.LD_DRBD8:
550 return self.children[0].ComputeGrowth(amount)
554 # Other disk types do not require VG space
557 def RecordGrow(self, amount):
558 """Update the size of this disk after growth.
560 This method recurses over the disks's children and updates their
561 size correspondigly. The method needs to be kept in sync with the
562 actual algorithms from bdev.
565 if self.dev_type in (constants.LD_LV, constants.LD_FILE):
567 elif self.dev_type == constants.LD_DRBD8:
569 self.children[0].RecordGrow(amount)
572 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
573 " disk type %s" % self.dev_type)
576 """Sets recursively the size to zero for the disk and its children.
580 for child in self.children:
584 def SetPhysicalID(self, target_node, nodes_ip):
585 """Convert the logical ID to the physical ID.
587 This is used only for drbd, which needs ip/port configuration.
589 The routine descends down and updates its children also, because
590 this helps when the only the top device is passed to the remote
594 - target_node: the node we wish to configure for
595 - nodes_ip: a mapping of node name to ip
597 The target_node must exist in in nodes_ip, and must be one of the
598 nodes in the logical ID for each of the DRBD devices encountered
603 for child in self.children:
604 child.SetPhysicalID(target_node, nodes_ip)
606 if self.logical_id is None and self.physical_id is not None:
608 if self.dev_type in constants.LDS_DRBD:
609 pnode, snode, port, pminor, sminor, secret = self.logical_id
610 if target_node not in (pnode, snode):
611 raise errors.ConfigurationError("DRBD device not knowing node %s" %
613 pnode_ip = nodes_ip.get(pnode, None)
614 snode_ip = nodes_ip.get(snode, None)
615 if pnode_ip is None or snode_ip is None:
616 raise errors.ConfigurationError("Can't find primary or secondary node"
617 " for %s" % str(self))
618 p_data = (pnode_ip, port)
619 s_data = (snode_ip, port)
620 if pnode == target_node:
621 self.physical_id = p_data + s_data + (pminor, secret)
622 else: # it must be secondary, we tested above
623 self.physical_id = s_data + p_data + (sminor, secret)
625 self.physical_id = self.logical_id
629 """Disk-specific conversion to standard python types.
631 This replaces the children lists of objects with lists of
632 standard python types.
635 bo = super(Disk, self).ToDict()
637 for attr in ("children",):
638 alist = bo.get(attr, None)
640 bo[attr] = self._ContainerToDicts(alist)
644 def FromDict(cls, val):
645 """Custom function for Disks
648 obj = super(Disk, cls).FromDict(val)
650 obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
651 if obj.logical_id and isinstance(obj.logical_id, list):
652 obj.logical_id = tuple(obj.logical_id)
653 if obj.physical_id and isinstance(obj.physical_id, list):
654 obj.physical_id = tuple(obj.physical_id)
655 if obj.dev_type in constants.LDS_DRBD:
656 # we need a tuple of length six here
657 if len(obj.logical_id) < 6:
658 obj.logical_id += (None,) * (6 - len(obj.logical_id))
662 """Custom str() formatter for disks.
665 if self.dev_type == constants.LD_LV:
666 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
667 elif self.dev_type in constants.LDS_DRBD:
668 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
670 if self.physical_id is None:
673 phy = ("configured as %s:%s %s:%s" %
674 (self.physical_id[0], self.physical_id[1],
675 self.physical_id[2], self.physical_id[3]))
677 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
678 (node_a, minor_a, node_b, minor_b, port, phy))
679 if self.children and self.children.count(None) == 0:
680 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
682 val += "no local storage"
684 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
685 (self.dev_type, self.logical_id, self.physical_id, self.children))
686 if self.iv_name is None:
687 val += ", not visible"
689 val += ", visible as /dev/%s" % self.iv_name
690 if isinstance(self.size, int):
691 val += ", size=%dm)>" % self.size
693 val += ", size='%s')>" % (self.size,)
697 """Checks that this disk is correctly configured.
701 if self.mode not in constants.DISK_ACCESS_SET:
702 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
705 def UpgradeConfig(self):
706 """Fill defaults for missing configuration values.
710 for child in self.children:
711 child.UpgradeConfig()
712 # add here config upgrade for this disk
715 class Instance(TaggableObject):
716 """Config object representing an instance."""
731 ] + _TIMESTAMPS + _UUID
733 def _ComputeSecondaryNodes(self):
734 """Compute the list of secondary nodes.
736 This is a simple wrapper over _ComputeAllNodes.
739 all_nodes = set(self._ComputeAllNodes())
740 all_nodes.discard(self.primary_node)
741 return tuple(all_nodes)
743 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
744 "List of secondary nodes")
746 def _ComputeAllNodes(self):
747 """Compute the list of all nodes.
749 Since the data is already there (in the drbd disks), keeping it as
750 a separate normal attribute is redundant and if not properly
751 synchronised can cause problems. Thus it's better to compute it
755 def _Helper(nodes, device):
756 """Recursively computes nodes given a top device."""
757 if device.dev_type in constants.LDS_DRBD:
758 nodea, nodeb = device.logical_id[:2]
762 for child in device.children:
763 _Helper(nodes, child)
766 all_nodes.add(self.primary_node)
767 for device in self.disks:
768 _Helper(all_nodes, device)
769 return tuple(all_nodes)
771 all_nodes = property(_ComputeAllNodes, None, None,
772 "List of all nodes of the instance")
774 def MapLVsByNode(self, lvmap=None, devs=None, node=None):
775 """Provide a mapping of nodes to LVs this instance owns.
777 This function figures out what logical volumes should belong on
778 which nodes, recursing through a device tree.
780 @param lvmap: optional dictionary to receive the
781 'node' : ['lv', ...] data.
783 @return: None if lvmap arg is given, otherwise, a dictionary of
784 the form { 'nodename' : ['volume1', 'volume2', ...], ... };
785 volumeN is of the form "vg_name/lv_name", compatible with
790 node = self.primary_node
798 if not node in lvmap:
806 if dev.dev_type == constants.LD_LV:
807 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
809 elif dev.dev_type in constants.LDS_DRBD:
811 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
812 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
815 self.MapLVsByNode(lvmap, dev.children, node)
819 def FindDisk(self, idx):
820 """Find a disk given having a specified index.
822 This is just a wrapper that does validation of the index.
825 @param idx: the disk index
827 @return: the corresponding disk
828 @raise errors.OpPrereqError: when the given index is not valid
833 return self.disks[idx]
834 except (TypeError, ValueError), err:
835 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
838 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
839 " 0 to %d" % (idx, len(self.disks) - 1),
843 """Instance-specific conversion to standard python types.
845 This replaces the children lists of objects with lists of standard
849 bo = super(Instance, self).ToDict()
851 for attr in "nics", "disks":
852 alist = bo.get(attr, None)
854 nlist = self._ContainerToDicts(alist)
861 def FromDict(cls, val):
862 """Custom function for instances.
865 obj = super(Instance, cls).FromDict(val)
866 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
867 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
870 def UpgradeConfig(self):
871 """Fill defaults for missing configuration values.
874 for nic in self.nics:
876 for disk in self.disks:
879 for key in constants.HVC_GLOBALS:
881 del self.hvparams[key]
884 if self.osparams is None:
888 class OS(ConfigObject):
889 """Config object representing an operating system.
891 @type supported_parameters: list
892 @ivar supported_parameters: a list of tuples, name and description,
893 containing the supported parameters by this OS
895 @type VARIANT_DELIM: string
896 @cvar VARIANT_DELIM: the variant delimiter
908 "supported_variants",
909 "supported_parameters",
915 def SplitNameVariant(cls, name):
916 """Splits the name into the proper name and variant.
918 @param name: the OS (unprocessed) name
920 @return: a list of two elements; if the original name didn't
921 contain a variant, it's returned as an empty string
924 nv = name.split(cls.VARIANT_DELIM, 1)
930 def GetName(cls, name):
931 """Returns the proper name of the os (without the variant).
933 @param name: the OS (unprocessed) name
936 return cls.SplitNameVariant(name)[0]
939 def GetVariant(cls, name):
940 """Returns the variant the os (without the base name).
942 @param name: the OS (unprocessed) name
945 return cls.SplitNameVariant(name)[1]
948 class Node(TaggableObject):
949 """Config object representing a node."""
963 ] + _TIMESTAMPS + _UUID
965 def UpgradeConfig(self):
966 """Fill defaults for missing configuration values.
969 # pylint: disable-msg=E0203
970 # because these are "defined" via slots, not manually
971 if self.master_capable is None:
972 self.master_capable = True
974 if self.vm_capable is None:
975 self.vm_capable = True
977 if self.ndparams is None:
980 if self.powered is None:
984 class NodeGroup(TaggableObject):
985 """Config object representing a node group."""
992 ] + _TIMESTAMPS + _UUID
995 """Custom function for nodegroup.
997 This discards the members object, which gets recalculated and is only kept
1001 mydict = super(NodeGroup, self).ToDict()
1002 del mydict["members"]
1006 def FromDict(cls, val):
1007 """Custom function for nodegroup.
1009 The members slot is initialized to an empty list, upon deserialization.
1012 obj = super(NodeGroup, cls).FromDict(val)
1016 def UpgradeConfig(self):
1017 """Fill defaults for missing configuration values.
1020 if self.ndparams is None:
1023 if self.serial_no is None:
1026 if self.alloc_policy is None:
1027 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1029 # We only update mtime, and not ctime, since we would not be able to provide
1030 # a correct value for creation time.
1031 if self.mtime is None:
1032 self.mtime = time.time()
  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled
        from the node group

    """
    return self.SimpleFillND(node.ndparams)
  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill

    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
1057 class Cluster(TaggableObject):
1058 """Config object representing the cluster."""
1062 "highest_used_port",
1065 "volume_group_name",
1067 "drbd_usermode_helper",
1069 "default_hypervisor",
1075 "shared_file_storage_dir",
1076 "enabled_hypervisors",
1083 "candidate_pool_size",
1086 "maintain_node_health",
1088 "default_iallocator",
1091 "primary_ip_family",
1092 "prealloc_wipe_disks",
1093 ] + _TIMESTAMPS + _UUID
1095 def UpgradeConfig(self):
1096 """Fill defaults for missing configuration values.
1099 # pylint: disable-msg=E0203
1100 # because these are "defined" via slots, not manually
1101 if self.hvparams is None:
1102 self.hvparams = constants.HVC_DEFAULTS
1104 for hypervisor in self.hvparams:
1105 self.hvparams[hypervisor] = FillDict(
1106 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1108 if self.os_hvp is None:
1111 # osparams added before 2.2
1112 if self.osparams is None:
1115 if self.ndparams is None:
1116 self.ndparams = constants.NDC_DEFAULTS
1118 self.beparams = UpgradeGroupedParams(self.beparams,
1119 constants.BEC_DEFAULTS)
1120 migrate_default_bridge = not self.nicparams
1121 self.nicparams = UpgradeGroupedParams(self.nicparams,
1122 constants.NICC_DEFAULTS)
1123 if migrate_default_bridge:
1124 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1127 if self.modify_etc_hosts is None:
1128 self.modify_etc_hosts = True
1130 if self.modify_ssh_setup is None:
1131 self.modify_ssh_setup = True
1133 # default_bridge is no longer used in 2.1. The slot is left there to
1134 # support auto-upgrading. It can be removed once we decide to deprecate
1135 # upgrading straight from 2.0.
1136 if self.default_bridge is not None:
1137 self.default_bridge = None
1139 # default_hypervisor is just the first enabled one in 2.1. This slot and
1140 # code can be removed once upgrading straight from 2.0 is deprecated.
1141 if self.default_hypervisor is not None:
1142 self.enabled_hypervisors = ([self.default_hypervisor] +
1143 [hvname for hvname in self.enabled_hypervisors
1144 if hvname != self.default_hypervisor])
1145 self.default_hypervisor = None
1147 # maintain_node_health added after 2.1.1
1148 if self.maintain_node_health is None:
1149 self.maintain_node_health = False
1151 if self.uid_pool is None:
1154 if self.default_iallocator is None:
1155 self.default_iallocator = ""
1157 # reserved_lvs added before 2.2
1158 if self.reserved_lvs is None:
1159 self.reserved_lvs = []
1161 # hidden and blacklisted operating systems added before 2.2.1
1162 if self.hidden_os is None:
1165 if self.blacklisted_os is None:
1166 self.blacklisted_os = []
1168 # primary_ip_family added before 2.3
1169 if self.primary_ip_family is None:
1170 self.primary_ip_family = AF_INET
1172 if self.prealloc_wipe_disks is None:
1173 self.prealloc_wipe_disks = False
1175 # shared_file_storage_dir added before 2.5
1176 if self.shared_file_storage_dir is None:
1177 self.shared_file_storage_dir = ""
1180 """Custom function for cluster.
1183 mydict = super(Cluster, self).ToDict()
1184 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
1188 def FromDict(cls, val):
1189 """Custom function for cluster.
1192 obj = super(Cluster, cls).FromDict(val)
1193 if not isinstance(obj.tcpudp_port_pool, set):
1194 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1197 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1198 """Get the default hypervisor parameters for the cluster.
1200 @param hypervisor: the hypervisor name
1201 @param os_name: if specified, we'll also update the defaults for this OS
1202 @param skip_keys: if passed, list of keys not to use
1203 @return: the defaults dict
1206 if skip_keys is None:
1209 fill_stack = [self.hvparams.get(hypervisor, {})]
1210 if os_name is not None:
1211 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1212 fill_stack.append(os_hvp)
1215 for o_dict in fill_stack:
1216 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1220 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1221 """Fill a given hvparams dict with cluster defaults.
1223 @type hv_name: string
1224 @param hv_name: the hypervisor to use
1225 @type os_name: string
1226 @param os_name: the OS to use for overriding the hypervisor defaults
1227 @type skip_globals: boolean
1228 @param skip_globals: if True, the global hypervisor parameters will
1231 @return: a copy of the given hvparams with missing keys filled from
1232 the cluster defaults
1236 skip_keys = constants.HVC_GLOBALS
1240 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1241 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1243 def FillHV(self, instance, skip_globals=False):
1244 """Fill an instance's hvparams dict with cluster defaults.
1246 @type instance: L{objects.Instance}
1247 @param instance: the instance parameter to fill
1248 @type skip_globals: boolean
1249 @param skip_globals: if True, the global hypervisor parameters will
1252 @return: a copy of the instance's hvparams with missing keys filled from
1253 the cluster defaults
1256 return self.SimpleFillHV(instance.hypervisor, instance.os,
1257 instance.hvparams, skip_globals)
1259 def SimpleFillBE(self, beparams):
1260 """Fill a given beparams dict with cluster defaults.
1262 @type beparams: dict
1263 @param beparams: the dict to fill
1265 @return: a copy of the passed in beparams with missing keys filled
1266 from the cluster defaults
1269 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1271 def FillBE(self, instance):
1272 """Fill an instance's beparams dict with cluster defaults.
1274 @type instance: L{objects.Instance}
1275 @param instance: the instance parameter to fill
1277 @return: a copy of the instance's beparams with missing keys filled from
1278 the cluster defaults
1281 return self.SimpleFillBE(instance.beparams)
1283 def SimpleFillNIC(self, nicparams):
1284 """Fill a given nicparams dict with cluster defaults.
1286 @type nicparams: dict
1287 @param nicparams: the dict to fill
1289 @return: a copy of the passed in nicparams with missing keys filled
1290 from the cluster defaults
1293 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values

    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    # "name+variant" OS names: the base name is everything before the "+"
    name_only = os_name.split("+", 1)[0]
    # start from the base-OS parameters...
    result = self.osparams.get(name_only, {})
    # ...layer the variant-specific parameters on top...
    result = FillDict(result, self.osparams.get(os_name, {}))
    # ...and finally the explicitly supplied parameters
    return FillDict(result, os_params)
1315 def FillND(self, node, nodegroup):
1316 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1318 @type node: L{objects.Node}
1319 @param node: A Node object to fill
1320 @type nodegroup: L{objects.NodeGroup}
1321 @param nodegroup: A Node object to fill
1322 @return a copy of the node's ndparams with defaults filled
1325 return self.SimpleFillND(nodegroup.FillND(node))
1327 def SimpleFillND(self, ndparams):
1328 """Fill a given ndparams dict with defaults.
1330 @type ndparams: dict
1331 @param ndparams: the dict to fill
1333 @return: a copy of the passed in ndparams with missing keys filled
1334 from the cluster defaults
1337 return FillDict(self.ndparams, ndparams)
1340 class BlockDevStatus(ConfigObject):
1341 """Config object representing the status of a block device."""
1353 class ImportExportStatus(ConfigObject):
1354 """Config object representing the status of an import or export."""
1360 "progress_throughput",
1368 class ImportExportOptions(ConfigObject):
1369 """Options for import/export daemon
1371 @ivar key_name: X509 key name (None for cluster certificate)
1372 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1373 @ivar compress: Compression method (one of L{constants.IEC_ALL})
1374 @ivar magic: Used to ensure the connection goes to the right disk
1375 @ivar ipv6: Whether to use IPv6
1376 @ivar connect_timeout: Number of seconds for establishing connection
1389 class ConfdRequest(ConfigObject):
1390 """Object holding a confd request.
1392 @ivar protocol: confd protocol version
1393 @ivar type: confd query type
1394 @ivar query: query request
1395 @ivar rsalt: requested reply salt
1406 class ConfdReply(ConfigObject):
1407 """Object holding a confd reply.
1409 @ivar protocol: confd protocol version
1410 @ivar status: reply status code (ok, error)
1411 @ivar answer: confd query reply
1412 @ivar serial: configuration serial number
1423 class QueryFieldDefinition(ConfigObject):
1424 """Object holding a query field definition.
1426 @ivar name: Field name
1427 @ivar title: Human-readable title
1428 @ivar kind: Field type
1429 @ivar doc: Human-readable description
1440 class _QueryResponseBase(ConfigObject):
1446 """Custom function for serializing.
1449 mydict = super(_QueryResponseBase, self).ToDict()
1450 mydict["fields"] = self._ContainerToDicts(mydict["fields"])
1454 def FromDict(cls, val):
1455 """Custom function for de-serializing.
1458 obj = super(_QueryResponseBase, cls).FromDict(val)
1459 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
1463 class QueryRequest(ConfigObject):
1464 """Object holding a query request.
1474 class QueryResponse(_QueryResponseBase):
1475 """Object holding the response to a query.
1477 @ivar fields: List of L{QueryFieldDefinition} objects
1478 @ivar data: Requested data
1486 class QueryFieldsRequest(ConfigObject):
1487 """Object holding a request for querying available fields.
1496 class QueryFieldsResponse(_QueryResponseBase):
1497 """Object holding the response to a query for fields.
1499 @ivar fields: List of L{QueryFieldDefinition} objects
1506 class InstanceConsole(ConfigObject):
1507 """Object describing how to access the console of an instance.
1522 """Validates contents of this object.
1525 assert self.kind in constants.CONS_ALL, "Unknown console type"
1526 assert self.instance, "Missing instance name"
1527 assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
1528 assert self.host or self.kind == constants.CONS_MESSAGE
1529 assert self.port or self.kind in [constants.CONS_MESSAGE,
1531 assert self.user or self.kind in [constants.CONS_MESSAGE,
1533 assert self.command or self.kind in [constants.CONS_MESSAGE,
1535 assert self.display or self.kind in [constants.CONS_MESSAGE,
1540 class SerializableConfigParser(ConfigParser.SafeConfigParser):
1541 """Simple wrapper over ConfigParse that allows serialization.
1543 This class is basically ConfigParser.SafeConfigParser with two
1544 additional methods that allow it to serialize/unserialize to/from a
1549 """Dump this instance and return the string representation."""
1552 return buf.getvalue()
1555 def Loads(cls, data):
1556 """Load data from a string."""
1557 buf = StringIO(data)