4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Transportable objects for Ganeti.
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
29 # pylint: disable=E0203,W0201,R0902
31 # E0203: Access to member %r before its definition, since we use
32 # objects.py which doesn't explicitly initialise its members
34 # W0201: Attribute '%s' defined outside __init__
36 # R0902: Allow instances of these objects to have more than 20 attributes
43 from cStringIO import StringIO
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import netutils
48 from ganeti import outils
49 from ganeti import utils
51 from socket import AF_INET
54 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
57 _TIMESTAMPS = ["ctime", "mtime"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  # Deep-copy so callers can mutate the result without touching the
  # shared defaults.
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      # The key may come from either input dict, or neither
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  @type default_ipolicy: dict
  @param default_ipolicy: complete policy providing the default values
  @type custom_ipolicy: dict
  @param custom_ipolicy: partial policy overriding the defaults
  @rtype: dict
  @return: the merged ('full') instance policy

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = copy.deepcopy(custom_ipolicy)
  for key in default_ipolicy:
    if key not in ret_dict:
      # Missing keys are taken verbatim (deep-copied) from the defaults
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
    elif key == constants.ISPECS_STD:
      # The std specs are themselves a dict and merged key-by-key
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
  return ret_dict
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  # Merge per disk template; templates absent from the custom dict get
  # pure defaults.
  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                            skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated (or newly created) grouped parameters

  """
  if target is None:
    # No groups configured yet: create the default group
    target = {constants.PP_DEFAULT: defaults}
  else:
    # Fill every existing group with the defaults
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target
def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  The legacy single "memory" setting is split into the newer
  maxmem/minmem pair (both set to the old value), modifying C{target}
  in place.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    # None or empty: nothing to merge
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result
def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)
def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  @rtype: dict
  @return: a fresh, empty instance policy (callers may mutate it freely)

  """
  return {}
class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    # Called only when normal lookup fails: unset slot attributes read
    # as None, anything else is a genuine error.
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    """Restore object state from a dict, ignoring non-slot keys."""
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    Subclasses override this when they need extra validation.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # Keyword arguments must be str, not unicode (Python 2 requirement)
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def __eq__(self, other):
    """Implement __eq__ for ConfigObjects."""
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
class TaggableObject(ConfigObject):
  """An generic class supporting tags.

  """
  __slots__ = ["tags"]
  # Allowed characters for a single tag
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    Lazily initialises the tags set on first access.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    @raise errors.TagError: if the tag is invalid or the limit is hit

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    @raise errors.TagError: if the tag is invalid or not present

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      # tags were serialized as a list; restore the set
      obj.tags = set(obj.tags)
    return obj
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar uuid: master nodes UUID
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  # Slot list reconstructed from the @ivar documentation above
  __slots__ = [
    "uuid",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]
class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if in there is at disk of the given type in the configuration.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
    if self.cluster.drbd_usermode_helper is None:
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
    self._UpgradeEnabledDiskTemplates()

  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the currently
    enabled and/or used disk templates.

    """
    # enabled_disk_templates in the cluster config were introduced in 2.8.
    # Remove this code once upgrading from earlier versions is deprecated.
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      self.cluster.enabled_disk_templates.extend(list(template_set))
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    # Bridged mode requires a link (the bridge name)
    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")
class Disk(ConfigObject):
  """Config object representing a block device."""
  __slots__ = (["name", "dev_type", "logical_id", "physical_id",
                "children", "iv_name", "size", "mode", "params", "spindles"] +
               _UUID)

  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)

  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    return self.dev_type in (constants.LD_LV,)

  def StaticDevPath(self):
    """Return the device path if this device type has a static one.

    Some devices (LVM for example) live always at the same /dev/ path,
    irrespective of their status. For such devices, we return this
    path, for others we return None.

    @warning: The path returned is not a normalized pathname; callers
        should check that it is a valid path.

    """
    if self.dev_type == constants.LD_LV:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    elif self.dev_type == constants.LD_BLOCKDEV:
      return self.logical_id[1]
    elif self.dev_type == constants.LD_RBD:
      return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
    return None

  def ChildrenNeeded(self):
    """Compute the needed number of children for activation.

    This method will return either -1 (all children) or a positive
    number denoting the minimum number of children needed for
    activation (only mirrored devices will usually return >=0).

    Currently, only DRBD8 supports diskless activation (therefore we
    return 0), for all other we keep the previous semantics and return
    -1.

    """
    if self.dev_type == constants.LD_DRBD8:
      return 0
    return -1

  def IsBasedOnDiskType(self, dev_type):
    """Check if the disk or its children are based on the given type.

    @type dev_type: L{constants.LDS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a device of the given type was found or not

    """
    if self.children:
      for child in self.children:
        if child.IsBasedOnDiskType(dev_type):
          return True
    return self.dev_type == dev_type

  def GetNodes(self, node_uuid):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.

    """
    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                         constants.LD_BLOCKDEV, constants.LD_RBD,
                         constants.LD_EXT]:
      result = [node_uuid]
    elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node_uuid not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result

  def ComputeNodeTree(self, parent_node_uuid):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node UUID, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    """
    my_nodes = self.GetNodes(parent_node_uuid)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result

  def ComputeGrowth(self, amount):
    """Compute the per-VG growth requirements.

    This only works for VG-based disks.

    @type amount: integer
    @param amount: the desired increase in (user-visible) disk space
    @rtype: dict
    @return: a dictionary of volume-groups and the required size

    """
    if self.dev_type == constants.LD_LV:
      return {self.logical_id[0]: amount}
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        return self.children[0].ComputeGrowth(amount)
      else:
        return {}
    else:
      # Other disk types do not require VG space
      return {}

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondigly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.LD_LV, constants.LD_FILE,
                         constants.LD_RBD, constants.LD_EXT):
      self.size += amount
    elif self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)

  def Update(self, size=None, mode=None, spindles=None):
    """Apply changes to size, spindles and mode.

    """
    if self.dev_type == constants.LD_DRBD8:
      if self.children:
        self.children[0].Update(size=size, mode=mode)
    else:
      assert not self.children

    if size is not None:
      self.size = size
    if mode is not None:
      self.mode = mode
    if spindles is not None:
      self.spindles = spindles

  def UnsetSize(self):
    """Sets recursively the size to zero for the disk and its children.

    """
    if self.children:
      for child in self.children:
        child.UnsetSize()
    self.size = 0

  def SetPhysicalID(self, target_node_uuid, nodes_ip):
    """Convert the logical ID to the physical ID.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when the only the top device is passed to the remote
    node.

    Arguments:
      - target_node_uuid: the node UUID we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in in nodes_ip, and must be one of the
    nodes in the logical ID for each of the DRBD devices encountered
    in the disk tree.

    """
    if self.children:
      for child in self.children:
        child.SetPhysicalID(target_node_uuid, nodes_ip)

    if self.logical_id is None and self.physical_id is not None:
      return
    if self.dev_type in constants.LDS_DRBD:
      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
      if target_node_uuid not in (pnode_uuid, snode_uuid):
        raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                        target_node_uuid)
      pnode_ip = nodes_ip.get(pnode_uuid, None)
      snode_ip = nodes_ip.get(snode_uuid, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      p_data = (pnode_ip, port)
      s_data = (snode_ip, port)
      if pnode_uuid == target_node_uuid:
        self.physical_id = p_data + s_data + (pminor, secret)
      else: # it must be secondary, we tested above
        self.physical_id = s_data + p_data + (sminor, secret)
    else:
      self.physical_id = self.logical_id
    return

  def ToDict(self):
    """Disk-specific conversion to standard python types.

    This replaces the children lists of objects with lists of
    standard python types.

    """
    bo = super(Disk, self).ToDict()

    for attr in ("children",):
      alist = bo.get(attr, None)
      if alist:
        bo[attr] = outils.ContainerToDicts(alist)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
    if obj.dev_type in constants.LDS_DRBD:
      # we need a tuple of length six here
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj

  def __str__(self):
    """Custom str() formatter for disks.

    """
    if self.dev_type == constants.LD_LV:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.LDS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("
      if self.physical_id is None:
        phy = "unconfigured"
      else:
        phy = ("configured as %s:%s %s:%s" %
               (self.physical_id[0], self.physical_id[1],
                self.physical_id[2], self.physical_id[3]))

      val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
              (node_a, minor_a, node_b, minor_b, port, phy))
      if self.children and self.children.count(None) == 0:
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.physical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if self.spindles is not None:
      val += ", spindles=%s" % self.spindles
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val

  def Verify(self):
    """Checks that this disk is correctly configured.

    """
    all_errors = []
    if self.mode not in constants.DISK_ACCESS_SET:
      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
    return all_errors

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    self.params = {}
    # add here config upgrade for this disk
def ComputeLDParams(disk_template, disk_params):
  """Computes Logical Disk parameters from Disk Template parameters.

  @type disk_template: string
  @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
  @type disk_params: dict
  @param disk_params: disk template parameters;
                      dict(template_name -> parameters
  @rtype: list(dict)
  @return: a list of dicts, one for each node of the disk hierarchy. Each dict
    contains the LD parameters of the node. The tree is flattened in-order.

  """
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

  assert disk_template in disk_params

  result = list()
  dt_params = disk_params[disk_template]
  if disk_template == constants.DT_DRBD8:
    result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
      constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
      constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
      constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
      constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
      constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
      constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
      constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
      constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
      constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
      constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
      constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
      constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
      constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
      }))

    # data LV
    result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
      constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
      }))

    # metadata LV
    result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
      constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
      }))

  elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
    result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])

  elif disk_template == constants.DT_PLAIN:
    result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
      constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
      }))

  elif disk_template == constants.DT_BLOCK:
    result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])

  elif disk_template == constants.DT_RBD:
    result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
      constants.LDP_POOL: dt_params[constants.RBD_POOL],
      }))

  elif disk_template == constants.DT_EXT:
    result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])

  return result
903 class InstancePolicy(ConfigObject):
904 """Config object representing instance policy limits dictionary.
906 Note that this object is not actually used in the config, it's just
907 used as a placeholder for a few functions.
911 def CheckParameterSyntax(cls, ipolicy, check_std):
912 """ Check the instance policy for validity.
915 @param ipolicy: dictionary with min/max/std specs and policies
916 @type check_std: bool
917 @param check_std: Whether to check std value or just assume compliance
918 @raise errors.ConfigurationError: when the policy is not legal
921 InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
922 if constants.IPOLICY_DTS in ipolicy:
923 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
924 for key in constants.IPOLICY_PARAMETERS:
926 InstancePolicy.CheckParameter(key, ipolicy[key])
927 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
929 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
930 utils.CommaJoin(wrong_keys))
933 def _CheckIncompleteSpec(cls, spec, keyname):
934 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys())
936 msg = ("Missing instance specs parameters for %s: %s" %
937 (keyname, utils.CommaJoin(missing_params)))
938 raise errors.ConfigurationError(msg)
941 def CheckISpecSyntax(cls, ipolicy, check_std):
942 """Check the instance policy specs for validity.
945 @param ipolicy: dictionary with min/max/std specs
946 @type check_std: bool
947 @param check_std: Whether to check std value or just assume compliance
948 @raise errors.ConfigurationError: when specs are not valid
951 if constants.ISPECS_MINMAX not in ipolicy:
955 if check_std and constants.ISPECS_STD not in ipolicy:
956 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD
957 raise errors.ConfigurationError(msg)
958 stdspec = ipolicy.get(constants.ISPECS_STD)
960 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD)
962 if not ipolicy[constants.ISPECS_MINMAX]:
963 raise errors.ConfigurationError("Empty minmax specifications")
965 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]:
966 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys())
968 msg = "Missing instance specification: %s" % utils.CommaJoin(missing)
969 raise errors.ConfigurationError(msg)
970 for (key, spec) in minmaxspecs.items():
971 InstancePolicy._CheckIncompleteSpec(spec, key)
974 for param in constants.ISPECS_PARAMETERS:
975 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec,
977 spec_std_ok = spec_std_ok and par_std_ok
978 std_is_good = std_is_good or spec_std_ok
980 raise errors.ConfigurationError("Invalid std specifications")
983 def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
984 """Check the instance policy specs for validity on a given key.
986 We check if the instance specs makes sense for a given key, that is
987 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
989 @type minmaxspecs: dict
990 @param minmaxspecs: dictionary with min and max instance spec
992 @param stdspec: dictionary with standard instance spec
994 @param name: what are the limits for
995 @type check_std: bool
996 @param check_std: Whether to check std value or just assume compliance
998 @return: C{True} when specs are valid, C{False} when standard spec for the
999 given name is not valid
1000 @raise errors.ConfigurationError: when min/max specs for the given name
1004 minspec = minmaxspecs[constants.ISPECS_MIN]
1005 maxspec = minmaxspecs[constants.ISPECS_MAX]
1006 min_v = minspec[name]
1007 max_v = maxspec[name]
1010 err = ("Invalid specification of min/max values for %s: %s/%s" %
1011 (name, min_v, max_v))
1012 raise errors.ConfigurationError(err)
1014 std_v = stdspec.get(name, min_v)
1015 return std_v >= min_v and std_v <= max_v
1020 def CheckDiskTemplates(cls, disk_templates):
1021 """Checks the disk templates for validity.
1024 if not disk_templates:
1025 raise errors.ConfigurationError("Instance policy must contain" +
1026 " at least one disk template")
1027 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1029 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1030 utils.CommaJoin(wrong))
1033 def CheckParameter(cls, key, value):
1034 """Checks a parameter.
1036 Currently we expect all parameters to be float values.
1041 except (TypeError, ValueError), err:
1042 raise errors.ConfigurationError("Invalid value for key" " '%s':"
1043 " '%s', error: %s" % (key, value, err))
1046 class Instance(TaggableObject):
1047 """Config object representing an instance."""
1063 ] + _TIMESTAMPS + _UUID
1065 def _ComputeSecondaryNodes(self):
1066 """Compute the list of secondary nodes.
1068 This is a simple wrapper over _ComputeAllNodes.
1071 all_nodes = set(self._ComputeAllNodes())
1072 all_nodes.discard(self.primary_node)
1073 return tuple(all_nodes)
1075 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1076 "List of names of secondary nodes")
1078 def _ComputeAllNodes(self):
1079 """Compute the list of all nodes.
1081 Since the data is already there (in the drbd disks), keeping it as
1082 a separate normal attribute is redundant and if not properly
1083 synchronised can cause problems. Thus it's better to compute it
1087 def _Helper(nodes, device):
1088 """Recursively computes nodes given a top device."""
1089 if device.dev_type in constants.LDS_DRBD:
1090 nodea, nodeb = device.logical_id[:2]
1094 for child in device.children:
1095 _Helper(nodes, child)
1098 all_nodes.add(self.primary_node)
1099 for device in self.disks:
1100 _Helper(all_nodes, device)
1101 return tuple(all_nodes)
1103 all_nodes = property(_ComputeAllNodes, None, None,
1104 "List of names of all the nodes of the instance")
1106 def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1107 """Provide a mapping of nodes to LVs this instance owns.
1109 This function figures out what logical volumes should belong on
1110 which nodes, recursing through a device tree.
1113 @param lvmap: optional dictionary to receive the
1114 'node' : ['lv', ...] data.
1115 @type devs: list of L{Disk}
1116 @param devs: disks to get the LV name for. If None, all disk of this
1118 @type node_uuid: string
1119 @param node_uuid: UUID of the node to get the LV names for. If None, the
1120 primary node of this instance is used.
1121 @return: None if lvmap arg is given, otherwise, a dictionary of
1122 the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1123 volumeN is of the form "vg_name/lv_name", compatible with
1127 if node_uuid is None:
1128 node_uuid = self.primary_node
1136 if not node_uuid in lvmap:
1137 lvmap[node_uuid] = []
1144 if dev.dev_type == constants.LD_LV:
1145 lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1147 elif dev.dev_type in constants.LDS_DRBD:
1149 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1150 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1153 self.MapLVsByNode(lvmap, dev.children, node_uuid)
1157 def FindDisk(self, idx):
1158 """Find a disk given having a specified index.
1160 This is just a wrapper that does validation of the index.
1163 @param idx: the disk index
1165 @return: the corresponding disk
1166 @raise errors.OpPrereqError: when the given index is not valid
1171 return self.disks[idx]
1172 except (TypeError, ValueError), err:
1173 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1176 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
1177 " 0 to %d" % (idx, len(self.disks) - 1),
1181 """Instance-specific conversion to standard python types.
1183 This replaces the children lists of objects with lists of standard
1187 bo = super(Instance, self).ToDict()
1189 for attr in "nics", "disks":
1190 alist = bo.get(attr, None)
1192 nlist = outils.ContainerToDicts(alist)
1199 def FromDict(cls, val):
1200 """Custom function for instances.
1203 if "admin_state" not in val:
1204 if val.get("admin_up", False):
1205 val["admin_state"] = constants.ADMINST_UP
1207 val["admin_state"] = constants.ADMINST_DOWN
1208 if "admin_up" in val:
1210 obj = super(Instance, cls).FromDict(val)
1211 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC)
1212 obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk)
1215 def UpgradeConfig(self):
1216 """Fill defaults for missing configuration values.
1219 for nic in self.nics:
1221 for disk in self.disks:
1222 disk.UpgradeConfig()
1224 for key in constants.HVC_GLOBALS:
1226 del self.hvparams[key]
1229 if self.osparams is None:
1231 UpgradeBeParams(self.beparams)
1232 if self.disks_active is None:
1233 self.disks_active = self.admin_state == constants.ADMINST_UP
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    nv = name.split(cls.VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[0]

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  # NOTE(review): slot list partially reconstructed from the provider
  # script interface — confirm against the canonical source
  __slots__ = [
    "name",
    "path",
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]
class NodeHvState(ConfigObject):
  """Hypvervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS
class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS
class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overriden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overriden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj
class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  # NOTE(review): slot list partially reconstructed; only a subset was
  # visible in the garbled source — confirm against the canonical file
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    ] + _TIMESTAMPS + _UUID
1569 def UpgradeConfig(self):
1570 """Fill defaults for missing configuration values.
1573 # pylint: disable=E0203
1574 # because these are "defined" via slots, not manually
1575 if self.hvparams is None:
1576 self.hvparams = constants.HVC_DEFAULTS
1578 for hypervisor in self.hvparams:
1579 self.hvparams[hypervisor] = FillDict(
1580 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1582 if self.os_hvp is None:
1585 # osparams added before 2.2
1586 if self.osparams is None:
1589 self.ndparams = UpgradeNDParams(self.ndparams)
1591 self.beparams = UpgradeGroupedParams(self.beparams,
1592 constants.BEC_DEFAULTS)
1593 for beparams_group in self.beparams:
1594 UpgradeBeParams(self.beparams[beparams_group])
1596 migrate_default_bridge = not self.nicparams
1597 self.nicparams = UpgradeGroupedParams(self.nicparams,
1598 constants.NICC_DEFAULTS)
1599 if migrate_default_bridge:
1600 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1603 if self.modify_etc_hosts is None:
1604 self.modify_etc_hosts = True
1606 if self.modify_ssh_setup is None:
1607 self.modify_ssh_setup = True
1609 # default_bridge is no longer used in 2.1. The slot is left there to
1610 # support auto-upgrading. It can be removed once we decide to deprecate
1611 # upgrading straight from 2.0.
1612 if self.default_bridge is not None:
1613 self.default_bridge = None
1615 # default_hypervisor is just the first enabled one in 2.1. This slot and
1616 # code can be removed once upgrading straight from 2.0 is deprecated.
1617 if self.default_hypervisor is not None:
1618 self.enabled_hypervisors = ([self.default_hypervisor] +
1619 [hvname for hvname in self.enabled_hypervisors
1620 if hvname != self.default_hypervisor])
1621 self.default_hypervisor = None
1623 # maintain_node_health added after 2.1.1
1624 if self.maintain_node_health is None:
1625 self.maintain_node_health = False
1627 if self.uid_pool is None:
1630 if self.default_iallocator is None:
1631 self.default_iallocator = ""
1633 # reserved_lvs added before 2.2
1634 if self.reserved_lvs is None:
1635 self.reserved_lvs = []
1637 # hidden and blacklisted operating systems added before 2.2.1
1638 if self.hidden_os is None:
1641 if self.blacklisted_os is None:
1642 self.blacklisted_os = []
1644 # primary_ip_family added before 2.3
1645 if self.primary_ip_family is None:
1646 self.primary_ip_family = AF_INET
1648 if self.master_netmask is None:
1649 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1650 self.master_netmask = ipcls.iplen
1652 if self.prealloc_wipe_disks is None:
1653 self.prealloc_wipe_disks = False
1655 # shared_file_storage_dir added before 2.5
1656 if self.shared_file_storage_dir is None:
1657 self.shared_file_storage_dir = ""
1659 if self.use_external_mip_script is None:
1660 self.use_external_mip_script = False
1663 self.diskparams = UpgradeDiskParams(self.diskparams)
1665 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1667 # instance policy added before 2.6
1668 if self.ipolicy is None:
1669 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1671 # we can either make sure to upgrade the ipolicy always, or only
1672 # do it in some corner cases (e.g. missing keys); note that this
1673 # will break any removal of keys from the ipolicy dict
1674 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1676 # These keys would be silently removed by FillIPolicy()
1677 msg = ("Cluster instance policy contains spurious keys: %s" %
1678 utils.CommaJoin(wrongkeys))
1679 raise errors.ConfigurationError(msg)
1680 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1683 def primary_hypervisor(self):
1684 """The first hypervisor is the primary.
1686 Useful, for example, for L{Node}'s hv/disk state.
1689 return self.enabled_hypervisors[0]
1692 """Custom function for cluster.
1695 mydict = super(Cluster, self).ToDict()
1697 if self.tcpudp_port_pool is None:
1698 tcpudp_port_pool = []
1700 tcpudp_port_pool = list(self.tcpudp_port_pool)
1702 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1707 def FromDict(cls, val):
1708 """Custom function for cluster.
1711 obj = super(Cluster, cls).FromDict(val)
1713 if obj.tcpudp_port_pool is None:
1714 obj.tcpudp_port_pool = set()
1715 elif not isinstance(obj.tcpudp_port_pool, set):
1716 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)
1729 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1730 """Get the default hypervisor parameters for the cluster.
1732 @param hypervisor: the hypervisor name
1733 @param os_name: if specified, we'll also update the defaults for this OS
1734 @param skip_keys: if passed, list of keys not to use
1735 @return: the defaults dict
1738 if skip_keys is None:
1741 fill_stack = [self.hvparams.get(hypervisor, {})]
1742 if os_name is not None:
1743 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1744 fill_stack.append(os_hvp)
1747 for o_dict in fill_stack:
1748 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1752 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1753 """Fill a given hvparams dict with cluster defaults.
1755 @type hv_name: string
1756 @param hv_name: the hypervisor to use
1757 @type os_name: string
1758 @param os_name: the OS to use for overriding the hypervisor defaults
1759 @type skip_globals: boolean
1760 @param skip_globals: if True, the global hypervisor parameters will
1763 @return: a copy of the given hvparams with missing keys filled from
1764 the cluster defaults
1768 skip_keys = constants.HVC_GLOBALS
1772 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1773 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)
  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)
  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    # strip any variant suffix ("os+variant" -> "os")
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)
1848 def SimpleFillHvState(hv_state):
1849 """Fill an hv_state sub dict with cluster defaults.
1852 return FillDict(constants.HVST_DEFAULTS, hv_state)
1855 def SimpleFillDiskState(disk_state):
1856 """Fill an disk_state sub dict with cluster defaults.
1859 return FillDict(constants.DS_DEFAULTS, disk_state)
  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled
        (group values first, then cluster values)

    """
    return self.SimpleFillND(nodegroup.FillND(node))
  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)
  def SimpleFillIPolicy(self, ipolicy):
    """ Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
        the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)
  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    @type disk_template: string
    @param disk_template: the disk template to check for

    """
    return utils.storage.IsDiskTemplateEnabled(
        disk_template, self.enabled_disk_templates)
  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
        self.enabled_disk_templates)
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  # NOTE(review): slot list reconstructed — confirm against canonical source
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  # NOTE(review): only "progress_throughput" was visible; remaining slots
  # reconstructed — confirm against canonical source
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]
class _QueryResponseBase(ConfigObject):
  """Common (de)serialization logic for query responses carrying fields."""
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    mydict = super(_QueryResponseBase, self).ToDict()
    mydict["fields"] = outils.ContainerToDicts(mydict["fields"])
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = \
      outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
    return obj
class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]
class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  """
  __slots__ = [
    "what",
    "fields",
    ]
class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []
class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  """
  # NOTE(review): slot list reconstructed — confirm against canonical source
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]
class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    Each console kind requires a specific subset of the attributes to be
    set; the asserts below encode which attribute is mandatory for which
    kind.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    assert self.message or self.kind in [constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SSH]
    assert self.user or self.kind in [constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC]
    assert self.command or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC]
    assert self.display or self.kind in [constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH]
class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  # NOTE(review): slot list reconstructed — confirm against canonical source
  __slots__ = [
    "name",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    "serial_no",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    result = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
      }
    # optional attributes are only exported when set
    if self.network:
      result["%sNETWORK_SUBNET" % prefix] = self.network
    if self.gateway:
      result["%sNETWORK_GATEWAY" % prefix] = self.gateway
    if self.network6:
      result["%sNETWORK_SUBNET6" % prefix] = self.network6
    if self.gateway6:
      result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
    if self.mac_prefix:
      result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix

    return result

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    if "network_type" in val:
      del val["network_type"]
    if "family" in val:
      del val["family"]
    obj = super(Network, cls).FromDict(val)
    return obj
class SerializableConfigParser(ConfigParser.SafeConfigParser):
  """Simple wrapper over ConfigParse that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    buf = StringIO(data)
    cfp = cls()
    cfp.readfp(buf)
    return cfp
class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV

  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list",
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    # allow 1 MiB of slack for metadata/rounding
    return self.size <= (self.free + 1)

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    # "a" in the LVM attribute string marks an allocatable PV
    return ("a" in self.attributes)