X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/00fb824669888dce00af0b3932ff0c736f6e2e2f..a85f23fa4fdb4118c5a36155103a1ba573b1340f:/lib/objects.py diff --git a/lib/objects.py b/lib/objects.py index 522162a..db4e12e 100644 --- a/lib/objects.py +++ b/lib/objects.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,17 +26,217 @@ pass to and from external parties. """ +# pylint: disable=E0203,W0201,R0902 + +# E0203: Access to member %r before its definition, since we use +# objects.py which doesn't explicitely initialise its members + +# W0201: Attribute '%s' defined outside __init__ + +# R0902: Allow instances of these objects to have more than 20 attributes import ConfigParser import re +import copy +import time from cStringIO import StringIO from ganeti import errors from ganeti import constants +from ganeti import netutils +from ganeti import utils + +from socket import AF_INET __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", - "OS", "Node", "Cluster"] + "OS", "Node", "NodeGroup", "Cluster", "FillDict"] + +_TIMESTAMPS = ["ctime", "mtime"] +_UUID = ["uuid"] + +# constants used to create InstancePolicy dictionary +TISPECS_GROUP_TYPES = { + constants.ISPECS_MIN: constants.VTYPE_INT, + constants.ISPECS_MAX: constants.VTYPE_INT, + } + +TISPECS_CLUSTER_TYPES = { + constants.ISPECS_MIN: constants.VTYPE_INT, + constants.ISPECS_MAX: constants.VTYPE_INT, + constants.ISPECS_STD: constants.VTYPE_INT, + } + + +def FillDict(defaults_dict, custom_dict, skip_keys=None): + """Basic function to apply settings on top a default dict. 
+ + @type defaults_dict: dict + @param defaults_dict: dictionary holding the default values + @type custom_dict: dict + @param custom_dict: dictionary holding customized value + @type skip_keys: list + @param skip_keys: which keys not to fill + @rtype: dict + @return: dict with the 'full' values + + """ + ret_dict = copy.deepcopy(defaults_dict) + ret_dict.update(custom_dict) + if skip_keys: + for k in skip_keys: + try: + del ret_dict[k] + except KeyError: + pass + return ret_dict + + +def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None): + """Fills an instance policy with defaults. + + """ + assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS + ret_dict = {} + for key in constants.IPOLICY_ISPECS: + ret_dict[key] = FillDict(default_ipolicy[key], + custom_ipolicy.get(key, {}), + skip_keys=skip_keys) + # list items + for key in [constants.IPOLICY_DTS]: + ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key])) + # other items which we know we can directly copy (immutables) + for key in constants.IPOLICY_PARAMETERS: + ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key]) + + return ret_dict + + +def UpgradeGroupedParams(target, defaults): + """Update all groups for the target parameter. + + @type target: dict of dicts + @param target: {group: {parameter: value}} + @type defaults: dict + @param defaults: default parameter values + + """ + if target is None: + target = {constants.PP_DEFAULT: defaults} + else: + for group in target: + target[group] = FillDict(defaults, target[group]) + return target + + +def UpgradeBeParams(target): + """Update the be parameters dict to the new format. + + @type target: dict + @param target: "be" parameters dict + + """ + if constants.BE_MEMORY in target: + memory = target[constants.BE_MEMORY] + target[constants.BE_MAXMEM] = memory + target[constants.BE_MINMEM] = memory + del target[constants.BE_MEMORY] + + +def UpgradeDiskParams(diskparams): + """Upgrade the disk parameters. 
+ + @type diskparams: dict + @param diskparams: disk parameters to upgrade + @rtype: dict + @return: the upgraded disk parameters dit + + """ + result = dict() + if diskparams is None: + result = constants.DISK_DT_DEFAULTS.copy() + else: + # Update the disk parameter values for each disk template. + # The code iterates over constants.DISK_TEMPLATES because new templates + # might have been added. + for template in constants.DISK_TEMPLATES: + if template not in diskparams: + result[template] = constants.DISK_DT_DEFAULTS[template].copy() + else: + result[template] = FillDict(constants.DISK_DT_DEFAULTS[template], + diskparams[template]) + + return result + + +def MakeEmptyIPolicy(): + """Create empty IPolicy dictionary. + + """ + return dict([ + (constants.ISPECS_MIN, {}), + (constants.ISPECS_MAX, {}), + (constants.ISPECS_STD, {}), + ]) + + +def CreateIPolicyFromOpts(ispecs_mem_size=None, + ispecs_cpu_count=None, + ispecs_disk_count=None, + ispecs_disk_size=None, + ispecs_nic_count=None, + ipolicy_disk_templates=None, + ipolicy_vcpu_ratio=None, + group_ipolicy=False, + allowed_values=None, + fill_all=False): + """Creation of instance policy based on command line options. 
+ + @param fill_all: whether for cluster policies we should ensure that + all values are filled + + + """ + # prepare ipolicy dict + ipolicy_transposed = { + constants.ISPEC_MEM_SIZE: ispecs_mem_size, + constants.ISPEC_CPU_COUNT: ispecs_cpu_count, + constants.ISPEC_DISK_COUNT: ispecs_disk_count, + constants.ISPEC_DISK_SIZE: ispecs_disk_size, + constants.ISPEC_NIC_COUNT: ispecs_nic_count, + } + + # first, check that the values given are correct + if group_ipolicy: + forced_type = TISPECS_GROUP_TYPES + else: + forced_type = TISPECS_CLUSTER_TYPES + + for specs in ipolicy_transposed.values(): + utils.ForceDictType(specs, forced_type, allowed_values=allowed_values) + + # then transpose + ipolicy_out = MakeEmptyIPolicy() + for name, specs in ipolicy_transposed.iteritems(): + assert name in constants.ISPECS_PARAMETERS + for key, val in specs.items(): # {min: .. ,max: .., std: ..} + ipolicy_out[key][name] = val + + # no filldict for non-dicts + if not group_ipolicy and fill_all: + if ipolicy_disk_templates is None: + ipolicy_disk_templates = constants.DISK_TEMPLATES + if ipolicy_vcpu_ratio is None: + ipolicy_vcpu_ratio = \ + constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO] + if ipolicy_disk_templates is not None: + ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates) + if ipolicy_vcpu_ratio is not None: + ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio + + assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS) + + return ipolicy_out class ConfigObject(object): @@ -49,7 +249,7 @@ class ConfigObject(object): as None instead of raising an error Classes derived from this must always declare __slots__ (we use many - config objects and the memory reduction is useful. 
+ config objects and the memory reduction is useful) """ __slots__ = [] @@ -59,28 +259,27 @@ class ConfigObject(object): setattr(self, k, v) def __getattr__(self, name): - if name not in self.__slots__: + if name not in self._all_slots(): raise AttributeError("Invalid object attribute %s.%s" % (type(self).__name__, name)) return None - def __setitem__(self, key, value): - if key not in self.__slots__: - raise KeyError(key) - setattr(self, key, value) - - def __getstate__(self): - state = {} - for name in self.__slots__: - if hasattr(self, name): - state[name] = getattr(self, name) - return state - def __setstate__(self, state): + slots = self._all_slots() for name in state: - if name in self.__slots__: + if name in slots: setattr(self, name, state[name]) + @classmethod + def _all_slots(cls): + """Compute the list of all declared slots for a class. + + """ + slots = [] + for parent in cls.__mro__: + slots.extend(getattr(parent, "__slots__", [])) + return slots + def ToDict(self): """Convert to a dict holding only standard python types. @@ -91,7 +290,14 @@ class ConfigObject(object): make sure all objects returned are only standard python types. 
""" - return dict([(k, getattr(self, k, None)) for k in self.__slots__]) + result = {} + for name in self._all_slots(): + value = getattr(self, name, None) + if value is not None: + result[name] = value + return result + + __getstate__ = ToDict @classmethod def FromDict(cls, val): @@ -110,7 +316,7 @@ class ConfigObject(object): raise errors.ConfigurationError("Invalid object passed to FromDict:" " expected dict, got %s" % type(val)) val_str = dict([(str(k), v) for k, v in val.iteritems()]) - obj = cls(**val_str) + obj = cls(**val_str) # pylint: disable=W0142 return obj @staticmethod @@ -143,6 +349,8 @@ class ConfigObject(object): if not isinstance(c_type, type): raise TypeError("Container type %s passed to _ContainerFromDicts is" " not a type" % type(c_type)) + if source is None: + source = c_type() if c_type is dict: ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()]) elif c_type in (list, tuple, set, frozenset): @@ -152,19 +360,37 @@ class ConfigObject(object): " _ContainerFromDicts" % c_type) return ret + def Copy(self): + """Makes a deep copy of the current object and its children. + + """ + dict_form = self.ToDict() + clone_obj = self.__class__.FromDict(dict_form) + return clone_obj + def __repr__(self): """Implement __repr__ for ConfigObjects.""" return repr(self.ToDict()) + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + This method will be called at configuration load time, and its + implementation will be object dependent. + + """ + pass + class TaggableObject(ConfigObject): """An generic class supporting tags. """ - __slots__ = ConfigObject.__slots__ + ["tags"] + __slots__ = ["tags"] + VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$") - @staticmethod - def ValidateTag(tag): + @classmethod + def ValidateTag(cls, tag): """Check if a tag is valid. If the tag is invalid, an errors.TagError will be raised. 
The @@ -178,7 +404,7 @@ class TaggableObject(ConfigObject): constants.MAX_TAG_LEN) if not tag: raise errors.TagError("Tags cannot be empty") - if not re.match("^[ \w.+*/:-]+$", tag): + if not cls.VALID_TAG_RE.match(tag): raise errors.TagError("Tag contains invalid characters") def GetTags(self): @@ -235,9 +461,35 @@ class TaggableObject(ConfigObject): return obj +class MasterNetworkParameters(ConfigObject): + """Network configuration parameters for the master + + @ivar name: master name + @ivar ip: master IP + @ivar netmask: master netmask + @ivar netdev: master network device + @ivar ip_family: master IP family + + """ + __slots__ = [ + "name", + "ip", + "netmask", + "netdev", + "ip_family" + ] + + class ConfigData(ConfigObject): """Top-level config object.""" - __slots__ = ["cluster", "nodes", "instances"] + __slots__ = [ + "version", + "cluster", + "nodes", + "nodegroups", + "instances", + "serial_no", + ] + _TIMESTAMPS def ToDict(self): """Custom function for top-level config data. @@ -248,7 +500,7 @@ class ConfigData(ConfigObject): """ mydict = super(ConfigData, self).ToDict() mydict["cluster"] = mydict["cluster"].ToDict() - for key in "nodes", "instances": + for key in "nodes", "instances", "nodegroups": mydict[key] = self._ContainerToDicts(mydict[key]) return mydict @@ -262,18 +514,73 @@ class ConfigData(ConfigObject): obj.cluster = Cluster.FromDict(obj.cluster) obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node) obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance) + obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup) return obj + def HasAnyDiskOfType(self, dev_type): + """Check if in there is at disk of the given type in the configuration. 
+ + @type dev_type: L{constants.LDS_BLOCK} + @param dev_type: the type to look for + @rtype: boolean + @return: boolean indicating if a disk of the given type was found or not + + """ + for instance in self.instances.values(): + for disk in instance.disks: + if disk.IsBasedOnDiskType(dev_type): + return True + return False + + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + """ + self.cluster.UpgradeConfig() + for node in self.nodes.values(): + node.UpgradeConfig() + for instance in self.instances.values(): + instance.UpgradeConfig() + if self.nodegroups is None: + self.nodegroups = {} + for nodegroup in self.nodegroups.values(): + nodegroup.UpgradeConfig() + if self.cluster.drbd_usermode_helper is None: + # To decide if we set an helper let's check if at least one instance has + # a DRBD disk. This does not cover all the possible scenarios but it + # gives a good approximation. + if self.HasAnyDiskOfType(constants.LD_DRBD8): + self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER + class NIC(ConfigObject): """Config object representing a network card.""" - __slots__ = ["mac", "ip", "bridge"] + __slots__ = ["mac", "ip", "nicparams"] + + @classmethod + def CheckParameterSyntax(cls, nicparams): + """Check the given parameters for validity. 
+ + @type nicparams: dict + @param nicparams: dictionary with parameter names/value + @raise errors.ConfigurationError: when a parameter is not valid + + """ + if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and + nicparams[constants.NIC_MODE] != constants.VALUE_AUTO): + err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE] + raise errors.ConfigurationError(err) + + if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and + not nicparams[constants.NIC_LINK]): + err = "Missing bridged nic link" + raise errors.ConfigurationError(err) class Disk(ConfigObject): """Config object representing a block device.""" __slots__ = ["dev_type", "logical_id", "physical_id", - "children", "iv_name", "size"] + "children", "iv_name", "size", "mode", "params"] def CreateOnSecondary(self): """Test if this device needs to be created on a secondary node.""" @@ -294,9 +601,16 @@ class Disk(ConfigObject): irrespective of their status. For such devices, we return this path, for others we return None. + @warning: The path returned is not a normalized pathname; callers + should check that it is a valid path. + """ if self.dev_type == constants.LD_LV: return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) + elif self.dev_type == constants.LD_BLOCKDEV: + return self.logical_id[1] + elif self.dev_type == constants.LD_RBD: + return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) return None def ChildrenNeeded(self): @@ -315,6 +629,21 @@ class Disk(ConfigObject): return 0 return -1 + def IsBasedOnDiskType(self, dev_type): + """Check if the disk or its children are based on the given type. 
+ + @type dev_type: L{constants.LDS_BLOCK} + @param dev_type: the type to look for + @rtype: boolean + @return: boolean indicating if a device of the given type was found or not + + """ + if self.children: + for child in self.children: + if child.IsBasedOnDiskType(dev_type): + return True + return self.dev_type == dev_type + def GetNodes(self, node): """This function returns the nodes this device lives on. @@ -324,7 +653,8 @@ class Disk(ConfigObject): devices needs to (or can) be assembled. """ - if self.dev_type in [constants.LD_LV, constants.LD_FILE]: + if self.dev_type in [constants.LD_LV, constants.LD_FILE, + constants.LD_BLOCKDEV, constants.LD_RBD]: result = [node] elif self.dev_type in constants.LDS_DRBD: result = [self.logical_id[0], self.logical_id[1]] @@ -369,6 +699,28 @@ class Disk(ConfigObject): # be different) return result + def ComputeGrowth(self, amount): + """Compute the per-VG growth requirements. + + This only works for VG-based disks. + + @type amount: integer + @param amount: the desired increase in (user-visible) disk space + @rtype: dict + @return: a dictionary of volume-groups and the required size + + """ + if self.dev_type == constants.LD_LV: + return {self.logical_id[0]: amount} + elif self.dev_type == constants.LD_DRBD8: + if self.children: + return self.children[0].ComputeGrowth(amount) + else: + return {} + else: + # Other disk types do not require VG space + return {} + def RecordGrow(self, amount): """Update the size of this disk after growth. @@ -377,7 +729,8 @@ class Disk(ConfigObject): actual algorithms from bdev. 
""" - if self.dev_type == constants.LD_LV: + if self.dev_type in (constants.LD_LV, constants.LD_FILE, + constants.LD_RBD): self.size += amount elif self.dev_type == constants.LD_DRBD8: if self.children: @@ -387,6 +740,30 @@ class Disk(ConfigObject): raise errors.ProgrammerError("Disk.RecordGrow called for unsupported" " disk type %s" % self.dev_type) + def Update(self, size=None, mode=None): + """Apply changes to size and mode. + + """ + if self.dev_type == constants.LD_DRBD8: + if self.children: + self.children[0].Update(size=size, mode=mode) + else: + assert not self.children + + if size is not None: + self.size = size + if mode is not None: + self.mode = mode + + def UnsetSize(self): + """Sets recursively the size to zero for the disk and its children. + + """ + if self.children: + for child in self.children: + child.UnsetSize() + self.size = 0 + def SetPhysicalID(self, target_node, nodes_ip): """Convert the logical ID to the physical ID. @@ -412,7 +789,7 @@ class Disk(ConfigObject): if self.logical_id is None and self.physical_id is not None: return if self.dev_type in constants.LDS_DRBD: - pnode, snode, port = self.logical_id + pnode, snode, port, pminor, sminor, secret = self.logical_id if target_node not in (pnode, snode): raise errors.ConfigurationError("DRBD device not knowing node %s" % target_node) @@ -421,12 +798,12 @@ class Disk(ConfigObject): if pnode_ip is None or snode_ip is None: raise errors.ConfigurationError("Can't find primary or secondary node" " for %s" % str(self)) + p_data = (pnode_ip, port) + s_data = (snode_ip, port) if pnode == target_node: - self.physical_id = (pnode_ip, port, - snode_ip, port) + self.physical_id = p_data + s_data + (pminor, secret) else: # it must be secondary, we tested above - self.physical_id = (snode_ip, port, - pnode_ip, port) + self.physical_id = s_data + p_data + (sminor, secret) else: self.physical_id = self.logical_id return @@ -458,6 +835,10 @@ class Disk(ConfigObject): obj.logical_id = tuple(obj.logical_id) 
if obj.physical_id and isinstance(obj.physical_id, list): obj.physical_id = tuple(obj.physical_id) + if obj.dev_type in constants.LDS_DRBD: + # we need a tuple of length six here + if len(obj.logical_id) < 6: + obj.logical_id += (None,) * (6 - len(obj.logical_id)) return obj def __str__(self): @@ -465,8 +846,9 @@ class Disk(ConfigObject): """ if self.dev_type == constants.LD_LV: - val = " std_v or std_v > max_v: + raise errors.ConfigurationError(err) + + @classmethod + def CheckDiskTemplates(cls, disk_templates): + """Checks the disk templates for validity. + + """ + wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) + if wrong: + raise errors.ConfigurationError("Invalid disk template(s) %s" % + utils.CommaJoin(wrong)) + + @classmethod + def CheckParameter(cls, key, value): + """Checks a parameter. + + Currently we expect all parameters to be float values. + + """ + try: + float(value) + except (TypeError, ValueError), err: + raise errors.ConfigurationError("Invalid value for key" " '%s':" + " '%s', error: %s" % (key, value, err)) + class Instance(TaggableObject): """Config object representing an instance.""" - __slots__ = TaggableObject.__slots__ + [ + __slots__ = [ "name", "primary_node", "os", - "status", - "memory", - "vcpus", + "hypervisor", + "hvparams", + "beparams", + "osparams", + "admin_state", "nics", "disks", "disk_template", "network_port", - "kernel_path", - "initrd_path", - "hvm_boot_order", - "hvm_acpi", - "hvm_pae", - "hvm_cdrom_image_path", - "vnc_bind_address", - ] + "serial_no", + ] + _TIMESTAMPS + _UUID def _ComputeSecondaryNodes(self): """Compute the list of secondary nodes. + This is a simple wrapper over _ComputeAllNodes. + + """ + all_nodes = set(self._ComputeAllNodes()) + all_nodes.discard(self.primary_node) + return tuple(all_nodes) + + secondary_nodes = property(_ComputeSecondaryNodes, None, None, + "List of secondary nodes") + + def _ComputeAllNodes(self): + """Compute the list of all nodes. 
+ Since the data is already there (in the drbd disks), keeping it as a separate normal attribute is redundant and if not properly synchronised can cause problems. Thus it's better to compute it dynamically. """ - def _Helper(primary, sec_nodes, device): - """Recursively computes secondary nodes given a top device.""" + def _Helper(nodes, device): + """Recursively computes nodes given a top device.""" if device.dev_type in constants.LDS_DRBD: - nodea, nodeb, dummy = device.logical_id - if nodea == primary: - candidate = nodeb - else: - candidate = nodea - if candidate not in sec_nodes: - sec_nodes.append(candidate) + nodea, nodeb = device.logical_id[:2] + nodes.add(nodea) + nodes.add(nodeb) if device.children: for child in device.children: - _Helper(primary, sec_nodes, child) + _Helper(nodes, child) - secondary_nodes = [] + all_nodes = set() + all_nodes.add(self.primary_node) for device in self.disks: - _Helper(self.primary_node, secondary_nodes, device) - return tuple(secondary_nodes) + _Helper(all_nodes, device) + return tuple(all_nodes) - secondary_nodes = property(_ComputeSecondaryNodes, None, None, - "List of secondary nodes") + all_nodes = property(_ComputeAllNodes, None, None, + "List of all nodes of the instance") def MapLVsByNode(self, lvmap=None, devs=None, node=None): """Provide a mapping of nodes to LVs this instance owns. - This function figures out what logical volumes should belong on which - nodes, recursing through a device tree. + This function figures out what logical volumes should belong on + which nodes, recursing through a device tree. - Args: - lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data. + @param lvmap: optional dictionary to receive the + 'node' : ['lv', ...] data. - Returns: - None if lvmap arg is given. - Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... } + @return: None if lvmap arg is given, otherwise, a dictionary of + the form { 'nodename' : ['volume1', 'volume2', ...], ... 
}; + volumeN is of the form "vg_name/lv_name", compatible with + GetVolumeList() """ if node == None: node = self.primary_node if lvmap is None: - lvmap = { node : [] } + lvmap = { + node: [], + } ret = lvmap else: if not node in lvmap: @@ -576,15 +1067,9 @@ class Instance(TaggableObject): for dev in devs: if dev.dev_type == constants.LD_LV: - lvmap[node].append(dev.logical_id[1]) + lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1]) elif dev.dev_type in constants.LDS_DRBD: - if dev.logical_id[0] not in lvmap: - lvmap[dev.logical_id[0]] = [] - - if dev.logical_id[1] not in lvmap: - lvmap[dev.logical_id[1]] = [] - if dev.children: self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) @@ -594,17 +1079,28 @@ class Instance(TaggableObject): return ret - def FindDisk(self, name): - """Find a disk given having a specified name. + def FindDisk(self, idx): + """Find a disk given having a specified index. - This will return the disk which has the given iv_name. + This is just a wrapper that does validation of the index. - """ - for disk in self.disks: - if disk.iv_name == name: - return disk + @type idx: int + @param idx: the disk index + @rtype: L{Disk} + @return: the corresponding disk + @raise errors.OpPrereqError: when the given index is not valid - return None + """ + try: + idx = int(idx) + return self.disks[idx] + except (TypeError, ValueError), err: + raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err), + errors.ECODE_INVAL) + except IndexError: + raise errors.OpPrereqError("Invalid disk index: %d (instace has disks" + " 0 to %d" % (idx, len(self.disks) - 1), + errors.ECODE_INVAL) def ToDict(self): """Instance-specific conversion to standard python types. @@ -629,65 +1125,451 @@ class Instance(TaggableObject): """Custom function for instances. 
""" + if "admin_state" not in val: + if val.get("admin_up", False): + val["admin_state"] = constants.ADMINST_UP + else: + val["admin_state"] = constants.ADMINST_DOWN + if "admin_up" in val: + del val["admin_up"] obj = super(Instance, cls).FromDict(val) obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC) obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk) return obj + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + """ + for nic in self.nics: + nic.UpgradeConfig() + for disk in self.disks: + disk.UpgradeConfig() + if self.hvparams: + for key in constants.HVC_GLOBALS: + try: + del self.hvparams[key] + except KeyError: + pass + if self.osparams is None: + self.osparams = {} + UpgradeBeParams(self.beparams) + class OS(ConfigObject): - """Config object representing an operating system.""" + """Config object representing an operating system. + + @type supported_parameters: list + @ivar supported_parameters: a list of tuples, name and description, + containing the supported parameters by this OS + + @type VARIANT_DELIM: string + @cvar VARIANT_DELIM: the variant delimiter + + """ __slots__ = [ "name", "path", - "status", - "api_version", + "api_versions", "create_script", "export_script", "import_script", "rename_script", + "verify_script", + "supported_variants", + "supported_parameters", ] + VARIANT_DELIM = "+" + @classmethod - def FromInvalidOS(cls, err): - """Create an OS from an InvalidOS error. + def SplitNameVariant(cls, name): + """Splits the name into the proper name and variant. - This routine knows how to convert an InvalidOS error to an OS - object representing the broken OS with a meaningful error message. 
+ @param name: the OS (unprocessed) name + @rtype: list + @return: a list of two elements; if the original name didn't + contain a variant, it's returned as an empty string """ - if not isinstance(err, errors.InvalidOS): - raise errors.ProgrammerError("Trying to initialize an OS from an" - " invalid object of type %s" % type(err)) + nv = name.split(cls.VARIANT_DELIM, 1) + if len(nv) == 1: + nv.append("") + return nv - return cls(name=err.args[0], path=err.args[1], status=err.args[2]) + @classmethod + def GetName(cls, name): + """Returns the proper name of the os (without the variant). - def __nonzero__(self): - return self.status == constants.OS_VALID_STATUS + @param name: the OS (unprocessed) name - __bool__ = __nonzero__ + """ + return cls.SplitNameVariant(name)[0] + + @classmethod + def GetVariant(cls, name): + """Returns the variant the os (without the base name). + + @param name: the OS (unprocessed) name + + """ + return cls.SplitNameVariant(name)[1] + + +class NodeHvState(ConfigObject): + """Hypvervisor state on a node. + + @ivar mem_total: Total amount of memory + @ivar mem_node: Memory used by, or reserved for, the node itself (not always + available) + @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation + rounding + @ivar mem_inst: Memory used by instances living on node + @ivar cpu_total: Total node CPU core count + @ivar cpu_node: Number of CPU cores reserved for the node itself + + """ + __slots__ = [ + "mem_total", + "mem_node", + "mem_hv", + "mem_inst", + "cpu_total", + "cpu_node", + ] + _TIMESTAMPS + + +class NodeDiskState(ConfigObject): + """Disk state on a node. + + """ + __slots__ = [ + "total", + "reserved", + "overhead", + ] + _TIMESTAMPS class Node(TaggableObject): - """Config object representing a node.""" - __slots__ = TaggableObject.__slots__ + [ + """Config object representing a node. + + @ivar hv_state: Hypervisor state (e.g. 
number of CPUs) + @ivar hv_state_static: Hypervisor state overriden by user + @ivar disk_state: Disk state (e.g. free space) + @ivar disk_state_static: Disk state overriden by user + + """ + __slots__ = [ "name", "primary_ip", "secondary_ip", - ] + "serial_no", + "master_candidate", + "offline", + "drained", + "group", + "master_capable", + "vm_capable", + "ndparams", + "powered", + "hv_state", + "hv_state_static", + "disk_state", + "disk_state_static", + ] + _TIMESTAMPS + _UUID + + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + """ + # pylint: disable=E0203 + # because these are "defined" via slots, not manually + if self.master_capable is None: + self.master_capable = True + + if self.vm_capable is None: + self.vm_capable = True + + if self.ndparams is None: + self.ndparams = {} + + if self.powered is None: + self.powered = True + + def ToDict(self): + """Custom function for serializing. + + """ + data = super(Node, self).ToDict() + + hv_state = data.get("hv_state", None) + if hv_state is not None: + data["hv_state"] = self._ContainerToDicts(hv_state) + + disk_state = data.get("disk_state", None) + if disk_state is not None: + data["disk_state"] = \ + dict((key, self._ContainerToDicts(value)) + for (key, value) in disk_state.items()) + + return data + + @classmethod + def FromDict(cls, val): + """Custom function for deserializing. 
+ + """ + obj = super(Node, cls).FromDict(val) + + if obj.hv_state is not None: + obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState) + + if obj.disk_state is not None: + obj.disk_state = \ + dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState)) + for (key, value) in obj.disk_state.items()) + + return obj + + +class NodeGroup(TaggableObject): + """Config object representing a node group.""" + __slots__ = [ + "name", + "members", + "ndparams", + "diskparams", + "ipolicy", + "serial_no", + "hv_state_static", + "disk_state_static", + "alloc_policy", + ] + _TIMESTAMPS + _UUID + + def ToDict(self): + """Custom function for nodegroup. + + This discards the members object, which gets recalculated and is only kept + in memory. + + """ + mydict = super(NodeGroup, self).ToDict() + del mydict["members"] + return mydict + + @classmethod + def FromDict(cls, val): + """Custom function for nodegroup. + + The members slot is initialized to an empty list, upon deserialization. + + """ + obj = super(NodeGroup, cls).FromDict(val) + obj.members = [] + return obj + + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + """ + if self.ndparams is None: + self.ndparams = {} + + if self.serial_no is None: + self.serial_no = 1 + + if self.alloc_policy is None: + self.alloc_policy = constants.ALLOC_POLICY_PREFERRED + + # We only update mtime, and not ctime, since we would not be able + # to provide a correct value for creation time. 
+ if self.mtime is None: + self.mtime = time.time() + + self.diskparams = UpgradeDiskParams(self.diskparams) + if self.ipolicy is None: + self.ipolicy = MakeEmptyIPolicy() + + def FillND(self, node): + """Return filled out ndparams for L{objects.Node} + + @type node: L{objects.Node} + @param node: A Node object to fill + @return a copy of the node's ndparams with defaults filled + + """ + return self.SimpleFillND(node.ndparams) + + def SimpleFillND(self, ndparams): + """Fill a given ndparams dict with defaults. + + @type ndparams: dict + @param ndparams: the dict to fill + @rtype: dict + @return: a copy of the passed in ndparams with missing keys filled + from the node group defaults + + """ + return FillDict(self.ndparams, ndparams) class Cluster(TaggableObject): """Config object representing the cluster.""" - __slots__ = TaggableObject.__slots__ + [ + __slots__ = [ "serial_no", "rsahostkeypub", "highest_used_port", "tcpudp_port_pool", "mac_prefix", "volume_group_name", + "reserved_lvs", + "drbd_usermode_helper", "default_bridge", - ] + "default_hypervisor", + "master_node", + "master_ip", + "master_netdev", + "master_netmask", + "use_external_mip_script", + "cluster_name", + "file_storage_dir", + "shared_file_storage_dir", + "enabled_hypervisors", + "hvparams", + "ipolicy", + "os_hvp", + "beparams", + "osparams", + "nicparams", + "ndparams", + "diskparams", + "candidate_pool_size", + "modify_etc_hosts", + "modify_ssh_setup", + "maintain_node_health", + "uid_pool", + "default_iallocator", + "hidden_os", + "blacklisted_os", + "primary_ip_family", + "prealloc_wipe_disks", + "hv_state_static", + "disk_state_static", + ] + _TIMESTAMPS + _UUID + + def UpgradeConfig(self): + """Fill defaults for missing configuration values. 
+ + """ + # pylint: disable=E0203 + # because these are "defined" via slots, not manually + if self.hvparams is None: + self.hvparams = constants.HVC_DEFAULTS + else: + for hypervisor in self.hvparams: + self.hvparams[hypervisor] = FillDict( + constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor]) + + if self.os_hvp is None: + self.os_hvp = {} + + # osparams added before 2.2 + if self.osparams is None: + self.osparams = {} + + if self.ndparams is None: + self.ndparams = constants.NDC_DEFAULTS + + self.beparams = UpgradeGroupedParams(self.beparams, + constants.BEC_DEFAULTS) + for beparams_group in self.beparams: + UpgradeBeParams(self.beparams[beparams_group]) + + migrate_default_bridge = not self.nicparams + self.nicparams = UpgradeGroupedParams(self.nicparams, + constants.NICC_DEFAULTS) + if migrate_default_bridge: + self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \ + self.default_bridge + + if self.modify_etc_hosts is None: + self.modify_etc_hosts = True + + if self.modify_ssh_setup is None: + self.modify_ssh_setup = True + + # default_bridge is no longer used in 2.1. The slot is left there to + # support auto-upgrading. It can be removed once we decide to deprecate + # upgrading straight from 2.0. + if self.default_bridge is not None: + self.default_bridge = None + + # default_hypervisor is just the first enabled one in 2.1. This slot and + # code can be removed once upgrading straight from 2.0 is deprecated. 
+ if self.default_hypervisor is not None: + self.enabled_hypervisors = ([self.default_hypervisor] + + [hvname for hvname in self.enabled_hypervisors + if hvname != self.default_hypervisor]) + self.default_hypervisor = None + + # maintain_node_health added after 2.1.1 + if self.maintain_node_health is None: + self.maintain_node_health = False + + if self.uid_pool is None: + self.uid_pool = [] + + if self.default_iallocator is None: + self.default_iallocator = "" + + # reserved_lvs added before 2.2 + if self.reserved_lvs is None: + self.reserved_lvs = [] + + # hidden and blacklisted operating systems added before 2.2.1 + if self.hidden_os is None: + self.hidden_os = [] + + if self.blacklisted_os is None: + self.blacklisted_os = [] + + # primary_ip_family added before 2.3 + if self.primary_ip_family is None: + self.primary_ip_family = AF_INET + + if self.master_netmask is None: + ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family) + self.master_netmask = ipcls.iplen + + if self.prealloc_wipe_disks is None: + self.prealloc_wipe_disks = False + + # shared_file_storage_dir added before 2.5 + if self.shared_file_storage_dir is None: + self.shared_file_storage_dir = "" + + if self.use_external_mip_script is None: + self.use_external_mip_script = False + + self.diskparams = UpgradeDiskParams(self.diskparams) + + # instance policy added before 2.6 + if self.ipolicy is None: + self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {}) + else: + # we can either make sure to upgrade the ipolicy always, or only + # do it in some corner cases (e.g. missing keys); note that this + # will break any removal of keys from the ipolicy dict + self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy) + + @property + def primary_hypervisor(self): + """The first hypervisor is the primary. + + Useful, for example, for L{Node}'s hv/disk state. + + """ + return self.enabled_hypervisors[0] def ToDict(self): """Custom function for cluster. 
@@ -707,6 +1589,390 @@ class Cluster(TaggableObject): obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) return obj + def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None): + """Get the default hypervisor parameters for the cluster. + + @param hypervisor: the hypervisor name + @param os_name: if specified, we'll also update the defaults for this OS + @param skip_keys: if passed, list of keys not to use + @return: the defaults dict + + """ + if skip_keys is None: + skip_keys = [] + + fill_stack = [self.hvparams.get(hypervisor, {})] + if os_name is not None: + os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {}) + fill_stack.append(os_hvp) + + ret_dict = {} + for o_dict in fill_stack: + ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys) + + return ret_dict + + def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False): + """Fill a given hvparams dict with cluster defaults. + + @type hv_name: string + @param hv_name: the hypervisor to use + @type os_name: string + @param os_name: the OS to use for overriding the hypervisor defaults + @type skip_globals: boolean + @param skip_globals: if True, the global hypervisor parameters will + not be filled + @rtype: dict + @return: a copy of the given hvparams with missing keys filled from + the cluster defaults + + """ + if skip_globals: + skip_keys = constants.HVC_GLOBALS + else: + skip_keys = [] + + def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys) + return FillDict(def_dict, hvparams, skip_keys=skip_keys) + + def FillHV(self, instance, skip_globals=False): + """Fill an instance's hvparams dict with cluster defaults. 
+ + @type instance: L{objects.Instance} + @param instance: the instance parameter to fill + @type skip_globals: boolean + @param skip_globals: if True, the global hypervisor parameters will + not be filled + @rtype: dict + @return: a copy of the instance's hvparams with missing keys filled from + the cluster defaults + + """ + return self.SimpleFillHV(instance.hypervisor, instance.os, + instance.hvparams, skip_globals) + + def SimpleFillBE(self, beparams): + """Fill a given beparams dict with cluster defaults. + + @type beparams: dict + @param beparams: the dict to fill + @rtype: dict + @return: a copy of the passed in beparams with missing keys filled + from the cluster defaults + + """ + return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams) + + def FillBE(self, instance): + """Fill an instance's beparams dict with cluster defaults. + + @type instance: L{objects.Instance} + @param instance: the instance parameter to fill + @rtype: dict + @return: a copy of the instance's beparams with missing keys filled from + the cluster defaults + + """ + return self.SimpleFillBE(instance.beparams) + + def SimpleFillNIC(self, nicparams): + """Fill a given nicparams dict with cluster defaults. + + @type nicparams: dict + @param nicparams: the dict to fill + @rtype: dict + @return: a copy of the passed in nicparams with missing keys filled + from the cluster defaults + + """ + return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams) + + def SimpleFillOS(self, os_name, os_params): + """Fill an instance's osparams dict with cluster defaults. 
+
+    @type os_name: string
+    @param os_name: the OS name to use
+    @type os_params: dict
+    @param os_params: the dict to fill with default values
+    @rtype: dict
+    @return: a copy of the instance's osparams with missing keys filled from
+        the cluster defaults
+
+    """
+    name_only = os_name.split("+", 1)[0]
+    # base OS
+    result = self.osparams.get(name_only, {})
+    # OS with variant
+    result = FillDict(result, self.osparams.get(os_name, {}))
+    # specified params
+    return FillDict(result, os_params)
+
+  @staticmethod
+  def SimpleFillHvState(hv_state):
+    """Fill an hv_state sub dict with cluster defaults.
+
+    """
+    return FillDict(constants.HVST_DEFAULTS, hv_state)
+
+  @staticmethod
+  def SimpleFillDiskState(disk_state):
+    """Fill a disk_state sub dict with cluster defaults.
+
+    """
+    return FillDict(constants.DS_DEFAULTS, disk_state)
+
+  def FillND(self, node, nodegroup):
+    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
+
+    @type node: L{objects.Node}
+    @param node: A Node object to fill
+    @type nodegroup: L{objects.NodeGroup}
+    @param nodegroup: A NodeGroup object to fill
+    @return: a copy of the node's ndparams with defaults filled
+
+    """
+    return self.SimpleFillND(nodegroup.FillND(node))
+
+  def SimpleFillND(self, ndparams):
+    """Fill a given ndparams dict with defaults.
+
+    @type ndparams: dict
+    @param ndparams: the dict to fill
+    @rtype: dict
+    @return: a copy of the passed in ndparams with missing keys filled
+        from the cluster defaults
+
+    """
+    return FillDict(self.ndparams, ndparams)
+
+  def SimpleFillIPolicy(self, ipolicy):
+    """Fill instance policy dict with defaults.
+ + @type ipolicy: dict + @param ipolicy: the dict to fill + @rtype: dict + @return: a copy of passed ipolicy with missing keys filled from + the cluster defaults + + """ + return FillIPolicy(self.ipolicy, ipolicy) + + +class BlockDevStatus(ConfigObject): + """Config object representing the status of a block device.""" + __slots__ = [ + "dev_path", + "major", + "minor", + "sync_percent", + "estimated_time", + "is_degraded", + "ldisk_status", + ] + + +class ImportExportStatus(ConfigObject): + """Config object representing the status of an import or export.""" + __slots__ = [ + "recent_output", + "listen_port", + "connected", + "progress_mbytes", + "progress_throughput", + "progress_eta", + "progress_percent", + "exit_status", + "error_message", + ] + _TIMESTAMPS + + +class ImportExportOptions(ConfigObject): + """Options for import/export daemon + + @ivar key_name: X509 key name (None for cluster certificate) + @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate) + @ivar compress: Compression method (one of L{constants.IEC_ALL}) + @ivar magic: Used to ensure the connection goes to the right disk + @ivar ipv6: Whether to use IPv6 + @ivar connect_timeout: Number of seconds for establishing connection + + """ + __slots__ = [ + "key_name", + "ca_pem", + "compress", + "magic", + "ipv6", + "connect_timeout", + ] + + +class ConfdRequest(ConfigObject): + """Object holding a confd request. + + @ivar protocol: confd protocol version + @ivar type: confd query type + @ivar query: query request + @ivar rsalt: requested reply salt + + """ + __slots__ = [ + "protocol", + "type", + "query", + "rsalt", + ] + + +class ConfdReply(ConfigObject): + """Object holding a confd reply. 
+ + @ivar protocol: confd protocol version + @ivar status: reply status code (ok, error) + @ivar answer: confd query reply + @ivar serial: configuration serial number + + """ + __slots__ = [ + "protocol", + "status", + "answer", + "serial", + ] + + +class QueryFieldDefinition(ConfigObject): + """Object holding a query field definition. + + @ivar name: Field name + @ivar title: Human-readable title + @ivar kind: Field type + @ivar doc: Human-readable description + + """ + __slots__ = [ + "name", + "title", + "kind", + "doc", + ] + + +class _QueryResponseBase(ConfigObject): + __slots__ = [ + "fields", + ] + + def ToDict(self): + """Custom function for serializing. + + """ + mydict = super(_QueryResponseBase, self).ToDict() + mydict["fields"] = self._ContainerToDicts(mydict["fields"]) + return mydict + + @classmethod + def FromDict(cls, val): + """Custom function for de-serializing. + + """ + obj = super(_QueryResponseBase, cls).FromDict(val) + obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition) + return obj + + +class QueryRequest(ConfigObject): + """Object holding a query request. + + """ + __slots__ = [ + "what", + "fields", + "qfilter", + ] + + +class QueryResponse(_QueryResponseBase): + """Object holding the response to a query. + + @ivar fields: List of L{QueryFieldDefinition} objects + @ivar data: Requested data + + """ + __slots__ = [ + "data", + ] + + +class QueryFieldsRequest(ConfigObject): + """Object holding a request for querying available fields. + + """ + __slots__ = [ + "what", + "fields", + ] + + +class QueryFieldsResponse(_QueryResponseBase): + """Object holding the response to a query for fields. + + @ivar fields: List of L{QueryFieldDefinition} objects + + """ + __slots__ = [ + ] + + +class MigrationStatus(ConfigObject): + """Object holding the status of a migration. 
+ + """ + __slots__ = [ + "status", + "transferred_ram", + "total_ram", + ] + + +class InstanceConsole(ConfigObject): + """Object describing how to access the console of an instance. + + """ + __slots__ = [ + "instance", + "kind", + "message", + "host", + "port", + "user", + "command", + "display", + ] + + def Validate(self): + """Validates contents of this object. + + """ + assert self.kind in constants.CONS_ALL, "Unknown console type" + assert self.instance, "Missing instance name" + assert self.message or self.kind in [constants.CONS_SSH, + constants.CONS_SPICE, + constants.CONS_VNC] + assert self.host or self.kind == constants.CONS_MESSAGE + assert self.port or self.kind in [constants.CONS_MESSAGE, + constants.CONS_SSH] + assert self.user or self.kind in [constants.CONS_MESSAGE, + constants.CONS_SPICE, + constants.CONS_VNC] + assert self.command or self.kind in [constants.CONS_MESSAGE, + constants.CONS_SPICE, + constants.CONS_VNC] + assert self.display or self.kind in [constants.CONS_MESSAGE, + constants.CONS_SPICE, + constants.CONS_SSH] + return True + class SerializableConfigParser(ConfigParser.SafeConfigParser): """Simple wrapper over ConfigParse that allows serialization. @@ -722,10 +1988,10 @@ class SerializableConfigParser(ConfigParser.SafeConfigParser): self.write(buf) return buf.getvalue() - @staticmethod - def Loads(data): + @classmethod + def Loads(cls, data): """Load data from a string.""" buf = StringIO(data) - cfp = SerializableConfigParser() + cfp = cls() cfp.readfp(buf) return cfp