X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/2ed0e208a18209f33c228ef00800fe679b287054..e228ab9c2f3f41063db20f59132306a4b4812d6b:/lib/objects.py diff --git a/lib/objects.py b/lib/objects.py index d79e085..358f108 100644 --- a/lib/objects.py +++ b/lib/objects.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -38,19 +38,21 @@ pass to and from external parties. import ConfigParser import re import copy +import logging import time from cStringIO import StringIO from ganeti import errors from ganeti import constants from ganeti import netutils +from ganeti import outils from ganeti import utils from socket import AF_INET __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", - "OS", "Node", "NodeGroup", "Cluster", "FillDict"] + "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"] _TIMESTAMPS = ["ctime", "mtime"] _UUID = ["uuid"] @@ -80,23 +82,17 @@ def FillDict(defaults_dict, custom_dict, skip_keys=None): return ret_dict -def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None): +def FillIPolicy(default_ipolicy, custom_ipolicy): """Fills an instance policy with defaults. """ assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS - ret_dict = {} - for key in constants.IPOLICY_ISPECS: - ret_dict[key] = FillDict(default_ipolicy[key], - custom_ipolicy.get(key, {}), - skip_keys=skip_keys) - # list items - for key in [constants.IPOLICY_DTS]: - ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key])) - # other items which we know we can directly copy (immutables) - for key in constants.IPOLICY_PARAMETERS: - ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key]) - + ret_dict = copy.deepcopy(custom_ipolicy) + for key in default_ipolicy: + if key not in ret_dict: + ret_dict[key] = copy.deepcopy(default_ipolicy[key]) + elif key == constants.ISPECS_STD: + ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key]) return ret_dict @@ -173,6 +169,10 @@ def UpgradeNDParams(ndparams): if ndparams is None: ndparams = {} + if (constants.ND_OOB_PROGRAM in ndparams and + ndparams[constants.ND_OOB_PROGRAM] is None): + # will be reset by the line below + del ndparams[constants.ND_OOB_PROGRAM] return FillDict(constants.NDC_DEFAULTS, ndparams) @@ -180,14 +180,10 @@ def MakeEmptyIPolicy(): """Create empty IPolicy dictionary. """ - return dict([ - (constants.ISPECS_MIN, {}), - (constants.ISPECS_MAX, {}), - (constants.ISPECS_STD, {}), - ]) + return {} -class ConfigObject(object): +class ConfigObject(outils.ValidatedSlots): """A generic config object. It has the following properties: @@ -202,34 +198,22 @@ class ConfigObject(object): """ __slots__ = [] - def __init__(self, **kwargs): - for k, v in kwargs.iteritems(): - setattr(self, k, v) - def __getattr__(self, name): - if name not in self._all_slots(): + if name not in self.GetAllSlots(): raise AttributeError("Invalid object attribute %s.%s" % (type(self).__name__, name)) return None def __setstate__(self, state): - slots = self._all_slots() + slots = self.GetAllSlots() for name in state: if name in slots: setattr(self, name, state[name]) - @classmethod - def _all_slots(cls): - """Compute the list of all declared slots for a class. + def Validate(self): + """Validates the slots. 
""" - slots = [] - for parent in cls.__mro__: - slots.extend(getattr(parent, "__slots__", [])) - return slots - - #: Public getter for the defined slots - GetAllSlots = _all_slots def ToDict(self): """Convert to a dict holding only standard python types. @@ -242,7 +226,7 @@ class ConfigObject(object): """ result = {} - for name in self._all_slots(): + for name in self.GetAllSlots(): value = getattr(self, name, None) if value is not None: result[name] = value @@ -270,47 +254,6 @@ class ConfigObject(object): obj = cls(**val_str) # pylint: disable=W0142 return obj - @staticmethod - def _ContainerToDicts(container): - """Convert the elements of a container to standard python types. - - This method converts a container with elements derived from - ConfigData to standard python types. If the container is a dict, - we don't touch the keys, only the values. - - """ - if isinstance(container, dict): - ret = dict([(k, v.ToDict()) for k, v in container.iteritems()]) - elif isinstance(container, (list, tuple, set, frozenset)): - ret = [elem.ToDict() for elem in container] - else: - raise TypeError("Invalid type %s passed to _ContainerToDicts" % - type(container)) - return ret - - @staticmethod - def _ContainerFromDicts(source, c_type, e_type): - """Convert a container from standard python types. - - This method converts a container with standard python types to - ConfigData objects. If the container is a dict, we don't touch the - keys, only the values. - - """ - if not isinstance(c_type, type): - raise TypeError("Container type %s passed to _ContainerFromDicts is" - " not a type" % type(c_type)) - if source is None: - source = c_type() - if c_type is dict: - ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()]) - elif c_type in (list, tuple, set, frozenset): - ret = c_type([e_type.FromDict(elem) for elem in source]) - else: - raise TypeError("Invalid container type %s passed to" - " _ContainerFromDicts" % c_type) - return ret - def Copy(self): """Makes a deep copy of the current object and its children. @@ -427,7 +370,7 @@ class MasterNetworkParameters(ConfigObject): "ip", "netmask", "netdev", - "ip_family" + "ip_family", ] @@ -439,6 +382,7 @@ class ConfigData(ConfigObject): "nodes", "nodegroups", "instances", + "networks", "serial_no", ] + _TIMESTAMPS @@ -451,8 +395,8 @@ class ConfigData(ConfigObject): """ mydict = super(ConfigData, self).ToDict() mydict["cluster"] = mydict["cluster"].ToDict() - for key in "nodes", "instances", "nodegroups": - mydict[key] = self._ContainerToDicts(mydict[key]) + for key in "nodes", "instances", "nodegroups", "networks": + mydict[key] = outils.ContainerToDicts(mydict[key]) return mydict @@ -463,9 +407,12 @@ class ConfigData(ConfigObject): """ obj = super(ConfigData, cls).FromDict(val) obj.cluster = Cluster.FromDict(obj.cluster) - obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node) - obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance) - obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup) + obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node) + obj.instances = \ + outils.ContainerFromDicts(obj.instances, dict, Instance) + obj.nodegroups = \ + outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup) + obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network) return obj def HasAnyDiskOfType(self, dev_type): @@ -502,11 +449,47 @@ class ConfigData(ConfigObject): # gives a good approximation. 
if self.HasAnyDiskOfType(constants.LD_DRBD8): self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER + if self.networks is None: + self.networks = {} + for network in self.networks.values(): + network.UpgradeConfig() + self._UpgradeEnabledDiskTemplates() + + def _UpgradeEnabledDiskTemplates(self): + """Upgrade the cluster's enabled disk templates by inspecting the currently + enabled and/or used disk templates. + + """ + # enabled_disk_templates in the cluster config were introduced in 2.8. + # Remove this code once upgrading from earlier versions is deprecated. + if not self.cluster.enabled_disk_templates: + template_set = \ + set([inst.disk_template for inst in self.instances.values()]) + # Add drbd and plain, if lvm is enabled (by specifying a volume group) + if self.cluster.volume_group_name: + template_set.add(constants.DT_DRBD8) + template_set.add(constants.DT_PLAIN) + # FIXME: Adapt this when dis/enabling at configure time is removed. + # Enable 'file' and 'sharedfile', if they are enabled, even though they + # might currently not be used. + if constants.ENABLE_FILE_STORAGE: + template_set.add(constants.DT_FILE) + if constants.ENABLE_SHARED_FILE_STORAGE: + template_set.add(constants.DT_SHARED_FILE) + # Set enabled_disk_templates to the inferred disk templates. Order them + # according to a preference list that is based on Ganeti's history of + # supported disk templates. + self.cluster.enabled_disk_templates = [] + for preferred_template in constants.DISK_TEMPLATE_PREFERENCE: + if preferred_template in template_set: + self.cluster.enabled_disk_templates.append(preferred_template) + template_set.remove(preferred_template) + self.cluster.enabled_disk_templates.extend(list(template_set)) class NIC(ConfigObject): """Config object representing a network card.""" - __slots__ = ["mac", "ip", "nicparams"] + __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID @classmethod def CheckParameterSyntax(cls, nicparams): @@ -517,21 +500,20 @@ class NIC(ConfigObject): @raise errors.ConfigurationError: when a parameter is not valid """ - if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and - nicparams[constants.NIC_MODE] != constants.VALUE_AUTO): - err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE] - raise errors.ConfigurationError(err) + mode = nicparams[constants.NIC_MODE] + if (mode not in constants.NIC_VALID_MODES and + mode != constants.VALUE_AUTO): + raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode) - if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and + if (mode == constants.NIC_MODE_BRIDGED and not nicparams[constants.NIC_LINK]): - err = "Missing bridged nic link" - raise errors.ConfigurationError(err) + raise errors.ConfigurationError("Missing bridged NIC link") class Disk(ConfigObject): """Config object representing a block device.""" - __slots__ = ["dev_type", "logical_id", "physical_id", - "children", "iv_name", "size", "mode", "params"] + __slots__ = ["name", "dev_type", "logical_id", "physical_id", + "children", "iv_name", "size", "mode", "params"] + _UUID def CreateOnSecondary(self): """Test if this device needs to be created on a secondary node.""" @@ -605,7 +587,8 @@ class Disk(ConfigObject): """ if self.dev_type in [constants.LD_LV, constants.LD_FILE, - constants.LD_BLOCKDEV, constants.LD_RBD]: + constants.LD_BLOCKDEV, constants.LD_RBD, + constants.LD_EXT]: result = [node] elif self.dev_type in constants.LDS_DRBD: result = [self.logical_id[0], self.logical_id[1]] @@ -681,7 +664,7 @@ class 
Disk(ConfigObject): """ if self.dev_type in (constants.LD_LV, constants.LD_FILE, - constants.LD_RBD): + constants.LD_RBD, constants.LD_EXT): self.size += amount elif self.dev_type == constants.LD_DRBD8: if self.children: @@ -771,7 +754,7 @@ class Disk(ConfigObject): for attr in ("children",): alist = bo.get(attr, None) if alist: - bo[attr] = self._ContainerToDicts(alist) + bo[attr] = outils.ContainerToDicts(alist) return bo @classmethod @@ -781,7 +764,7 @@ class Disk(ConfigObject): """ obj = super(Disk, cls).FromDict(val) if obj.children: - obj.children = cls._ContainerFromDicts(obj.children, list, Disk) + obj.children = outils.ContainerFromDicts(obj.children, list, Disk) if obj.logical_id and isinstance(obj.logical_id, list): obj.logical_id = tuple(obj.logical_id) if obj.physical_id and isinstance(obj.physical_id, list): @@ -845,9 +828,18 @@ class Disk(ConfigObject): child.UpgradeConfig() # FIXME: Make this configurable in Ganeti 2.7 - self.params = {} + # Params should be an empty dict that gets filled any time needed + # In case of ext template we allow arbitrary params that should not + # be overrided during a config reload/upgrade. + if not self.params or not isinstance(self.params, dict): + self.params = {} + # add here config upgrade for this disk + # If the file driver is empty, fill it up with the default value + if self.dev_type == constants.LD_FILE and self.physical_id[0] is None: + self.physical_id[0] = constants.FD_DEFAULT + @staticmethod def ComputeLDParams(disk_template, disk_params): """Computes Logical Disk parameters from Disk Template parameters. @@ -870,7 +862,7 @@ class Disk(ConfigObject): result = list() dt_params = disk_params[disk_template] if disk_template == constants.DT_DRBD8: - drbd_params = { + result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], { constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE], constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS], constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS], @@ -883,56 +875,36 @@ class Disk(ConfigObject): constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET], constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE], constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE], - } - - drbd_params = \ - FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], - drbd_params) - - result.append(drbd_params) + })) # data LV - data_params = { + result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], { constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES], - } - data_params = \ - FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], - data_params) - result.append(data_params) + })) # metadata LV - meta_params = { + result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], { constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES], - } - meta_params = \ - FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], - meta_params) - result.append(meta_params) + })) - elif (disk_template == constants.DT_FILE or - disk_template == constants.DT_SHARED_FILE): + elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE): result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE]) elif disk_template == constants.DT_PLAIN: - params = { + result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], { constants.LDP_STRIPES: dt_params[constants.LV_STRIPES], - } - params = \ - FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], - params) - result.append(params) + })) elif disk_template == constants.DT_BLOCK: 
result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV]) elif disk_template == constants.DT_RBD: - params = { - constants.LDP_POOL: dt_params[constants.RBD_POOL] - } - params = \ - FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], - params) - result.append(params) + result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], { + constants.LDP_POOL: dt_params[constants.RBD_POOL], + })) + + elif disk_template == constants.DT_EXT: + result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT]) return result @@ -940,7 +912,6 @@ class Disk(ConfigObject): class InstancePolicy(ConfigObject): """Config object representing instance policy limits dictionary. - Note that this object is not actually used in the config, it's just used as a placeholder for a few functions. @@ -949,9 +920,14 @@ class InstancePolicy(ConfigObject): def CheckParameterSyntax(cls, ipolicy, check_std): """ Check the instance policy for validity. + @type ipolicy: dict + @param ipolicy: dictionary with min/max/std specs and policies + @type check_std: bool + @param check_std: Whether to check std value or just assume compliance + @raise errors.ConfigurationError: when the policy is not legal + """ - for param in constants.ISPECS_PARAMETERS: - InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std) + InstancePolicy.CheckISpecSyntax(ipolicy, check_std) if constants.IPOLICY_DTS in ipolicy: InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS]) for key in constants.IPOLICY_PARAMETERS: @@ -963,44 +939,100 @@ class InstancePolicy(ConfigObject): utils.CommaJoin(wrong_keys)) @classmethod - def CheckISpecSyntax(cls, ipolicy, name, check_std): - """Check the instance policy for validity on a given key. + def _CheckIncompleteSpec(cls, spec, keyname): + missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys()) + if missing_params: + msg = ("Missing instance specs parameters for %s: %s" % + (keyname, utils.CommaJoin(missing_params))) + raise errors.ConfigurationError(msg) - We check if the instance policy makes sense for a given key, that is - if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name]. + @classmethod + def CheckISpecSyntax(cls, ipolicy, check_std): + """Check the instance policy specs for validity. 
@type ipolicy: dict - @param ipolicy: dictionary with min, max, std specs + @param ipolicy: dictionary with min/max/std specs + @type check_std: bool + @param check_std: Whether to check std value or just assume compliance + @raise errors.ConfigurationError: when specs are not valid + + """ + if constants.ISPECS_MINMAX not in ipolicy: + # Nothing to check + return + + if check_std and constants.ISPECS_STD not in ipolicy: + msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD + raise errors.ConfigurationError(msg) + stdspec = ipolicy.get(constants.ISPECS_STD) + if check_std: + InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD) + + if not ipolicy[constants.ISPECS_MINMAX]: + raise errors.ConfigurationError("Empty minmax specifications") + std_is_good = False + for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]: + missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys()) + if missing: + msg = "Missing instance specification: %s" % utils.CommaJoin(missing) + raise errors.ConfigurationError(msg) + for (key, spec) in minmaxspecs.items(): + InstancePolicy._CheckIncompleteSpec(spec, key) + + spec_std_ok = True + for param in constants.ISPECS_PARAMETERS: + par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec, + param, check_std) + spec_std_ok = spec_std_ok and par_std_ok + std_is_good = std_is_good or spec_std_ok + if not std_is_good: + raise errors.ConfigurationError("Invalid std specifications") + + @classmethod + def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std): + """Check the instance policy specs for validity on a given key. + + We check if the instance specs makes sense for a given key, that is + if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name]. + + @type minmaxspecs: dict + @param minmaxspecs: dictionary with min and max instance spec + @type stdspec: dict + @param stdspec: dictionary with standard instance spec @type name: string @param name: what are the limits for @type check_std: bool @param check_std: Whether to check std value or just assume compliance - @raise errors.ConfigureError: when specs for given name are not valid + @rtype: bool + @return: C{True} when specs are valid, C{False} when standard spec for the + given name is not valid + @raise errors.ConfigurationError: when min/max specs for the given name + are not valid """ - min_v = ipolicy[constants.ISPECS_MIN].get(name, 0) + minspec = minmaxspecs[constants.ISPECS_MIN] + maxspec = minmaxspecs[constants.ISPECS_MAX] + min_v = minspec[name] + max_v = maxspec[name] - if check_std: - std_v = ipolicy[constants.ISPECS_STD].get(name, min_v) - std_msg = std_v - else: - std_v = min_v - std_msg = "-" - - max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v) - err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" % - (name, - ipolicy[constants.ISPECS_MIN].get(name, "-"), - ipolicy[constants.ISPECS_MAX].get(name, "-"), - std_msg)) - if min_v > std_v or std_v > max_v: + if min_v > max_v: + err = ("Invalid specification of min/max values for %s: %s/%s" % + (name, min_v, max_v)) raise errors.ConfigurationError(err) + elif check_std: + std_v = stdspec.get(name, min_v) + return std_v >= min_v and std_v <= max_v + else: + return True @classmethod def CheckDiskTemplates(cls, disk_templates): """Checks the disk templates for validity. 
""" + if not disk_templates: + raise errors.ConfigurationError("Instance policy must contain" + + " at least one disk template") wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) if wrong: raise errors.ConfigurationError("Invalid disk template(s) %s" % @@ -1034,6 +1066,7 @@ class Instance(TaggableObject): "nics", "disks", "disk_template", + "disks_active", "network_port", "serial_no", ] + _TIMESTAMPS + _UUID @@ -1049,7 +1082,7 @@ class Instance(TaggableObject): return tuple(all_nodes) secondary_nodes = property(_ComputeSecondaryNodes, None, None, - "List of secondary nodes") + "List of names of secondary nodes") def _ComputeAllNodes(self): """Compute the list of all nodes. @@ -1077,7 +1110,7 @@ class Instance(TaggableObject): return tuple(all_nodes) all_nodes = property(_ComputeAllNodes, None, None, - "List of all nodes of the instance") + "List of names of all the nodes of the instance") def MapLVsByNode(self, lvmap=None, devs=None, node=None): """Provide a mapping of nodes to LVs this instance owns. @@ -1094,7 +1127,7 @@ class Instance(TaggableObject): GetVolumeList() """ - if node == None: + if node is None: node = self.primary_node if lvmap is None: @@ -1159,7 +1192,7 @@ class Instance(TaggableObject): for attr in "nics", "disks": alist = bo.get(attr, None) if alist: - nlist = self._ContainerToDicts(alist) + nlist = outils.ContainerToDicts(alist) else: nlist = [] bo[attr] = nlist @@ -1178,8 +1211,8 @@ class Instance(TaggableObject): if "admin_up" in val: del val["admin_up"] obj = super(Instance, cls).FromDict(val) - obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC) - obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk) + obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC) + obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk) return obj def UpgradeConfig(self): @@ -1199,6 +1232,8 @@ class Instance(TaggableObject): if self.osparams is None: self.osparams = {} UpgradeBeParams(self.beparams) + if self.disks_active is None: + self.disks_active = self.admin_state == constants.ADMINST_UP class OS(ConfigObject): @@ -1261,6 +1296,24 @@ class OS(ConfigObject): return cls.SplitNameVariant(name)[1] +class ExtStorage(ConfigObject): + """Config object representing an External Storage Provider. + + """ + __slots__ = [ + "name", + "path", + "create_script", + "remove_script", + "grow_script", + "attach_script", + "detach_script", + "setinfo_script", + "verify_script", + "supported_parameters", + ] + + class NodeHvState(ConfigObject): """Hypvervisor state on a node. 
@@ -1337,6 +1390,12 @@ class Node(TaggableObject): if self.ndparams is None: self.ndparams = {} + # And remove any global parameter + for key in constants.NDC_GLOBALS: + if key in self.ndparams: + logging.warning("Ignoring %s node parameter for node %s", + key, self.name) + del self.ndparams[key] if self.powered is None: self.powered = True @@ -1349,12 +1408,12 @@ class Node(TaggableObject): hv_state = data.get("hv_state", None) if hv_state is not None: - data["hv_state"] = self._ContainerToDicts(hv_state) + data["hv_state"] = outils.ContainerToDicts(hv_state) disk_state = data.get("disk_state", None) if disk_state is not None: data["disk_state"] = \ - dict((key, self._ContainerToDicts(value)) + dict((key, outils.ContainerToDicts(value)) for (key, value) in disk_state.items()) return data @@ -1367,11 +1426,12 @@ class Node(TaggableObject): obj = super(Node, cls).FromDict(val) if obj.hv_state is not None: - obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState) + obj.hv_state = \ + outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState) if obj.disk_state is not None: obj.disk_state = \ - dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState)) + dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState)) for (key, value) in obj.disk_state.items()) return obj @@ -1389,6 +1449,7 @@ class NodeGroup(TaggableObject): "hv_state_static", "disk_state_static", "alloc_policy", + "networks", ] + _TIMESTAMPS + _UUID def ToDict(self): @@ -1436,6 +1497,9 @@ class NodeGroup(TaggableObject): if self.ipolicy is None: self.ipolicy = MakeEmptyIPolicy() + if self.networks is None: + self.networks = {} + def FillND(self, node): """Return filled out ndparams for L{objects.Node} @@ -1464,6 +1528,7 @@ class Cluster(TaggableObject): __slots__ = [ "serial_no", "rsahostkeypub", + "dsahostkeypub", "highest_used_port", "tcpudp_port_pool", "mac_prefix", @@ -1501,6 +1566,7 @@ class Cluster(TaggableObject): "prealloc_wipe_disks", "hv_state_static", "disk_state_static", + "enabled_disk_templates", ] + _TIMESTAMPS + _UUID def UpgradeConfig(self): @@ -1553,8 +1619,8 @@ class Cluster(TaggableObject): # code can be removed once upgrading straight from 2.0 is deprecated. if self.default_hypervisor is not None: self.enabled_hypervisors = ([self.default_hypervisor] + - [hvname for hvname in self.enabled_hypervisors - if hvname != self.default_hypervisor]) + [hvname for hvname in self.enabled_hypervisors + if hvname != self.default_hypervisor]) self.default_hypervisor = None # maintain_node_health added after 2.1.1 @@ -1608,6 +1674,12 @@ class Cluster(TaggableObject): # we can either make sure to upgrade the ipolicy always, or only # do it in some corner cases (e.g. 
missing keys); note that this # will break any removal of keys from the ipolicy dict + wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS + if wrongkeys: + # These keys would be silently removed by FillIPolicy() + msg = ("Cluster instance policy contains spurious keys: %s" % + utils.CommaJoin(wrongkeys)) + raise errors.ConfigurationError(msg) self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy) @property @@ -1624,7 +1696,14 @@ class Cluster(TaggableObject): """ mydict = super(Cluster, self).ToDict() - mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool) + + if self.tcpudp_port_pool is None: + tcpudp_port_pool = [] + else: + tcpudp_port_pool = list(self.tcpudp_port_pool) + + mydict["tcpudp_port_pool"] = tcpudp_port_pool + return mydict @classmethod @@ -1633,8 +1712,12 @@ class Cluster(TaggableObject): """ obj = super(Cluster, cls).FromDict(val) - if not isinstance(obj.tcpudp_port_pool, set): + + if obj.tcpudp_port_pool is None: + obj.tcpudp_port_pool = set() + elif not isinstance(obj.tcpudp_port_pool, set): obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) + return obj def SimpleFillDP(self, diskparams): @@ -1925,7 +2008,7 @@ class _QueryResponseBase(ConfigObject): """ mydict = super(_QueryResponseBase, self).ToDict() - mydict["fields"] = self._ContainerToDicts(mydict["fields"]) + mydict["fields"] = outils.ContainerToDicts(mydict["fields"]) return mydict @classmethod @@ -1934,7 +2017,8 @@ class _QueryResponseBase(ConfigObject): """ obj = super(_QueryResponseBase, cls).FromDict(val) - obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition) + obj.fields = \ + outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition) return obj @@ -1966,8 +2050,7 @@ class QueryFieldsResponse(_QueryResponseBase): @ivar fields: List of L{QueryFieldDefinition} objects """ - __slots__ = [ - ] + __slots__ = [] class MigrationStatus(ConfigObject): @@ -2020,6 +2103,62 @@ class InstanceConsole(ConfigObject): return True +class Network(TaggableObject): + """Object representing a network definition for ganeti. + + """ + __slots__ = [ + "name", + "serial_no", + "mac_prefix", + "network", + "network6", + "gateway", + "gateway6", + "reservations", + "ext_reservations", + ] + _TIMESTAMPS + _UUID + + def HooksDict(self, prefix=""): + """Export a dictionary used by hooks with a network's information. + + @type prefix: String + @param prefix: Prefix to prepend to the dict entries + + """ + result = { + "%sNETWORK_NAME" % prefix: self.name, + "%sNETWORK_UUID" % prefix: self.uuid, + "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()), + } + if self.network: + result["%sNETWORK_SUBNET" % prefix] = self.network + if self.gateway: + result["%sNETWORK_GATEWAY" % prefix] = self.gateway + if self.network6: + result["%sNETWORK_SUBNET6" % prefix] = self.network6 + if self.gateway6: + result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6 + if self.mac_prefix: + result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix + + return result + + @classmethod + def FromDict(cls, val): + """Custom function for networks. + + Remove deprecated network_type and family. + + """ + if "network_type" in val: + del val["network_type"] + if "family" in val: + del val["family"] + obj = super(Network, cls).FromDict(val) + return obj + + class SerializableConfigParser(ConfigParser.SafeConfigParser): """Simple wrapper over ConfigParse that allows serialization. 
@@ -2041,3 +2180,41 @@ class SerializableConfigParser(ConfigParser.SafeConfigParser): cfp = cls() cfp.readfp(buf) return cfp + + +class LvmPvInfo(ConfigObject): + """Information about an LVM physical volume (PV). + + @type name: string + @ivar name: name of the PV + @type vg_name: string + @ivar vg_name: name of the volume group containing the PV + @type size: float + @ivar size: size of the PV in MiB + @type free: float + @ivar free: free space in the PV, in MiB + @type attributes: string + @ivar attributes: PV attributes + @type lv_list: list of strings + @ivar lv_list: names of the LVs hosted on the PV + """ + __slots__ = [ + "name", + "vg_name", + "size", + "free", + "attributes", + "lv_list" + ] + + def IsEmpty(self): + """Is this PV empty? + + """ + return self.size <= (self.free + 1) + + def IsAllocatable(self): + """Is this PV allocatable? + + """ + return ("a" in self.attributes)
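The ipolicy handling earlier in this patch is its core change: FillIPolicy() no longer merges every spec dictionary field by field; it deep-copies the custom policy and only fills in missing top-level keys, with ISPECS_STD as the one key that is still merged per parameter. Below is a standalone sketch of those merge semantics; the plain string keys and the example values are illustrative stand-ins for the ganeti.constants names and for real specs, not part of the patch.

import copy

ISPECS_STD = "std"        # stand-in for constants.ISPECS_STD
ISPECS_MINMAX = "minmax"  # stand-in for constants.ISPECS_MINMAX

def fill_ipolicy(default_ipolicy, custom_ipolicy):
  # Custom keys win wholesale; missing keys come from the defaults.
  ret = copy.deepcopy(custom_ipolicy)
  for key, value in default_ipolicy.items():
    if key not in ret:
      ret[key] = copy.deepcopy(value)
    elif key == ISPECS_STD:
      # Only the "std" spec is still filled parameter by parameter.
      merged = dict(value)
      merged.update(ret[key])
      ret[key] = merged
  return ret

defaults = {
  ISPECS_STD: {"memory-size": 128, "cpu-count": 1},
  ISPECS_MINMAX: [{"min": {"memory-size": 128}, "max": {"memory-size": 32768}}],
  "disk-templates": ["plain", "drbd"],
}
custom = {
  ISPECS_STD: {"memory-size": 512},   # cpu-count is taken from the defaults
  "disk-templates": ["drbd"],         # replaces the default list entirely
}
filled = fill_ipolicy(defaults, custom)
assert filled[ISPECS_STD] == {"memory-size": 512, "cpu-count": 1}
assert filled["disk-templates"] == ["drbd"]
assert filled[ISPECS_MINMAX] == defaults[ISPECS_MINMAX]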
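The rewritten CheckISpecSyntax() validates the new layout in which an instance policy carries a list of min/max spec pairs plus a single std spec: every pair must have min <= max for each parameter, and, when std checking is requested, std must fit inside at least one of the pairs. A minimal sketch of that rule, with the completeness checks omitted and ValueError standing in for errors.ConfigurationError:

PARAMS = ("memory-size", "cpu-count")  # stand-in for constants.ISPECS_PARAMETERS

def check_ispecs(minmax_list, stdspec, check_std=True):
  if not minmax_list:
    raise ValueError("Empty minmax specifications")
  std_ok_somewhere = False
  for pair in minmax_list:
    minspec, maxspec = pair["min"], pair["max"]
    pair_ok = True
    for name in PARAMS:
      if minspec[name] > maxspec[name]:
        raise ValueError("Invalid min/max values for %s" % name)
      if check_std:
        std_v = stdspec.get(name, minspec[name])
        pair_ok = pair_ok and minspec[name] <= std_v <= maxspec[name]
    std_ok_somewhere = std_ok_somewhere or pair_ok
  if check_std and not std_ok_somewhere:
    raise ValueError("Invalid std specifications")

check_ispecs(
  [{"min": {"memory-size": 128, "cpu-count": 1},
    "max": {"memory-size": 32768, "cpu-count": 8}}],
  {"memory-size": 512, "cpu-count": 2})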
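The new _UpgradeEnabledDiskTemplates() infers enabled_disk_templates for configurations written before 2.8: it collects the templates used by existing instances, adds the LVM-based templates when a volume group is configured, and orders the result by a preference list. A simplified sketch, assuming an invented preference list in place of constants.DISK_TEMPLATE_PREFERENCE and omitting the compile-time file/sharedfile flags handled by the real code:

PREFERENCE = ["drbd", "plain", "file", "sharedfile", "rbd", "ext", "blockdev", "diskless"]

def infer_enabled_templates(instance_templates, has_volume_group):
  found = set(instance_templates)
  if has_volume_group:
    # A configured volume group means the LVM-based templates stay usable.
    found.update(["drbd", "plain"])
  ordered = [t for t in PREFERENCE if t in found]
  # Anything not covered by the preference list is appended at the end.
  return ordered + list(found.difference(PREFERENCE))

print(infer_enabled_templates(["plain", "rbd"], has_volume_group=True))
# ['drbd', 'plain', 'rbd']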