X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/0d9c53bce70064206b7c8d89823170a3a38b9e9f..6bb65e3a3d84fb9ea3daeae596bbc9463edda8b8:/lib/objects.py

diff --git a/lib/objects.py b/lib/objects.py
index 6dd5141..7fb7d5b 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -26,10 +26,16 @@ pass to and from external parties.
 
 """
 
+# pylint: disable-msg=E0203,W0201
+
+# E0203: Access to member %r before its definition, since we use
+# objects.py which doesn't explicitly initialise its members
+
+# W0201: Attribute '%s' defined outside __init__
 
-import simplejson
 import ConfigParser
 import re
+import copy
 from cStringIO import StringIO
 
 from ganeti import errors
@@ -37,15 +43,50 @@ from ganeti import constants
 
 
 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
-           "OS", "Node", "Cluster"]
+           "OS", "Node", "Cluster", "FillDict"]
+
+_TIMESTAMPS = ["ctime", "mtime"]
+_UUID = ["uuid"]
+
+def FillDict(defaults_dict, custom_dict, skip_keys=None):
+  """Basic function to apply settings on top of a default dict.
+
+  @type defaults_dict: dict
+  @param defaults_dict: dictionary holding the default values
+  @type custom_dict: dict
+  @param custom_dict: dictionary holding customized values
+  @type skip_keys: list
+  @param skip_keys: which keys not to fill
+  @rtype: dict
+  @return: dict with the 'full' values
+
+  """
+  ret_dict = copy.deepcopy(defaults_dict)
+  ret_dict.update(custom_dict)
+  if skip_keys:
+    for k in skip_keys:
+      try:
+        del ret_dict[k]
+      except KeyError:
+        pass
+  return ret_dict
+
+
+def UpgradeGroupedParams(target, defaults):
+  """Update all groups for the target parameter.
+
+  @type target: dict of dicts
+  @param target: {group: {parameter: value}}
+  @type defaults: dict
+  @param defaults: default parameter values
 
-# Check whether the simplejson module supports indentation
-_JSON_INDENT = 2
-try:
-  simplejson.dumps(1, indent=_JSON_INDENT)
-except TypeError:
-  _JSON_INDENT = None
+  """
+  if target is None:
+    target = {constants.PP_DEFAULT: defaults}
+  else:
+    for group in target:
+      target[group] = FillDict(defaults, target[group])
+  return target
 
 
 class ConfigObject(object):
@@ -58,7 +99,7 @@ class ConfigObject(object):
       as None instead of raising an error
 
   Classes derived from this must always declare __slots__ (we use many
-  config objects and the memory reduction is useful.
+  config objects and the memory reduction is useful)
 
   """
   __slots__ = []
@@ -68,55 +109,26 @@ class ConfigObject(object):
       setattr(self, k, v)
 
   def __getattr__(self, name):
-    if name not in self.__slots__:
+    if name not in self._all_slots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
     return None
 
-  def __setitem__(self, key, value):
-    if key not in self.__slots__:
-      raise KeyError(key)
-    setattr(self, key, value)
-
-  def __getstate__(self):
-    state = {}
-    for name in self.__slots__:
-      if hasattr(self, name):
-        state[name] = getattr(self, name)
-    return state
-
   def __setstate__(self, state):
+    slots = self._all_slots()
     for name in state:
-      if name in self.__slots__:
+      if name in slots:
        setattr(self, name, state[name])
 
-  def Dump(self, fobj):
-    """Dump to a file object.
-
-    """
-    data = self.ToDict()
-    if _JSON_INDENT is None:
-      simplejson.dump(data, fobj)
-    else:
-      simplejson.dump(data, fobj, indent=_JSON_INDENT)
-
   @classmethod
-  def Load(cls, fobj):
-    """Load data from the given stream.
+  def _all_slots(cls):
+    """Compute the list of all declared slots for a class.
 
""" - return cls.FromDict(simplejson.load(fobj)) - - def Dumps(self): - """Dump and return the string representation.""" - buf = StringIO() - self.Dump(buf) - return buf.getvalue() - - @classmethod - def Loads(cls, data): - """Load data from a string.""" - return cls.Load(StringIO(data)) + slots = [] + for parent in cls.__mro__: + slots.extend(getattr(parent, "__slots__", [])) + return slots def ToDict(self): """Convert to a dict holding only standard python types. @@ -128,7 +140,14 @@ class ConfigObject(object): make sure all objects returned are only standard python types. """ - return dict([(k, getattr(self, k, None)) for k in self.__slots__]) + result = {} + for name in self._all_slots(): + value = getattr(self, name, None) + if value is not None: + result[name] = value + return result + + __getstate__ = ToDict @classmethod def FromDict(cls, val): @@ -147,7 +166,7 @@ class ConfigObject(object): raise errors.ConfigurationError("Invalid object passed to FromDict:" " expected dict, got %s" % type(val)) val_str = dict([(str(k), v) for k, v in val.iteritems()]) - obj = cls(**val_str) + obj = cls(**val_str) # pylint: disable-msg=W0142 return obj @staticmethod @@ -189,19 +208,37 @@ class ConfigObject(object): " _ContainerFromDicts" % c_type) return ret + def Copy(self): + """Makes a deep copy of the current object and its children. + + """ + dict_form = self.ToDict() + clone_obj = self.__class__.FromDict(dict_form) + return clone_obj + def __repr__(self): """Implement __repr__ for ConfigObjects.""" return repr(self.ToDict()) + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + This method will be called at configuration load time, and its + implementation will be object dependent. + + """ + pass + class TaggableObject(ConfigObject): """An generic class supporting tags. """ - __slots__ = ConfigObject.__slots__ + ["tags"] + __slots__ = ["tags"] + VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$") - @staticmethod - def ValidateTag(tag): + @classmethod + def ValidateTag(cls, tag): """Check if a tag is valid. If the tag is invalid, an errors.TagError will be raised. The @@ -215,7 +252,7 @@ class TaggableObject(ConfigObject): constants.MAX_TAG_LEN) if not tag: raise errors.TagError("Tags cannot be empty") - if not re.match("^[ \w.+*/:-]+$", tag): + if not cls.VALID_TAG_RE.match(tag): raise errors.TagError("Tag contains invalid characters") def GetTags(self): @@ -274,7 +311,8 @@ class TaggableObject(ConfigObject): class ConfigData(ConfigObject): """Top-level config object.""" - __slots__ = ["cluster", "nodes", "instances"] + __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] + + _TIMESTAMPS) def ToDict(self): """Custom function for top-level config data. @@ -301,26 +339,66 @@ class ConfigData(ConfigObject): obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance) return obj + def UpgradeConfig(self): + """Fill defaults for missing configuration values. + + """ + self.cluster.UpgradeConfig() + for node in self.nodes.values(): + node.UpgradeConfig() + for instance in self.instances.values(): + instance.UpgradeConfig() + class NIC(ConfigObject): """Config object representing a network card.""" - __slots__ = ["mac", "ip", "bridge"] + __slots__ = ["mac", "ip", "bridge", "nicparams"] + + @classmethod + def CheckParameterSyntax(cls, nicparams): + """Check the given parameters for validity. 
+
+    @type nicparams: dict
+    @param nicparams: dictionary with parameter names/values
+    @raise errors.ConfigurationError: when a parameter is not valid
+
+    """
+    if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
+      err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
+      raise errors.ConfigurationError(err)
+
+    if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and
+        not nicparams[constants.NIC_LINK]):
+      err = "Missing bridged nic link"
+      raise errors.ConfigurationError(err)
+
+  def UpgradeConfig(self):
+    """Fill defaults for missing configuration values.
+
+    """
+    if self.nicparams is None:
+      self.nicparams = {}
+      if self.bridge is not None:
+        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
+        self.nicparams[constants.NIC_LINK] = self.bridge
+    # bridge is no longer used in 2.1. The slot is left there to support
+    # upgrading, but will be removed in 2.2
+    if self.bridge is not None:
+      self.bridge = None
 
 
 class Disk(ConfigObject):
   """Config object representing a block device."""
   __slots__ = ["dev_type", "logical_id", "physical_id",
-               "children", "iv_name", "size"]
+               "children", "iv_name", "size", "mode"]
 
   def CreateOnSecondary(self):
     """Test if this device needs to be created on a secondary node."""
-    return self.dev_type in (constants.LD_DRBD7, constants.LD_DRBD8,
-                             constants.LD_LV)
+    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
 
   def AssembleOnSecondary(self):
     """Test if this device needs to be assembled on a secondary node."""
-    return self.dev_type in (constants.LD_DRBD7, constants.LD_DRBD8,
-                             constants.LD_LV)
+    return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
 
   def OpenOnSecondary(self):
     """Test if this device needs to be opened on a secondary node."""
@@ -363,7 +441,7 @@ class Disk(ConfigObject):
     devices needs to (or can) be assembled.
 
     """
-    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_MD_R1:
+    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
      result = [node]
     elif self.dev_type in constants.LDS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
@@ -378,12 +456,9 @@ class Disk(ConfigObject):
 
     This method, given the node on which the parent disk lives, will
     return the list of all (node, disk) pairs which describe the disk
-    tree in the most compact way. For example, a md/drbd/lvm stack
-    will be returned as (primary_node, md) and (secondary_node, drbd)
-    which represents all the top-level devices on the nodes. This
-    means that on the primary node we need to activate the the md (and
-    recursively all its children) and on the secondary node we need to
-    activate the drbd device (and its children, the two lvm volumes).
+    tree in the most compact way. For example, a drbd/lvm stack
+    will be returned as (primary_node, drbd) and (secondary_node, drbd)
+    which represents all the top-level devices on the nodes.
 
     """
     my_nodes = self.GetNodes(parent_node)
@@ -411,6 +486,77 @@ class Disk(ConfigObject):
      # be different)
     return result
 
+  def RecordGrow(self, amount):
+    """Update the size of this disk after growth.
+
+    This method recurses over the disk's children and updates their
+    size correspondingly. The method needs to be kept in sync with the
+    actual algorithms from bdev.
+
+    """
+    if self.dev_type == constants.LD_LV:
+      self.size += amount
+    elif self.dev_type == constants.LD_DRBD8:
+      if self.children:
+        self.children[0].RecordGrow(amount)
+      self.size += amount
+    else:
+      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
+                                   " disk type %s" % self.dev_type)
+
+  def UnsetSize(self):
+    """Recursively sets the size to zero for the disk and its children.
+
+    """
+    if self.children:
+      for child in self.children:
+        child.UnsetSize()
+    self.size = 0
+
+  def SetPhysicalID(self, target_node, nodes_ip):
+    """Convert the logical ID to the physical ID.
+
+    This is used only for drbd, which needs ip/port configuration.
+
+    The routine descends down and updates its children also, because
+    this helps when only the top device is passed to the remote
+    node.
+
+    Arguments:
+      - target_node: the node we wish to configure for
+      - nodes_ip: a mapping of node name to ip
+
+    The target_node must exist in nodes_ip, and must be one of the
+    nodes in the logical ID for each of the DRBD devices encountered
+    in the disk tree.
+
+    """
+    if self.children:
+      for child in self.children:
+        child.SetPhysicalID(target_node, nodes_ip)
+
+    if self.logical_id is None and self.physical_id is not None:
+      return
+    if self.dev_type in constants.LDS_DRBD:
+      pnode, snode, port, pminor, sminor, secret = self.logical_id
+      if target_node not in (pnode, snode):
+        raise errors.ConfigurationError("DRBD device not knowing node %s" %
+                                        target_node)
+      pnode_ip = nodes_ip.get(pnode, None)
+      snode_ip = nodes_ip.get(snode, None)
+      if pnode_ip is None or snode_ip is None:
+        raise errors.ConfigurationError("Can't find primary or secondary node"
+                                        " for %s" % str(self))
+      p_data = (pnode_ip, port)
+      s_data = (snode_ip, port)
+      if pnode == target_node:
+        self.physical_id = p_data + s_data + (pminor, secret)
+      else: # it must be secondary, we tested above
+        self.physical_id = s_data + p_data + (sminor, secret)
+    else:
+      self.physical_id = self.logical_id
+    return
+
   def ToDict(self):
     """Disk-specific conversion to standard python types.
 
@@ -438,6 +584,10 @@ class Disk(ConfigObject):
      obj.logical_id = tuple(obj.logical_id)
     if obj.physical_id and isinstance(obj.physical_id, list):
      obj.physical_id = tuple(obj.physical_id)
+    if obj.dev_type in constants.LDS_DRBD:
+      # we need a tuple of length six here
+      if len(obj.logical_id) < 6:
+        obj.logical_id += (None,) * (6 - len(obj.logical_id))
     return obj
 
   def __str__(self):
@@ -447,10 +597,8 @@ class Disk(ConfigObject):
 
     if self.dev_type == constants.LD_LV:
      val = "
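
A minimal usage sketch of the new FillDict and UpgradeGroupedParams helpers introduced by this change (not part of the diff; it assumes the post-patch lib/objects.py is importable as ganeti.objects, and the parameter keys and group names below are illustrative examples, not Ganeti constants):

# Illustrative only -- not part of lib/objects.py or the diff above.
from ganeti import objects

defaults = {"mode": "bridged", "link": "xen-br0"}   # example values only
custom = {"link": "br100"}

# FillDict deep-copies the defaults and overlays the custom values.
filled = objects.FillDict(defaults, custom)
assert filled == {"mode": "bridged", "link": "br100"}

# skip_keys drops the named keys from the merged result, if present.
trimmed = objects.FillDict(defaults, custom, skip_keys=["mode"])
assert trimmed == {"link": "br100"}

# UpgradeGroupedParams applies the same merge to every parameter group.
groups = {"default": {"link": "br100"}, "dmz": {}}   # hypothetical groups
upgraded = objects.UpgradeGroupedParams(groups, defaults)
assert upgraded["dmz"] == defaults
assert upgraded["default"] == {"mode": "bridged", "link": "br100"}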