X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/51cb158170d8a2c51237717a1e7c2257bf4daefb..0dfa2c2271131bd9b6e0409f642b4e094827c871:/lib/objects.py

diff --git a/lib/objects.py b/lib/objects.py
index 9e9163c..fb7e323 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -36,14 +36,17 @@ pass to and from external parties.
 import ConfigParser
 import re
 import copy
+import time
 from cStringIO import StringIO
 
 from ganeti import errors
 from ganeti import constants
 
+from socket import AF_INET
+
 
 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
-           "OS", "Node", "Cluster", "FillDict"]
+           "OS", "Node", "NodeGroup", "Cluster", "FillDict"]
 
 _TIMESTAMPS = ["ctime", "mtime"]
 _UUID = ["uuid"]
@@ -200,6 +203,8 @@ class ConfigObject(object):
     if not isinstance(c_type, type):
       raise TypeError("Container type %s passed to _ContainerFromDicts is"
                       " not a type" % type(c_type))
+    if source is None:
+      source = c_type()
     if c_type is dict:
       ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
     elif c_type in (list, tuple, set, frozenset):
@@ -312,8 +317,14 @@ class TaggableObject(ConfigObject):
 
 
 class ConfigData(ConfigObject):
   """Top-level config object."""
-  __slots__ = (["version", "cluster", "nodes", "instances", "serial_no"] +
-               _TIMESTAMPS)
+  __slots__ = [
+    "version",
+    "cluster",
+    "nodes",
+    "nodegroups",
+    "instances",
+    "serial_no",
+    ] + _TIMESTAMPS
 
   def ToDict(self):
@@ -324,7 +335,7 @@ class ConfigData(ConfigObject):
     """
     mydict = super(ConfigData, self).ToDict()
     mydict["cluster"] = mydict["cluster"].ToDict()
-    for key in "nodes", "instances":
+    for key in "nodes", "instances", "nodegroups":
       mydict[key] = self._ContainerToDicts(mydict[key])
 
     return mydict
@@ -338,6 +349,7 @@ class ConfigData(ConfigObject):
     obj.cluster = Cluster.FromDict(obj.cluster)
     obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
     obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
+    obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
     return obj
 
   def HasAnyDiskOfType(self, dev_type):
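
The two hunks above work together: _ContainerFromDicts now treats a missing (None) source as an empty container, so configuration files written before node groups existed can still be deserialized, and ConfigData.FromDict then builds the new nodegroups dict the same way as nodes and instances. Below is a minimal standalone sketch of that behaviour; it uses mock names (_FakeGroup, container_from_dicts) rather than the real ganeti classes.

  class _FakeGroup(object):
    # Stand-in for a ConfigObject subclass with a FromDict classmethod.
    def __init__(self, name):
      self.name = name

    @classmethod
    def FromDict(cls, val):
      return cls(val["name"])

  def container_from_dicts(source, c_type, e_type):
    # Mirrors the new "source is None" fallback: a config that predates
    # node groups simply yields an empty container instead of crashing.
    if source is None:
      source = c_type()
    return dict((k, e_type.FromDict(v)) for k, v in source.items())

  print(container_from_dicts(None, dict, _FakeGroup))              # {}
  print(container_from_dicts({"uuid-1": {"name": "default"}},
                             dict, _FakeGroup)["uuid-1"].name)     # default
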
@@ -364,11 +376,21 @@ class ConfigData(ConfigObject):
       node.UpgradeConfig()
     for instance in self.instances.values():
       instance.UpgradeConfig()
+    if self.nodegroups is None:
+      self.nodegroups = {}
+    for nodegroup in self.nodegroups.values():
+      nodegroup.UpgradeConfig()
+    if self.cluster.drbd_usermode_helper is None:
+      # To decide if we set an helper let's check if at least one instance has
+      # a DRBD disk. This does not cover all the possible scenarios but it
+      # gives a good approximation.
+      if self.HasAnyDiskOfType(constants.LD_DRBD8):
+        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
 
 
 class NIC(ConfigObject):
   """Config object representing a network card."""
-  __slots__ = ["mac", "ip", "bridge", "nicparams"]
+  __slots__ = ["mac", "ip", "nicparams"]
 
   @classmethod
   def CheckParameterSyntax(cls, nicparams):
@@ -388,21 +410,6 @@ class NIC(ConfigObject):
       err = "Missing bridged nic link"
       raise errors.ConfigurationError(err)
 
-  def UpgradeConfig(self):
-    """Fill defaults for missing configuration values.
-
-    """
-    if self.nicparams is None:
-      self.nicparams = {}
-      if self.bridge is not None:
-        self.nicparams[constants.NIC_MODE] = constants.NIC_MODE_BRIDGED
-        self.nicparams[constants.NIC_LINK] = self.bridge
-    # bridge is no longer used it 2.1. The slot is left there to support
-    # upgrading, but can be removed once upgrades to the current version
-    # straight from 2.0 are deprecated.
-    if self.bridge is not None:
-      self.bridge = None
-
 
 class Disk(ConfigObject):
   """Config object representing a block device."""
@@ -434,6 +441,8 @@ class Disk(ConfigObject):
     """
     if self.dev_type == constants.LD_LV:
       return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
+    elif self.dev_type == constants.LD_BLOCKDEV:
+      return self.logical_id[1]
     return None
 
   def ChildrenNeeded(self):
@@ -476,7 +485,8 @@ class Disk(ConfigObject):
     devices needs to (or can) be assembled.
 
     """
-    if self.dev_type in [constants.LD_LV, constants.LD_FILE]:
+    if self.dev_type in [constants.LD_LV, constants.LD_FILE,
+                         constants.LD_BLOCKDEV]:
       result = [node]
     elif self.dev_type in constants.LDS_DRBD:
       result = [self.logical_id[0], self.logical_id[1]]
@@ -521,6 +531,28 @@ class Disk(ConfigObject):
                    # be different)
     return result
 
+  def ComputeGrowth(self, amount):
+    """Compute the per-VG growth requirements.
+
+    This only works for VG-based disks.
+
+    @type amount: integer
+    @param amount: the desired increase in (user-visible) disk space
+    @rtype: dict
+    @return: a dictionary of volume-groups and the required size
+
+    """
+    if self.dev_type == constants.LD_LV:
+      return {self.logical_id[0]: amount}
+    elif self.dev_type == constants.LD_DRBD8:
+      if self.children:
+        return self.children[0].ComputeGrowth(amount)
+      else:
+        return {}
+    else:
+      # Other disk types do not require VG space
+      return {}
+
   def RecordGrow(self, amount):
     """Update the size of this disk after growth.
 
@@ -529,7 +561,7 @@ class Disk(ConfigObject):
     actual algorithms from bdev.
 
     """
-    if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE:
+    if self.dev_type in (constants.LD_LV, constants.LD_FILE):
       self.size += amount
     elif self.dev_type == constants.LD_DRBD8:
       if self.children:
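
The new ComputeGrowth method above answers "how much extra space does each volume group need" for a grow request: a plain LV charges its own VG, while a DRBD8 device delegates to its data child. A standalone sketch with mock objects (plain strings stand in for the constants.LD_* values, and the VG/LV names are only illustrative):

  class FakeDisk(object):
    def __init__(self, dev_type, logical_id=None, children=None):
      self.dev_type = dev_type
      self.logical_id = logical_id
      self.children = children or []

    def ComputeGrowth(self, amount):
      if self.dev_type == "lvm":          # constants.LD_LV
        return {self.logical_id[0]: amount}
      elif self.dev_type == "drbd8":      # constants.LD_DRBD8
        if self.children:
          # the data device (children[0]) carries the VG requirement
          return self.children[0].ComputeGrowth(amount)
        return {}
      return {}                           # other types need no VG space

  data = FakeDisk("lvm", logical_id=("xenvg", "data-lv"))
  meta = FakeDisk("lvm", logical_id=("xenvg", "meta-lv"))
  drbd = FakeDisk("drbd8", children=[data, meta])
  print(drbd.ComputeGrowth(1024))   # {'xenvg': 1024}
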
@@ -747,8 +779,10 @@ class Instance(TaggableObject):
     @param lvmap: optional dictionary to receive the
         'node' : ['lv', ...] data.
 
-    @return: None if lvmap arg is given, otherwise, a dictionary
-        of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
+    @return: None if lvmap arg is given, otherwise, a dictionary of
+        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
+        volumeN is of the form "vg_name/lv_name", compatible with
+        GetVolumeList()
 
     """
     if node == None:
@@ -767,7 +801,7 @@ class Instance(TaggableObject):
 
     for dev in devs:
       if dev.dev_type == constants.LD_LV:
-        lvmap[node].append(dev.logical_id[1])
+        lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])
 
       elif dev.dev_type in constants.LDS_DRBD:
         if dev.children:
@@ -799,7 +833,7 @@ class Instance(TaggableObject):
                                  errors.ECODE_INVAL)
     except IndexError:
       raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
-                                 " 0 to %d" % (idx, len(self.disks)),
+                                 " 0 to %d" % (idx, len(self.disks) - 1),
                                  errors.ECODE_INVAL)
 
   def ToDict(self):
@@ -855,6 +889,9 @@ class OS(ConfigObject):
   @ivar supported_parameters: a list of tuples, name and description,
       containing the supported parameters by this OS
 
+  @type VARIANT_DELIM: string
+  @cvar VARIANT_DELIM: the variant delimiter
+
   """
   __slots__ = [
     "name",
@@ -869,6 +906,41 @@ class OS(ConfigObject):
     "supported_parameters",
     ]
 
+  VARIANT_DELIM = "+"
+
+  @classmethod
+  def SplitNameVariant(cls, name):
+    """Splits the name into the proper name and variant.
+
+    @param name: the OS (unprocessed) name
+    @rtype: list
+    @return: a list of two elements; if the original name didn't
+        contain a variant, it's returned as an empty string
+
+    """
+    nv = name.split(cls.VARIANT_DELIM, 1)
+    if len(nv) == 1:
+      nv.append("")
+    return nv
+
+  @classmethod
+  def GetName(cls, name):
+    """Returns the proper name of the os (without the variant).
+
+    @param name: the OS (unprocessed) name
+
+    """
+    return cls.SplitNameVariant(name)[0]
+
+  @classmethod
+  def GetVariant(cls, name):
+    """Returns the variant the os (without the base name).
+
+    @param name: the OS (unprocessed) name
+
+    """
+    return cls.SplitNameVariant(name)[1]
+
 
 class Node(TaggableObject):
   """Config object representing a node."""
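
The OS variant handling added above is just a naming convention: everything after the first "+" in an OS name is the variant, and a name without a delimiter gets an empty variant. A quick standalone illustration (the OS name used here is hypothetical):

  VARIANT_DELIM = "+"

  def split_name_variant(name):
    # Same logic as OS.SplitNameVariant, outside the class for brevity.
    nv = name.split(VARIANT_DELIM, 1)
    if len(nv) == 1:
      nv.append("")
    return nv

  print(split_name_variant("debootstrap+squeeze"))  # ['debootstrap', 'squeeze']
  print(split_name_variant("debootstrap"))          # ['debootstrap', '']
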
@@ -880,8 +952,104 @@ class Node(TaggableObject):
     "master_candidate",
     "offline",
     "drained",
+    "group",
+    "master_capable",
+    "vm_capable",
+    "ndparams",
+    "powered",
     ] + _TIMESTAMPS + _UUID
 
+  def UpgradeConfig(self):
+    """Fill defaults for missing configuration values.
+
+    """
+    # pylint: disable-msg=E0203
+    # because these are "defined" via slots, not manually
+    if self.master_capable is None:
+      self.master_capable = True
+
+    if self.vm_capable is None:
+      self.vm_capable = True
+
+    if self.ndparams is None:
+      self.ndparams = {}
+
+    if self.powered is None:
+      self.powered = True
+
+
+class NodeGroup(ConfigObject):
+  """Config object representing a node group."""
+  __slots__ = [
+    "name",
+    "members",
+    "ndparams",
+    "serial_no",
+    "alloc_policy",
+    ] + _TIMESTAMPS + _UUID
+
+  def ToDict(self):
+    """Custom function for nodegroup.
+
+    This discards the members object, which gets recalculated and is only kept
+    in memory.
+
+    """
+    mydict = super(NodeGroup, self).ToDict()
+    del mydict["members"]
+    return mydict
+
+  @classmethod
+  def FromDict(cls, val):
+    """Custom function for nodegroup.
+
+    The members slot is initialized to an empty list, upon deserialization.
+
+    """
+    obj = super(NodeGroup, cls).FromDict(val)
+    obj.members = []
+    return obj
+
+  def UpgradeConfig(self):
+    """Fill defaults for missing configuration values.
+
+    """
+    if self.ndparams is None:
+      self.ndparams = {}
+
+    if self.serial_no is None:
+      self.serial_no = 1
+
+    if self.alloc_policy is None:
+      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
+
+    # We only update mtime, and not ctime, since we would not be able to provide
+    # a correct value for creation time.
+    if self.mtime is None:
+      self.mtime = time.time()
+
+  def FillND(self, node):
+    """Return filled out ndparams for L{object.Node}
+
+    @type node: L{objects.Node}
+    @param node: A Node object to fill
+    @return a copy of the node's ndparams with defaults filled
+
+    """
+    return self.SimpleFillND(node.ndparams)
+
+  def SimpleFillND(self, ndparams):
+    """Fill a given ndparams dict with defaults.
+
+    @type ndparams: dict
+    @param ndparams: the dict to fill
+    @rtype: dict
+    @return: a copy of the passed in ndparams with missing keys filled
+        from the node group defaults
+
+    """
+    return FillDict(self.ndparams, ndparams)
+
 
 class Cluster(TaggableObject):
   """Config object representing the cluster."""
@@ -892,6 +1060,7 @@ class Cluster(TaggableObject):
     "tcpudp_port_pool",
     "mac_prefix",
     "volume_group_name",
+    "reserved_lvs",
     "drbd_usermode_helper",
     "default_bridge",
     "default_hypervisor",
@@ -900,17 +1069,24 @@ class Cluster(TaggableObject):
     "master_netdev",
     "cluster_name",
     "file_storage_dir",
+    "shared_file_storage_dir",
     "enabled_hypervisors",
     "hvparams",
     "os_hvp",
     "beparams",
     "osparams",
     "nicparams",
+    "ndparams",
     "candidate_pool_size",
     "modify_etc_hosts",
     "modify_ssh_setup",
     "maintain_node_health",
     "uid_pool",
+    "default_iallocator",
+    "hidden_os",
+    "blacklisted_os",
+    "primary_ip_family",
+    "prealloc_wipe_disks",
     ] + _TIMESTAMPS + _UUID
 
   def UpgradeConfig(self):
@@ -933,6 +1109,9 @@ class Cluster(TaggableObject):
 
     if self.osparams is None:
       self.osparams = {}
 
+    if self.ndparams is None:
+      self.ndparams = constants.NDC_DEFAULTS
+
     self.beparams = UpgradeGroupedParams(self.beparams, constants.BEC_DEFAULTS)
     migrate_default_bridge = not self.nicparams
@@ -948,7 +1127,7 @@ class Cluster(TaggableObject):
     if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True
 
-    # default_bridge is no longer used it 2.1. The slot is left there to
+    # default_bridge is no longer used in 2.1. The slot is left there to
     # support auto-upgrading. It can be removed once we decide to deprecate
     # upgrading straight from 2.0.
     if self.default_bridge is not None:
@@ -969,6 +1148,27 @@ class Cluster(TaggableObject):
     if self.uid_pool is None:
       self.uid_pool = []
 
+    if self.default_iallocator is None:
+      self.default_iallocator = ""
+
+    # reserved_lvs added before 2.2
+    if self.reserved_lvs is None:
+      self.reserved_lvs = []
+
+    # hidden and blacklisted operating systems added before 2.2.1
+    if self.hidden_os is None:
+      self.hidden_os = []
+
+    if self.blacklisted_os is None:
+      self.blacklisted_os = []
+
+    # primary_ip_family added before 2.3
+    if self.primary_ip_family is None:
+      self.primary_ip_family = AF_INET
+
+    if self.prealloc_wipe_disks is None:
+      self.prealloc_wipe_disks = False
+
   def ToDict(self):
     """Custom function for cluster.
 
@@ -1105,6 +1305,30 @@ class Cluster(TaggableObject):
       # specified params
     return FillDict(result, os_params)
 
+  def FillND(self, node, nodegroup):
+    """Return filled out ndparams for L{objects.NodeGroup} and L{object.Node}
+
+    @type node: L{objects.Node}
+    @param node: A Node object to fill
+    @type nodegroup: L{objects.NodeGroup}
+    @param nodegroup: A Node object to fill
+    @return a copy of the node's ndparams with defaults filled
+
+    """
+    return self.SimpleFillND(nodegroup.FillND(node))
+
+  def SimpleFillND(self, ndparams):
+    """Fill a given ndparams dict with defaults.
+
+    @type ndparams: dict
+    @param ndparams: the dict to fill
+    @rtype: dict
+    @return: a copy of the passed in ndparams with missing keys filled
+        from the cluster defaults
+
+    """
+    return FillDict(self.ndparams, ndparams)
+
 
 class BlockDevStatus(ConfigObject):
   """Config object representing the status of a block device."""
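
Together with NodeGroup.FillND above, Cluster.FillND builds a three-level override chain for node parameters: values set on the node win over the node group's, which in turn win over the cluster defaults, because FillDict copies the defaults and then updates them with the more specific dict. A standalone sketch of that chain (fill_dict mimics FillDict, and the parameter names are only illustrative):

  def fill_dict(defaults, custom):
    ret = defaults.copy()
    ret.update(custom)
    return ret

  cluster_ndparams = {"oob_program": "", "spindle_count": 1}
  group_ndparams = {"spindle_count": 4}
  node_ndparams = {"oob_program": "/usr/bin/oob"}

  # Equivalent of cluster.FillND(node, group), i.e.
  # cluster.SimpleFillND(group.FillND(node)):
  filled = fill_dict(cluster_ndparams, fill_dict(group_ndparams, node_ndparams))
  print(filled)  # node wins for oob_program, the group wins for spindle_count
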
@@ -1141,6 +1365,8 @@ class ImportExportOptions(ConfigObject):
   @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
   @ivar compress: Compression method (one of L{constants.IEC_ALL})
   @ivar magic: Used to ensure the connection goes to the right disk
+  @ivar ipv6: Whether to use IPv6
+  @ivar connect_timeout: Number of seconds for establishing connection
 
   """
   __slots__ = [
@@ -1148,6 +1374,8 @@ class ImportExportOptions(ConfigObject):
     "ca_pem",
     "compress",
     "magic",
+    "ipv6",
+    "connect_timeout",
     ]
 
 
@@ -1185,6 +1413,123 @@ class ConfdReply(ConfigObject):
     ]
 
 
+class QueryFieldDefinition(ConfigObject):
+  """Object holding a query field definition.
+
+  @ivar name: Field name
+  @ivar title: Human-readable title
+  @ivar kind: Field type
+  @ivar doc: Human-readable description
+
+  """
+  __slots__ = [
+    "name",
+    "title",
+    "kind",
+    "doc",
+    ]
+
+
+class _QueryResponseBase(ConfigObject):
+  __slots__ = [
+    "fields",
+    ]
+
+  def ToDict(self):
+    """Custom function for serializing.
+
+    """
+    mydict = super(_QueryResponseBase, self).ToDict()
+    mydict["fields"] = self._ContainerToDicts(mydict["fields"])
+    return mydict
+
+  @classmethod
+  def FromDict(cls, val):
+    """Custom function for de-serializing.
+
+    """
+    obj = super(_QueryResponseBase, cls).FromDict(val)
+    obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
+    return obj
+
+
+class QueryRequest(ConfigObject):
+  """Object holding a query request.
+
+  """
+  __slots__ = [
+    "what",
+    "fields",
+    "filter",
+    ]
+
+
+class QueryResponse(_QueryResponseBase):
+  """Object holding the response to a query.
+
+  @ivar fields: List of L{QueryFieldDefinition} objects
+  @ivar data: Requested data
+
+  """
+  __slots__ = [
+    "data",
+    ]
+
+
+class QueryFieldsRequest(ConfigObject):
+  """Object holding a request for querying available fields.
+
+  """
+  __slots__ = [
+    "what",
+    "fields",
+    ]
+
+
+class QueryFieldsResponse(_QueryResponseBase):
+  """Object holding the response to a query for fields.
+
+  @ivar fields: List of L{QueryFieldDefinition} objects
+
+  """
+  __slots__ = [
+    ]
+
+
+class InstanceConsole(ConfigObject):
+  """Object describing how to access the console of an instance.
+
+  """
+  __slots__ = [
+    "instance",
+    "kind",
+    "message",
+    "host",
+    "port",
+    "user",
+    "command",
+    "display",
+    ]
+
+  def Validate(self):
+    """Validates contents of this object.
+
+    """
+    assert self.kind in constants.CONS_ALL, "Unknown console type"
+    assert self.instance, "Missing instance name"
+    assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC]
+    assert self.host or self.kind == constants.CONS_MESSAGE
+    assert self.port or self.kind in [constants.CONS_MESSAGE,
+                                      constants.CONS_SSH]
+    assert self.user or self.kind in [constants.CONS_MESSAGE,
+                                      constants.CONS_VNC]
+    assert self.command or self.kind in [constants.CONS_MESSAGE,
+                                         constants.CONS_VNC]
+    assert self.display or self.kind in [constants.CONS_MESSAGE,
+                                         constants.CONS_SSH]
+    return True
+
+
 class SerializableConfigParser(ConfigParser.SafeConfigParser):
   """Simple wrapper over ConfigParse that allows serialization.
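
InstanceConsole.Validate above encodes, per console kind, which fields have to be present (for example a VNC console needs host, port and display, while a message console only needs the message text). A standalone sketch of the same rules, using plain strings in place of the constants.CONS_* values (assumed here to cover message, ssh and vnc):

  def check_console(kind, instance, message=None, host=None, port=None,
                    user=None, command=None, display=None):
    # Same checks as InstanceConsole.Validate, with literal kind names.
    assert kind in ("message", "ssh", "vnc"), "Unknown console type"
    assert instance, "Missing instance name"
    assert message or kind in ("ssh", "vnc")
    assert host or kind == "message"
    assert port or kind in ("message", "ssh")
    assert user or kind in ("message", "vnc")
    assert command or kind in ("message", "vnc")
    assert display or kind in ("message", "ssh")
    return True

  # A VNC console needs an endpoint and a display number, nothing else:
  check_console("vnc", "instance1.example.com",
                host="node1.example.com", port=11000, display=100)
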