X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/a1f445d388a8cd3a1b08ca0dfc9df55b94e47800..2492231f7381ce33f2164322f799c84b2d7cceef:/lib/config.py?ds=sidebyside

diff --git a/lib/config.py b/lib/config.py
index 0c769d2..7d81b37 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -31,39 +31,140 @@ much memory.
 
 """
 
+# pylint: disable-msg=R0904
+# R0904: Too many public methods
+
 import os
-import tempfile
 import random
+import logging
+import time
 
 from ganeti import errors
-from ganeti import logger
+from ganeti import locking
 from ganeti import utils
 from ganeti import constants
 from ganeti import rpc
 from ganeti import objects
+from ganeti import serializer
+from ganeti import uidpool
+from ganeti import netutils
+from ganeti import runtime
+
+
+_config_lock = locking.SharedLock("ConfigWriter")
+
+# job id used for resource management at config upgrade time
+_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
+
+
+def _ValidateConfig(data):
+  """Verifies that a configuration object looks valid.
+
+  This only verifies the version of the configuration.
+
+  @raise errors.ConfigurationError: if the version differs from what
+      we expect
+
+  """
+  if data.version != constants.CONFIG_VERSION:
+    raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION, data.version)
+
+
+class TemporaryReservationManager:
+  """A temporary resource reservation manager.
+
+  This is used to reserve resources in a job, before using them, making sure
+  other jobs cannot get them in the meantime.
+
+  """
+  def __init__(self):
+    self._ec_reserved = {}
+
+  def Reserved(self, resource):
+    for holder_reserved in self._ec_reserved.values():
+      if resource in holder_reserved:
+        return True
+    return False
+
+  def Reserve(self, ec_id, resource):
+    if self.Reserved(resource):
+      raise errors.ReservationError("Duplicate reservation for resource '%s'"
+                                    % str(resource))
+    if ec_id not in self._ec_reserved:
+      self._ec_reserved[ec_id] = set([resource])
+    else:
+      self._ec_reserved[ec_id].add(resource)
+
+  def DropECReservations(self, ec_id):
+    if ec_id in self._ec_reserved:
+      del self._ec_reserved[ec_id]
+
+  def GetReserved(self):
+    all_reserved = set()
+    for holder_reserved in self._ec_reserved.values():
+      all_reserved.update(holder_reserved)
+    return all_reserved
+
+  def Generate(self, existing, generate_one_fn, ec_id):
+    """Generate a new resource of this type.
+
+    """
+    assert callable(generate_one_fn)
+
+    all_elems = self.GetReserved()
+    all_elems.update(existing)
+    retries = 64
+    while retries > 0:
+      new_resource = generate_one_fn()
+      if new_resource is not None and new_resource not in all_elems:
+        break
+    else:
+      raise errors.ConfigurationError("Not able to generate new resource"
+                                      " (last tried: %s)" % new_resource)
+    self.Reserve(ec_id, new_resource)
+    return new_resource
+
+
+def _MatchNameComponentIgnoreCase(short_name, names):
+  """Wrapper around L{utils.text.MatchNameComponent}.
+
+  """
+  return utils.MatchNameComponent(short_name, names, case_sensitive=False)
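
For readers new to the reservation scheme introduced above, the following
sketch shows the intended call pattern (editor's illustration against the
patched module; the "job-123" execution-context id is arbitrary):

# Editor's sketch of TemporaryReservationManager usage; not part of the patch.
from ganeti import config

trm = config.TemporaryReservationManager()
trm.Reserve("job-123", "aa:00:00:3f:12:01")   # tie a MAC to one job
assert trm.Reserved("aa:00:00:3f:12:01")      # visible to every other job

# Generate() calls the supplied callback until it returns a value that is
# neither in 'existing' nor already reserved, then reserves it for ec_id.
new_mac = trm.Generate(existing=set(),
                       generate_one_fn=lambda: "aa:00:00:00:00:01",
                       ec_id="job-123")

trm.DropECReservations("job-123")             # job finished: release all
assert not trm.Reserved(new_mac)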

 class ConfigWriter:
   """The interface to the cluster configuration.
 
+  @ivar _temporary_lvs: reservation manager for temporary LVs
+  @ivar _all_rms: a list of all temporary reservation managers
+
   """
-  def __init__(self, cfg_file=None, offline=False):
+  def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
+               accept_foreign=False):
     self.write_count = 0
+    self._lock = _config_lock
     self._config_data = None
-    self._config_time = None
-    self._config_size = None
-    self._config_inode = None
     self._offline = offline
     if cfg_file is None:
       self._cfg_file = constants.CLUSTER_CONF_FILE
     else:
       self._cfg_file = cfg_file
-    self._temporary_ids = set()
+    self._getents = _getents
+    self._temporary_ids = TemporaryReservationManager()
+    self._temporary_drbds = {}
+    self._temporary_macs = TemporaryReservationManager()
+    self._temporary_secrets = TemporaryReservationManager()
+    self._temporary_lvs = TemporaryReservationManager()
+    self._all_rms = [self._temporary_ids, self._temporary_macs,
+                     self._temporary_secrets, self._temporary_lvs]
     # Note: in order to prevent errors when resolving our name in
     # _DistributeConfig, we compute it here once and reuse it; it's
     # better to raise an error before starting to modify the config
     # file than after it was modified
-    self._my_hostname = utils.HostInfo().name
+    self._my_hostname = netutils.Hostname.GetSysName()
+    self._last_cluster_serial = -1
+    self._cfg_id = None
+    self._OpenConfig(accept_foreign)
 
   # this method needs to be static, so that we can call it on the class
   @staticmethod
@@ -73,35 +174,82 @@ class ConfigWriter:
     """
     return os.path.exists(constants.CLUSTER_CONF_FILE)
 
-  def GenerateMAC(self):
+  def _GenerateOneMAC(self):
+    """Generate one MAC address.
+
+    """
+    prefix = self._config_data.cluster.mac_prefix
+    byte1 = random.randrange(0, 256)
+    byte2 = random.randrange(0, 256)
+    byte3 = random.randrange(0, 256)
+    mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
+    return mac
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNdParams(self, node):
+    """Get the node params populated with cluster defaults.
+
+    @type node: L{objects.Node}
+    @param node: The node we want to know the params for
+    @return: A dict with the filled in node params
+
+    """
+    nodegroup = self._UnlockedGetNodeGroup(node.group)
+    return self._config_data.cluster.FillND(node, nodegroup)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GenerateMAC(self, ec_id):
     """Generate a MAC for an instance.
 
     This should check the current instances for duplicates.
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
-    prefix = self._config_data.cluster.mac_prefix
+    existing = self._AllMACs()
+    return self._temporary_ids.Generate(existing, self._GenerateOneMAC, ec_id)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def ReserveMAC(self, mac, ec_id):
+    """Reserve a MAC for an instance.
+
+    This only checks instances managed by this cluster, it does not
+    check for potential collisions elsewhere.
+
+    """
     all_macs = self._AllMACs()
-    retries = 64
-    while retries > 0:
-      byte1 = random.randrange(0, 256)
-      byte2 = random.randrange(0, 256)
-      byte3 = random.randrange(0, 256)
-      mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
-      if mac not in all_macs:
-        break
-      retries -= 1
+    if mac in all_macs:
+      raise errors.ReservationError("mac already in use")
     else:
-      raise errors.ConfigurationError("Can't generate unique MAC")
-    return mac
+      self._temporary_macs.Reserve(ec_id, mac)
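
The split between _GenerateOneMAC (pure generation) and the reserving
wrappers shows up in how a caller consumes them; a rough sketch (editor's
illustration, names approximate):

# Editor's sketch: how an opcode might obtain a MAC under the new API.
# 'cfg' is a ConfigWriter and 'ec_id' the job's execution-context id.
def pick_mac(cfg, requested_mac, ec_id):
    if requested_mac is None:
        # draws fresh candidates until one is neither configured nor reserved
        return cfg.GenerateMAC(ec_id)
    # explicit MAC: fail fast if another instance or job already holds it
    cfg.ReserveMAC(requested_mac, ec_id)
    return requested_mac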
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def ReserveLV(self, lv_name, ec_id):
+    """Reserve a VG/LV pair for an instance.
+
+    @type lv_name: string
+    @param lv_name: the logical volume name to reserve
+
+    """
+    all_lvs = self._AllLVs()
+    if lv_name in all_lvs:
+      raise errors.ReservationError("LV already in use")
+    else:
+      self._temporary_lvs.Reserve(ec_id, lv_name)
 
-  def _ComputeAllLVs(self):
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GenerateDRBDSecret(self, ec_id):
+    """Generate a DRBD secret.
+
+    This checks the current disks for duplicates.
+
+    """
+    return self._temporary_secrets.Generate(self._AllDRBDSecrets(),
+                                            utils.GenerateSecret,
+                                            ec_id)
+
+  def _AllLVs(self):
     """Compute the list of all LVs.
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
     lvnames = set()
     for instance in self._config_data.instances.values():
       node_data = instance.MapLVsByNode()
@@ -109,46 +257,56 @@ class ConfigWriter:
         lvnames.update(lv_list)
     return lvnames
 
-  def GenerateUniqueID(self, exceptions=None):
-    """Generate an unique disk name.
+  def _AllIDs(self, include_temporary):
+    """Compute the list of all UUIDs and names we have.
+
+    @type include_temporary: boolean
+    @param include_temporary: whether to include the _temporary_ids set
+    @rtype: set
+    @return: a set of IDs
+
+    """
+    existing = set()
+    if include_temporary:
+      existing.update(self._temporary_ids.GetReserved())
+    existing.update(self._AllLVs())
+    existing.update(self._config_data.instances.keys())
+    existing.update(self._config_data.nodes.keys())
+    existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
+    return existing
+
+  def _GenerateUniqueID(self, ec_id):
+    """Generate a unique UUID.
 
     This checks the current node, instances and disk names for
     duplicates.
 
-    Args:
-      - exceptions: a list with some other names which should be checked
-                    for uniqueness (used for example when you want to get
-                    more than one id at one time without adding each one in
-                    turn to the config file
+    @rtype: string
+    @return: the unique id
+
+    """
+    existing = self._AllIDs(include_temporary=False)
+    return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GenerateUniqueID(self, ec_id):
+    """Generate a unique ID.
 
-    Returns: the unique id as a string
+    This is just a wrapper over the unlocked version.
+
+    @type ec_id: string
+    @param ec_id: unique id for the job to reserve the id for
 
     """
-    existing = set()
-    existing.update(self._temporary_ids)
-    existing.update(self._ComputeAllLVs())
-    existing.update(self._config_data.instances.keys())
-    existing.update(self._config_data.nodes.keys())
-    if exceptions is not None:
-      existing.update(exceptions)
-    retries = 64
-    while retries > 0:
-      unique_id = utils.GetUUID()
-      if unique_id not in existing and unique_id is not None:
-        break
-    else:
-      raise errors.ConfigurationError("Not able generate an unique ID"
-                                      " (last tried ID: %s" % unique_id)
-    self._temporary_ids.add(unique_id)
-    return unique_id
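
The locked wrapper over an _Unlocked* helper is the pattern this patch
applies to nearly every public method; stripped to its core, the
locking.ssynchronized decorator behaves roughly as below (editor's sketch
of the idea, not the ganeti implementation):

# Editor's sketch: public methods take _config_lock via ssynchronized and
# delegate to _Unlocked* helpers that already-locked code can call directly.
def ssynchronized_sketch(lock, shared=0):
    def wrap(fn):
        def sync_function(*args, **kwargs):
            lock.acquire(shared=shared)   # shared=1 -> reader, 0 -> writer
            try:
                return fn(*args, **kwargs)
            finally:
                lock.release()
        return sync_function
    return wrap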
 
   def _AllMACs(self):
     """Return all MACs present in the config.
 
-    """
-    self._OpenConfig()
-    self._ReleaseLock()
+    @rtype: list
+    @return: the list of all MACs
 
+    """
     result = []
     for instance in self._config_data.instances.values():
       for nic in instance.nics:
@@ -156,33 +314,292 @@ class ConfigWriter:
 
     return result
 
-  def VerifyConfig(self):
-    """Stub verify function.
+  def _AllDRBDSecrets(self):
+    """Return all DRBD secrets present in the config.
+
+    @rtype: list
+    @return: the list of all DRBD secrets
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
+    def helper(disk, result):
+      """Recursively gather secrets from this disk."""
+      if disk.dev_type == constants.DT_DRBD8:
+        result.append(disk.logical_id[5])
+      if disk.children:
+        for child in disk.children:
+          helper(child, result)
 
     result = []
+    for instance in self._config_data.instances.values():
+      for disk in instance.disks:
+        helper(disk, result)
+
+    return result
+
+  def _CheckDiskIDs(self, disk, l_ids, p_ids):
+    """Compute duplicate disk IDs.
+
+    @type disk: L{objects.Disk}
+    @param disk: the disk at which to start searching
+    @type l_ids: list
+    @param l_ids: list of current logical ids
+    @type p_ids: list
+    @param p_ids: list of current physical ids
+    @rtype: list
+    @return: a list of error messages
+
+    """
+    result = []
+    if disk.logical_id is not None:
+      if disk.logical_id in l_ids:
+        result.append("duplicate logical id %s" % str(disk.logical_id))
+      else:
+        l_ids.append(disk.logical_id)
+    if disk.physical_id is not None:
+      if disk.physical_id in p_ids:
+        result.append("duplicate physical id %s" % str(disk.physical_id))
+      else:
+        p_ids.append(disk.physical_id)
+
+    if disk.children:
+      for child in disk.children:
+        result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
+    return result
+
+  def _UnlockedVerifyConfig(self):
+    """Verify function.
+
+    @rtype: list
+    @return: a list of error messages; a non-empty list signifies
+        configuration errors
+
+    """
+    # pylint: disable-msg=R0914
+    result = []
     seen_macs = []
+    ports = {}
     data = self._config_data
+    cluster = data.cluster
+    seen_lids = []
+    seen_pids = []
+
+    # global cluster checks
+    if not cluster.enabled_hypervisors:
+      result.append("enabled hypervisors list doesn't have any entries")
+    invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
+    if invalid_hvs:
+      result.append("enabled hypervisors contains invalid entries: %s" %
+                    invalid_hvs)
+    missing_hvp = (set(cluster.enabled_hypervisors) -
+                   set(cluster.hvparams.keys()))
+    if missing_hvp:
+      result.append("hypervisor parameters missing for the enabled"
+                    " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))
+
+    if cluster.master_node not in data.nodes:
+      result.append("cluster has invalid primary node '%s'" %
+                    cluster.master_node)
+
+    def _helper(owner, attr, value, template):
+      try:
+        utils.ForceDictType(value, template)
+      except errors.GenericError, err:
+        result.append("%s has invalid %s: %s" % (owner, attr, err))
+
+    def _helper_nic(owner, params):
+      try:
+        objects.NIC.CheckParameterSyntax(params)
+      except errors.ConfigurationError, err:
+        result.append("%s has invalid nicparams: %s" % (owner, err))
+
+    # check cluster parameters
+    _helper("cluster", "beparams", cluster.SimpleFillBE({}),
+            constants.BES_PARAMETER_TYPES)
+    _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
+            constants.NICS_PARAMETER_TYPES)
+    _helper_nic("cluster", cluster.SimpleFillNIC({}))
+    _helper("cluster", "ndparams", cluster.SimpleFillND({}),
+            constants.NDS_PARAMETER_TYPES)
+
+    # per-instance checks
     for instance_name in data.instances:
       instance = data.instances[instance_name]
+      if instance.name != instance_name:
+        result.append("instance '%s' is indexed by wrong name '%s'" %
+                      (instance.name, instance_name))
       if instance.primary_node not in data.nodes:
-        result.append("Instance '%s' has invalid primary node '%s'" %
+        result.append("instance '%s' has invalid primary node '%s'" %
                       (instance_name, instance.primary_node))
       for snode in instance.secondary_nodes:
         if snode not in data.nodes:
-          result.append("Instance '%s' has invalid secondary node '%s'" %
+          result.append("instance '%s' has invalid secondary node '%s'" %
                         (instance_name, snode))
       for idx, nic in enumerate(instance.nics):
         if nic.mac in seen_macs:
-          result.append("Instance '%s' has NIC %d mac %s duplicate" %
+          result.append("instance '%s' has NIC %d mac %s duplicate" %
                         (instance_name, idx, nic.mac))
         else:
           seen_macs.append(nic.mac)
+        if nic.nicparams:
+          filled = cluster.SimpleFillNIC(nic.nicparams)
+          owner = "instance %s nic %d" % (instance.name, idx)
+          _helper(owner, "nicparams",
+                  filled, constants.NICS_PARAMETER_TYPES)
+          _helper_nic(owner, filled)
+
+      # parameter checks
+      if instance.beparams:
+        _helper("instance %s" % instance.name, "beparams",
+                cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
+
+      # gather the drbd ports for duplicate checks
+      for dsk in instance.disks:
+        if dsk.dev_type in constants.LDS_DRBD:
+          tcp_port = dsk.logical_id[2]
+          if tcp_port not in ports:
+            ports[tcp_port] = []
+          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
+      # gather network port reservation
+      net_port = getattr(instance, "network_port", None)
+      if net_port is not None:
+        if net_port not in ports:
+          ports[net_port] = []
+        ports[net_port].append((instance.name, "network port"))
+
+      # instance disk verify
+      for idx, disk in enumerate(instance.disks):
+        result.extend(["instance '%s' disk %d error: %s" %
+                       (instance.name, idx, msg) for msg in disk.Verify()])
+        result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+
+    # cluster-wide pool of free ports
+    for free_port in cluster.tcpudp_port_pool:
+      if free_port not in ports:
+        ports[free_port] = []
+      ports[free_port].append(("cluster", "port marked as free"))
+
+    # compute tcp/udp duplicate ports
+    keys = ports.keys()
+    keys.sort()
+    for pnum in keys:
+      pdata = ports[pnum]
+      if len(pdata) > 1:
+        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
+        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
+
+    # highest used tcp port check
+    if keys:
+      if keys[-1] > cluster.highest_used_port:
+        result.append("Highest used port mismatch, saved %s, computed %s" %
+                      (cluster.highest_used_port, keys[-1]))
+
+    if not data.nodes[cluster.master_node].master_candidate:
+      result.append("Master node is not a master candidate")
+
+    # master candidate checks
+    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
+    if mc_now < mc_max:
+      result.append("Not enough master candidates: actual %d, target %d" %
+                    (mc_now, mc_max))
+
+    # node checks
+    for node_name, node in data.nodes.items():
+      if node.name != node_name:
+        result.append("Node '%s' is indexed by wrong name '%s'" %
+                      (node.name, node_name))
+      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
+        result.append("Node %s state is invalid: master_candidate=%s,"
+                      " drain=%s, offline=%s" %
+                      (node.name, node.master_candidate, node.drained,
+                       node.offline))
+      if node.group not in data.nodegroups:
+        result.append("Node '%s' has invalid group '%s'" %
+                      (node.name, node.group))
+      else:
+        _helper("node %s" % node.name, "ndparams",
+                cluster.FillND(node, data.nodegroups[node.group]),
+                constants.NDS_PARAMETER_TYPES)
+
+    # nodegroups checks
+    nodegroups_names = set()
+    for nodegroup_uuid in data.nodegroups:
+      nodegroup = data.nodegroups[nodegroup_uuid]
+      if nodegroup.uuid != nodegroup_uuid:
+        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
+                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
+      if utils.UUID_RE.match(nodegroup.name.lower()):
+        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
+                      (nodegroup.name, nodegroup.uuid))
+      if nodegroup.name in nodegroups_names:
+        result.append("duplicate node group name '%s'" % nodegroup.name)
+      else:
+        nodegroups_names.add(nodegroup.name)
+      if nodegroup.ndparams:
+        _helper("group %s" % nodegroup.name, "ndparams",
+                cluster.SimpleFillND(nodegroup.ndparams),
+                constants.NDS_PARAMETER_TYPES)
+
+
+    # drbd minors check
+    _, duplicates = self._UnlockedComputeDRBDMap()
+    for node, minor, instance_a, instance_b in duplicates:
+      result.append("DRBD minor %d on node %s is assigned twice to instances"
+                    " %s and %s" % (minor, node, instance_a, instance_b))
+
+    # IP checks
+    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
+    ips = {}
+
+    def _AddIpAddress(ip, name):
+      ips.setdefault(ip, []).append(name)
+
+    _AddIpAddress(cluster.master_ip, "cluster_ip")
+
+    for node in data.nodes.values():
+      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
+      if node.secondary_ip != node.primary_ip:
+        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)
+
+    for instance in data.instances.values():
+      for idx, nic in enumerate(instance.nics):
+        if nic.ip is None:
+          continue
+
+        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
+        nic_mode = nicparams[constants.NIC_MODE]
+        nic_link = nicparams[constants.NIC_LINK]
+
+        if nic_mode == constants.NIC_MODE_BRIDGED:
+          link = "bridge:%s" % nic_link
+        elif nic_mode == constants.NIC_MODE_ROUTED:
+          link = "route:%s" % nic_link
+        else:
+          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)
+
+        _AddIpAddress("%s/%s" % (link, nic.ip),
+                      "instance:%s/nic:%d" % (instance.name, idx))
+
+    for ip, owners in ips.items():
+      if len(owners) > 1:
+        result.append("IP address %s is used by multiple owners: %s" %
+                      (ip, utils.CommaJoin(owners)))
+
     return result
 
-  def SetDiskID(self, disk, node_name):
+  @locking.ssynchronized(_config_lock, shared=1)
+  def VerifyConfig(self):
+    """Verify function.
+
+    This is just a wrapper over L{_UnlockedVerifyConfig}.
+
+    @rtype: list
+    @return: a list of error messages; a non-empty list signifies
+        configuration errors
+
+    """
+    return self._UnlockedVerifyConfig()
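
The IP bookkeeping above hinges on collecting every address under a
composite key first and complaining only on collisions; a condensed
illustration of the same idiom (editor's sketch, made-up addresses):

# Editor's sketch of the duplicate-IP idiom used in _UnlockedVerifyConfig:
# collect owners per key first, then report keys with more than one owner.
ips = {}
for ip, owner in [("192.0.2.1", "cluster_ip"),
                  ("192.0.2.1", "node:node1/primary"),
                  ("192.0.2.2", "node:node2/primary")]:
    ips.setdefault(ip, []).append(owner)
for ip, owners in ips.items():
    if len(owners) > 1:
        print("IP address %s is used by multiple owners: %s"
              % (ip, ", ".join(owners)))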
+ """ if disk.children: for child in disk.children: - self.SetDiskID(child, node_name) + self._UnlockedSetDiskID(child, node_name) if disk.logical_id is None and disk.physical_id is not None: return - if disk.dev_type in constants.LDS_DRBD: - pnode, snode, port = disk.logical_id + if disk.dev_type == constants.LD_DRBD8: + pnode, snode, port, pminor, sminor, secret = disk.logical_id if node_name not in (pnode, snode): raise errors.ConfigurationError("DRBD device not knowing node %s" % node_name) - pnode_info = self.GetNodeInfo(pnode) - snode_info = self.GetNodeInfo(snode) + pnode_info = self._UnlockedGetNodeInfo(pnode) + snode_info = self._UnlockedGetNodeInfo(snode) if pnode_info is None or snode_info is None: raise errors.ConfigurationError("Can't find primary or secondary node" " for %s" % str(disk)) + p_data = (pnode_info.secondary_ip, port) + s_data = (snode_info.secondary_ip, port) if pnode == node_name: - disk.physical_id = (pnode_info.secondary_ip, port, - snode_info.secondary_ip, port) + disk.physical_id = p_data + s_data + (pminor, secret) else: # it must be secondary, we tested above - disk.physical_id = (snode_info.secondary_ip, port, - pnode_info.secondary_ip, port) + disk.physical_id = s_data + p_data + (sminor, secret) else: disk.physical_id = disk.logical_id return + @locking.ssynchronized(_config_lock) + def SetDiskID(self, disk, node_name): + """Convert the unique ID to the ID needed on the target nodes. + + This is used only for drbd, which needs ip/port configuration. + + The routine descends down and updates its children also, because + this helps when the only the top device is passed to the remote + node. + + """ + return self._UnlockedSetDiskID(disk, node_name) + + @locking.ssynchronized(_config_lock) def AddTcpUdpPort(self, port): """Adds a new port to the available port pool. @@ -225,18 +658,17 @@ class ConfigWriter: if not isinstance(port, int): raise errors.ProgrammerError("Invalid type passed for port") - self._OpenConfig() self._config_data.cluster.tcpudp_port_pool.add(port) self._WriteConfig() + @locking.ssynchronized(_config_lock, shared=1) def GetPortList(self): """Returns a copy of the current port list. """ - self._OpenConfig() - self._ReleaseLock() return self._config_data.cluster.tcpudp_port_pool.copy() + @locking.ssynchronized(_config_lock) def AllocatePort(self): """Allocate a port. @@ -245,8 +677,6 @@ class ConfigWriter: highest_used_port). """ - self._OpenConfig() - # If there are TCP/IP ports configured, we use them first. if self._config_data.cluster.tcpudp_port_pool: port = self._config_data.cluster.tcpudp_port_pool.pop() @@ -261,60 +691,479 @@ class ConfigWriter: self._WriteConfig() return port + def _UnlockedComputeDRBDMap(self): + """Compute the used DRBD minor/nodes. 
 
+  def _UnlockedComputeDRBDMap(self):
+    """Compute the used DRBD minor/nodes.
+
+    @rtype: (dict, list)
+    @return: dictionary of node_name: dict of minor: instance_name;
+        the returned dict will have all the nodes in it (even if with
+        an empty list), and a list of duplicates; if the duplicates
+        list is not empty, the configuration is corrupted and its caller
+        should raise an exception
+
+    """
+    def _AppendUsedPorts(instance_name, disk, used):
+      duplicates = []
+      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
+        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
+        for node, port in ((node_a, minor_a), (node_b, minor_b)):
+          assert node in used, ("Node '%s' of instance '%s' not found"
+                                " in node list" % (node, instance_name))
+          if port in used[node]:
+            duplicates.append((node, port, instance_name, used[node][port]))
+          else:
+            used[node][port] = instance_name
+      if disk.children:
+        for child in disk.children:
+          duplicates.extend(_AppendUsedPorts(instance_name, child, used))
+      return duplicates
+
+    duplicates = []
+    my_dict = dict((node, {}) for node in self._config_data.nodes)
+    for instance in self._config_data.instances.itervalues():
+      for disk in instance.disks:
+        duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
+    for (node, minor), instance in self._temporary_drbds.iteritems():
+      if minor in my_dict[node] and my_dict[node][minor] != instance:
+        duplicates.append((node, minor, instance, my_dict[node][minor]))
+      else:
+        my_dict[node][minor] = instance
+    return my_dict, duplicates
+
+  @locking.ssynchronized(_config_lock)
+  def ComputeDRBDMap(self):
+    """Compute the used DRBD minor/nodes.
+
+    This is just a wrapper over L{_UnlockedComputeDRBDMap}.
+
+    @return: dictionary of node_name: dict of minor: instance_name;
+        the returned dict will have all the nodes in it (even if with
+        an empty list).
+
+    """
+    d_map, duplicates = self._UnlockedComputeDRBDMap()
+    if duplicates:
+      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
+                                      str(duplicates))
+    return d_map
+
+  @locking.ssynchronized(_config_lock)
+  def AllocateDRBDMinor(self, nodes, instance):
+    """Allocate a drbd minor.
+
+    The free minor will be automatically computed from the existing
+    devices. A node can be given multiple times in order to allocate
+    multiple minors. The result is the list of minors, in the same
+    order as the passed nodes.
+
+    @type instance: string
+    @param instance: the instance for which we allocate minors
+
+    """
+    assert isinstance(instance, basestring), \
+           "Invalid argument '%s' passed to AllocateDRBDMinor" % instance
+
+    d_map, duplicates = self._UnlockedComputeDRBDMap()
+    if duplicates:
+      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
+                                      str(duplicates))
+    result = []
+    for nname in nodes:
+      ndata = d_map[nname]
+      if not ndata:
+        # no minors used, we can start at 0
+        result.append(0)
+        ndata[0] = instance
+        self._temporary_drbds[(nname, 0)] = instance
+        continue
+      keys = ndata.keys()
+      keys.sort()
+      ffree = utils.FirstFree(keys)
+      if ffree is None:
+        # return the next minor
+        # TODO: implement high-limit check
+        minor = keys[-1] + 1
+      else:
+        minor = ffree
+      # double-check minor against current instances
+      assert minor not in d_map[nname], \
+             ("Attempt to reuse allocated DRBD minor %d on node %s,"
+              " already allocated to instance %s" %
+              (minor, nname, d_map[nname][minor]))
+      ndata[minor] = instance
+      # double-check minor against reservation
+      r_key = (nname, minor)
+      assert r_key not in self._temporary_drbds, \
+             ("Attempt to reuse reserved DRBD minor %d on node %s,"
+              " reserved for instance %s" %
+              (minor, nname, self._temporary_drbds[r_key]))
+      self._temporary_drbds[r_key] = instance
+      result.append(minor)
+    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
+                  nodes, result)
+    return result
+
+  def _UnlockedReleaseDRBDMinors(self, instance):
+    """Release temporary drbd minors allocated for a given instance.
+
+    @type instance: string
+    @param instance: the instance for which temporary minors should be
+                     released
+
+    """
+    assert isinstance(instance, basestring), \
+           "Invalid argument passed to ReleaseDRBDMinors"
+    for key, name in self._temporary_drbds.items():
+      if name == instance:
+        del self._temporary_drbds[key]
+
+  @locking.ssynchronized(_config_lock)
+  def ReleaseDRBDMinors(self, instance):
+    """Release temporary drbd minors allocated for a given instance.
+
+    This should be called on the error paths, on the success paths
+    it's automatically called by the ConfigWriter add and update
+    functions.
+
+    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
+
+    @type instance: string
+    @param instance: the instance for which temporary minors should be
+                     released
+
+    """
+    self._UnlockedReleaseDRBDMinors(instance)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetConfigVersion(self):
+    """Get the configuration version.
+
+    @return: Config version
+
+    """
+    return self._config_data.version
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetClusterName(self):
+    """Get cluster name.
+
+    @return: Cluster name
+
+    """
+    return self._config_data.cluster.cluster_name
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterNode(self):
+    """Get the hostname of the master node for this cluster.
+
+    @return: Master hostname
+
+    """
+    return self._config_data.cluster.master_node
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterIP(self):
+    """Get the IP of the master node for this cluster.
+
+    @return: Master IP
+
+    """
+    return self._config_data.cluster.master_ip
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterNetdev(self):
+    """Get the master network device for this cluster.
+
+    """
+    return self._config_data.cluster.master_netdev
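
AllocateDRBDMinor leans on utils.FirstFree to find the lowest gap in the
sorted minor list; a standalone restatement of that behaviour (editor's
sketch, not the ganeti implementation):

# Editor's sketch of the first-free-slot search AllocateDRBDMinor relies on.
def first_free(sorted_keys):
    """Return the first non-negative integer absent from sorted_keys."""
    expected = 0
    for key in sorted_keys:
        if key > expected:
            return expected        # found a gap
        expected = key + 1
    return None                    # no gap: caller appends at the end

assert first_free([0, 1, 3]) == 2
assert first_free([0, 1, 2]) is None   # next minor becomes keys[-1] + 1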
+ + """ + return self._config_data.cluster.file_storage_dir + + @locking.ssynchronized(_config_lock, shared=1) + def GetSharedFileStorageDir(self): + """Get the shared file storage dir for this cluster. + + """ + return self._config_data.cluster.shared_file_storage_dir + + @locking.ssynchronized(_config_lock, shared=1) + def GetHypervisorType(self): + """Get the hypervisor type for this cluster. + + """ + return self._config_data.cluster.enabled_hypervisors[0] + + @locking.ssynchronized(_config_lock, shared=1) def GetHostKey(self): """Return the rsa hostkey from the config. - Args: None + @rtype: string + @return: the rsa hostkey - Returns: rsa hostkey """ - self._OpenConfig() - self._ReleaseLock() return self._config_data.cluster.rsahostkeypub - def AddInstance(self, instance): + @locking.ssynchronized(_config_lock, shared=1) + def GetDefaultIAllocator(self): + """Get the default instance allocator for this cluster. + + """ + return self._config_data.cluster.default_iallocator + + @locking.ssynchronized(_config_lock, shared=1) + def GetPrimaryIPFamily(self): + """Get cluster primary ip family. + + @return: primary ip family + + """ + return self._config_data.cluster.primary_ip_family + + @locking.ssynchronized(_config_lock) + def AddNodeGroup(self, group, ec_id, check_uuid=True): + """Add a node group to the configuration. + + This method calls group.UpgradeConfig() to fill any missing attributes + according to their default values. + + @type group: L{objects.NodeGroup} + @param group: the NodeGroup object to add + @type ec_id: string + @param ec_id: unique id for the job to use when creating a missing UUID + @type check_uuid: bool + @param check_uuid: add an UUID to the group if it doesn't have one or, if + it does, ensure that it does not exist in the + configuration already + + """ + self._UnlockedAddNodeGroup(group, ec_id, check_uuid) + self._WriteConfig() + + def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid): + """Add a node group to the configuration. + + """ + logging.info("Adding node group %s to configuration", group.name) + + # Some code might need to add a node group with a pre-populated UUID + # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass + # the "does this UUID" exist already check. + if check_uuid: + self._EnsureUUID(group, ec_id) + + try: + existing_uuid = self._UnlockedLookupNodeGroup(group.name) + except errors.OpPrereqError: + pass + else: + raise errors.OpPrereqError("Desired group name '%s' already exists as a" + " node group (UUID: %s)" % + (group.name, existing_uuid), + errors.ECODE_EXISTS) + + group.serial_no = 1 + group.ctime = group.mtime = time.time() + group.UpgradeConfig() + + self._config_data.nodegroups[group.uuid] = group + self._config_data.cluster.serial_no += 1 + + @locking.ssynchronized(_config_lock) + def RemoveNodeGroup(self, group_uuid): + """Remove a node group from the configuration. + + @type group_uuid: string + @param group_uuid: the UUID of the node group to remove + + """ + logging.info("Removing node group %s from configuration", group_uuid) + + if group_uuid not in self._config_data.nodegroups: + raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid) + + assert len(self._config_data.nodegroups) != 1, \ + "Group '%s' is the only group, cannot be removed" % group_uuid + + del self._config_data.nodegroups[group_uuid] + self._config_data.cluster.serial_no += 1 + self._WriteConfig() + + def _UnlockedLookupNodeGroup(self, target): + """Lookup a node group's UUID. 
+
+  def _UnlockedLookupNodeGroup(self, target):
+    """Lookup a node group's UUID.
+
+    @type target: string or None
+    @param target: group name or UUID or None to look for the default
+    @rtype: string
+    @return: nodegroup UUID
+    @raises errors.OpPrereqError: when the target group cannot be found
+
+    """
+    if target is None:
+      if len(self._config_data.nodegroups) != 1:
+        raise errors.OpPrereqError("More than one node group exists. Target"
+                                   " group must be specified explicitly.")
+      else:
+        return self._config_data.nodegroups.keys()[0]
+    if target in self._config_data.nodegroups:
+      return target
+    for nodegroup in self._config_data.nodegroups.values():
+      if nodegroup.name == target:
+        return nodegroup.uuid
+    raise errors.OpPrereqError("Node group '%s' not found" % target,
+                               errors.ECODE_NOENT)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def LookupNodeGroup(self, target):
+    """Lookup a node group's UUID.
+
+    This function is just a wrapper over L{_UnlockedLookupNodeGroup}.
+
+    @type target: string or None
+    @param target: group name or UUID or None to look for the default
+    @rtype: string
+    @return: nodegroup UUID
+
+    """
+    return self._UnlockedLookupNodeGroup(target)
+
+  def _UnlockedGetNodeGroup(self, uuid):
+    """Lookup a node group.
+
+    @type uuid: string
+    @param uuid: group UUID
+    @rtype: L{objects.NodeGroup} or None
+    @return: nodegroup object, or None if not found
+
+    """
+    if uuid not in self._config_data.nodegroups:
+      return None
+
+    return self._config_data.nodegroups[uuid]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeGroup(self, uuid):
+    """Lookup a node group.
+
+    @type uuid: string
+    @param uuid: group UUID
+    @rtype: L{objects.NodeGroup} or None
+    @return: nodegroup object, or None if not found
+
+    """
+    return self._UnlockedGetNodeGroup(uuid)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetAllNodeGroupsInfo(self):
+    """Get the configuration of all node groups.
+
+    """
+    return dict(self._config_data.nodegroups)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeGroupList(self):
+    """Get a list of node groups.
+
+    """
+    return self._config_data.nodegroups.keys()
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeGroupMembersByNodes(self, nodes):
+    """Get nodes which are members of the same nodegroups as the given nodes.
+
+    """
+    ngfn = lambda node_name: self._UnlockedGetNodeInfo(node_name).group
+    return frozenset(member_name
+                     for node_name in nodes
+                     for member_name in
+                       self._UnlockedGetNodeGroup(ngfn(node_name)).members)
+
+  @locking.ssynchronized(_config_lock)
+  def AddInstance(self, instance, ec_id):
     """Add an instance to the config.
 
     This should be used after creating a new instance.
 
-    Args:
-      instance: the instance object
+    @type instance: L{objects.Instance}
+    @param instance: the instance object
+
     """
     if not isinstance(instance, objects.Instance):
       raise errors.ProgrammerError("Invalid type passed to AddInstance")
 
     if instance.disk_template != constants.DT_DISKLESS:
       all_lvs = instance.MapLVsByNode()
-      logger.Info("Instance '%s' DISK_LAYOUT: %s" % (instance.name, all_lvs))
+      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
+
+    all_macs = self._AllMACs()
+    for nic in instance.nics:
+      if nic.mac in all_macs:
+        raise errors.ConfigurationError("Cannot add instance %s:"
+                                        " MAC address '%s' already in use." %
+                                        (instance.name, nic.mac))
 
-    self._OpenConfig()
+    self._EnsureUUID(instance, ec_id)
+
+    instance.serial_no = 1
+    instance.ctime = instance.mtime = time.time()
     self._config_data.instances[instance.name] = instance
+    self._config_data.cluster.serial_no += 1
+    self._UnlockedReleaseDRBDMinors(instance.name)
     self._WriteConfig()
 
-  def MarkInstanceUp(self, instance_name):
-    """Mark the instance status to up in the config.
+  def _EnsureUUID(self, item, ec_id):
+    """Ensures a given object has a valid UUID.
+
+    @param item: the instance or node to be checked
+    @param ec_id: the execution context id for the uuid reservation
 
     """
-    self._OpenConfig()
+    if not item.uuid:
+      item.uuid = self._GenerateUniqueID(ec_id)
+    elif item.uuid in self._AllIDs(include_temporary=True):
+      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
+                                      " in use" % (item.name, item.uuid))
+
+  def _SetInstanceStatus(self, instance_name, status):
+    """Set the instance's status to a given value.
+
+    """
+    assert isinstance(status, bool), \
+           "Invalid status '%s' passed to SetInstanceStatus" % (status,)
 
     if instance_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" %
                                       instance_name)
     instance = self._config_data.instances[instance_name]
-    instance.status = "up"
-    self._WriteConfig()
+    if instance.admin_up != status:
+      instance.admin_up = status
+      instance.serial_no += 1
+      instance.mtime = time.time()
+      self._WriteConfig()
 
+  @locking.ssynchronized(_config_lock)
+  def MarkInstanceUp(self, instance_name):
+    """Mark the instance status to up in the config.
+
+    """
+    self._SetInstanceStatus(instance_name, True)
+
+  @locking.ssynchronized(_config_lock)
   def RemoveInstance(self, instance_name):
     """Remove the instance from the configuration.
 
     """
-    self._OpenConfig()
-
     if instance_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
     del self._config_data.instances[instance_name]
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
+  @locking.ssynchronized(_config_lock)
   def RenameInstance(self, old_name, new_name):
     """Rename an instance.
 
@@ -323,186 +1172,498 @@ class ConfigWriter:
     rename.
 
     """
-    self._OpenConfig()
    if old_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" % old_name)
     inst = self._config_data.instances[old_name]
     del self._config_data.instances[old_name]
     inst.name = new_name
+
+    for disk in inst.disks:
+      if disk.dev_type == constants.LD_FILE:
+        # rename the file paths in logical and physical id
+        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
+        disk_fname = "disk%s" % disk.iv_name.split("/")[1]
+        disk.physical_id = disk.logical_id = (disk.logical_id[0],
+                                              utils.PathJoin(file_storage_dir,
+                                                             inst.name,
+                                                             disk_fname))
+
+    # Force update of ssconf files
+    self._config_data.cluster.serial_no += 1
+
     self._config_data.instances[inst.name] = inst
     self._WriteConfig()
 
+  @locking.ssynchronized(_config_lock)
   def MarkInstanceDown(self, instance_name):
     """Mark the status of an instance to down in the configuration.
 
     """
-    self._OpenConfig()
+    self._SetInstanceStatus(instance_name, False)
 
-    if instance_name not in self._config_data.instances:
-      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
-    instance = self._config_data.instances[instance_name]
-    instance.status = "down"
-    self._WriteConfig()
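
The file-disk branch of RenameInstance rewrites the path by splitting off
two directory levels; a worked example of the arithmetic (editor's
illustration with hypothetical paths; the real code uses utils.PathJoin):

# Editor's worked example of the path rewrite in RenameInstance for
# file-based disks; the concrete paths are hypothetical.
import os

logical_path = "/srv/ganeti/file-storage/old-name/disk0"
iv_name = "disk/0"

file_storage_dir = os.path.dirname(os.path.dirname(logical_path))
disk_fname = "disk%s" % iv_name.split("/")[1]    # -> "disk0"
new_path = os.path.join(file_storage_dir, "new-name", disk_fname)
assert new_path == "/srv/ganeti/file-storage/new-name/disk0"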
 
+  def _UnlockedGetInstanceList(self):
+    """Get the list of instances.
+
+    This function is for internal use, when the config lock is already held.
+
+    """
+    return self._config_data.instances.keys()
+
+  @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceList(self):
     """Get the list of instances.
 
-    Returns:
-      array of instances, ex. ['instance2.example.com','instance1.example.com']
-      these contains all the instances, also the ones in Admin_down state
+    @return: array of instances, ex. ['instance2.example.com',
+        'instance1.example.com']
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
-
-    return self._config_data.instances.keys()
+    return self._UnlockedGetInstanceList()
 
   def ExpandInstanceName(self, short_name):
     """Attempt to expand an incomplete instance name.
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
+    # Locking is done in L{ConfigWriter.GetInstanceList}
+    return _MatchNameComponentIgnoreCase(short_name, self.GetInstanceList())
+
+  def _UnlockedGetInstanceInfo(self, instance_name):
+    """Returns information about an instance.
+
+    This function is for internal use, when the config lock is already held.
+
+    """
+    if instance_name not in self._config_data.instances:
+      return None
 
-    return utils.MatchNameComponent(short_name,
-                                    self._config_data.instances.keys())
+    return self._config_data.instances[instance_name]
 
+  @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceInfo(self, instance_name):
-    """Returns informations about an instance.
+    """Returns information about an instance.
 
-    It takes the information from the configuration file. Other informations of
+    It takes the information from the configuration file. Other information of
     an instance are taken from the live systems.
 
-    Args:
-      instance: name of the instance, ex instance1.example.com
+    @param instance_name: name of the instance, e.g.
+        I{instance1.example.com}
 
-    Returns:
-      the instance object
+    @rtype: L{objects.Instance}
+    @return: the instance object
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
+    return self._UnlockedGetInstanceInfo(instance_name)
 
-    if instance_name not in self._config_data.instances:
-      return None
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetInstanceNodeGroups(self, instance_name, primary_only=False):
+    """Returns set of node group UUIDs for instance's nodes.
 
-    return self._config_data.instances[instance_name]
+    @rtype: frozenset
+
+    """
+    instance = self._UnlockedGetInstanceInfo(instance_name)
+    if not instance:
+      raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
 
-  def AddNode(self, node):
+    if primary_only:
+      nodes = [instance.primary_node]
+    else:
+      nodes = instance.all_nodes
+
+    return frozenset(self._UnlockedGetNodeInfo(node_name).group
+                     for node_name in nodes)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetAllInstancesInfo(self):
+    """Get the configuration of all instances.
+
+    @rtype: dict
+    @return: dict of (instance, instance_info), where instance_info is what
+              would GetInstanceInfo return for the node
+
+    """
+    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
+                    for instance in self._UnlockedGetInstanceList()])
+    return my_dict
+
+  @locking.ssynchronized(_config_lock)
+  def AddNode(self, node, ec_id):
     """Add a node to the configuration.
 
-    Args:
-      node: an object.Node instance
+    @type node: L{objects.Node}
+    @param node: a Node instance
 
     """
-    self._OpenConfig()
+    logging.info("Adding node %s to configuration", node.name)
+
+    self._EnsureUUID(node, ec_id)
+
+    node.serial_no = 1
+    node.ctime = node.mtime = time.time()
+    self._UnlockedAddNodeToGroup(node.name, node.group)
     self._config_data.nodes[node.name] = node
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
+  @locking.ssynchronized(_config_lock)
   def RemoveNode(self, node_name):
     """Remove a node from the configuration.
 
     """
-    self._OpenConfig()
+    logging.info("Removing node %s from configuration", node_name)
+
     if node_name not in self._config_data.nodes:
       raise errors.ConfigurationError("Unknown node '%s'" % node_name)
 
+    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
     del self._config_data.nodes[node_name]
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   def ExpandNodeName(self, short_name):
-    """Attempt to expand an incomplete instance name.
+    """Attempt to expand an incomplete node name.
 
     """
-    self._OpenConfig()
-    self._ReleaseLock()
+    # Locking is done in L{ConfigWriter.GetNodeList}
+    return _MatchNameComponentIgnoreCase(short_name, self.GetNodeList())
 
-    return utils.MatchNameComponent(short_name,
-                                    self._config_data.nodes.keys())
-
-  def GetNodeInfo(self, node_name):
+  def _UnlockedGetNodeInfo(self, node_name):
     """Get the configuration of a node, as stored in the config.
 
-    Args: node: nodename (tuple) of the node
+    This function is for internal use, when the config lock is already
+    held.
 
-    Returns: the node object
+    @param node_name: the node name, e.g. I{node1.example.com}
 
-    """
-    self._OpenConfig()
-    self._ReleaseLock()
+    @rtype: L{objects.Node}
+    @return: the node object
 
+    """
     if node_name not in self._config_data.nodes:
       return None
 
     return self._config_data.nodes[node_name]
 
-  def GetNodeList(self):
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeInfo(self, node_name):
+    """Get the configuration of a node, as stored in the config.
+
+    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
+
+    @param node_name: the node name, e.g. I{node1.example.com}
+
+    @rtype: L{objects.Node}
+    @return: the node object
+
+    """
+    return self._UnlockedGetNodeInfo(node_name)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeInstances(self, node_name):
+    """Get the instances of a node, as stored in the config.
+
+    @param node_name: the node name, e.g. I{node1.example.com}
+
+    @rtype: (list, list)
+    @return: a tuple with two lists: the primary and the secondary instances
+
+    """
+    pri = []
+    sec = []
+    for inst in self._config_data.instances.values():
+      if inst.primary_node == node_name:
+        pri.append(inst.name)
+      if node_name in inst.secondary_nodes:
+        sec.append(inst.name)
+    return (pri, sec)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeGroupInstances(self, uuid, primary_only=False):
+    """Get the instances of a node group.
+
+    @param uuid: Node group UUID
+    @param primary_only: Whether to only consider primary nodes
+    @rtype: frozenset
+    @return: List of instance names in node group
+
+    """
+    if primary_only:
+      nodes_fn = lambda inst: [inst.primary_node]
+    else:
+      nodes_fn = lambda inst: inst.all_nodes
+
+    return frozenset(inst.name
+                     for inst in self._config_data.instances.values()
+                     for node_name in nodes_fn(inst)
+                     if self._UnlockedGetNodeInfo(node_name).group == uuid)
+
+  def _UnlockedGetNodeList(self):
     """Return the list of nodes which are in the configuration.
 
+    This function is for internal use, when the config lock is already
+    held.
+
+    @rtype: list
+
     """
-    self._OpenConfig()
-    self._ReleaseLock()
     return self._config_data.nodes.keys()
 
-  def DumpConfig(self):
-    """Return the entire configuration of the cluster.
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeList(self):
+    """Return the list of nodes which are in the configuration.
+
+    """
+    return self._UnlockedGetNodeList()
+
+  def _UnlockedGetOnlineNodeList(self):
+    """Return the list of nodes which are online.
+
+    """
+    all_nodes = [self._UnlockedGetNodeInfo(node)
+                 for node in self._UnlockedGetNodeList()]
+    return [node.name for node in all_nodes if not node.offline]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetOnlineNodeList(self):
+    """Return the list of nodes which are online.
+
+    """
+    return self._UnlockedGetOnlineNodeList()
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetVmCapableNodeList(self):
+    """Return the list of nodes which are vm capable.
+
+    """
+    all_nodes = [self._UnlockedGetNodeInfo(node)
+                 for node in self._UnlockedGetNodeList()]
+    return [node.name for node in all_nodes if node.vm_capable]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNonVmCapableNodeList(self):
+    """Return the list of nodes which are not vm capable.
+
+    """
+    all_nodes = [self._UnlockedGetNodeInfo(node)
+                 for node in self._UnlockedGetNodeList()]
+    return [node.name for node in all_nodes if not node.vm_capable]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetAllNodesInfo(self):
+    """Get the configuration of all nodes.
+
+    @rtype: dict
+    @return: dict of (node, node_info), where node_info is what
+              would GetNodeInfo return for the node
+
+    """
+    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
+                    for node in self._UnlockedGetNodeList()])
+    return my_dict
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeGroupsFromNodes(self, nodes):
+    """Returns groups for a list of nodes.
+
+    @type nodes: list of string
+    @param nodes: List of node names
+    @rtype: frozenset
+
+    """
+    return frozenset(self._UnlockedGetNodeInfo(name).group for name in nodes)
+
+  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
+    """Get the number of current and maximum desired and possible candidates.
+
+    @type exceptions: list
+    @param exceptions: if passed, list of nodes that should be ignored
+    @rtype: tuple
+    @return: tuple of (current, desired and possible, possible)
+
+    """
+    mc_now = mc_should = mc_max = 0
+    for node in self._config_data.nodes.values():
+      if exceptions and node.name in exceptions:
+        continue
+      if not (node.offline or node.drained) and node.master_capable:
+        mc_max += 1
+      if node.master_candidate:
+        mc_now += 1
+    mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
+    return (mc_now, mc_should, mc_max)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterCandidateStats(self, exceptions=None):
+    """Get the number of current and maximum possible candidates.
+
+    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
+
+    @type exceptions: list
+    @param exceptions: if passed, list of nodes that should be ignored
+    @rtype: tuple
+    @return: tuple of (current, max)
+
+    """
+    return self._UnlockedGetMasterCandidateStats(exceptions)
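
A compact illustration of what the three candidate statistics mean
(editor's sketch; Node is stubbed with a namedtuple, values are invented):

# Editor's sketch of _UnlockedGetMasterCandidateStats' counting rules.
from collections import namedtuple

Node = namedtuple("Node",
                  "name offline drained master_capable master_candidate")
nodes = [Node("n1", False, False, True, True),    # current candidate
         Node("n2", False, False, True, False),   # could be promoted
         Node("n3", True, False, True, False)]    # offline: never counted

candidate_pool_size = 2
mc_now = sum(1 for n in nodes if n.master_candidate)
mc_max = sum(1 for n in nodes
             if not (n.offline or n.drained) and n.master_capable)
mc_should = min(mc_max, candidate_pool_size)
assert (mc_now, mc_should, mc_max) == (1, 2, 2)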
 
+  @locking.ssynchronized(_config_lock)
+  def MaintainCandidatePool(self, exceptions):
+    """Try to grow the candidate pool to the desired size.
+
+    @type exceptions: list
+    @param exceptions: if passed, list of nodes that should be ignored
+    @rtype: list
+    @return: list with the adjusted nodes (L{objects.Node} instances)
+
+    """
+    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
+    mod_list = []
+    if mc_now < mc_max:
+      node_list = self._config_data.nodes.keys()
+      random.shuffle(node_list)
+      for name in node_list:
+        if mc_now >= mc_max:
+          break
+        node = self._config_data.nodes[name]
+        if (node.master_candidate or node.offline or node.drained or
+            node.name in exceptions or not node.master_capable):
+          continue
+        mod_list.append(node)
+        node.master_candidate = True
+        node.serial_no += 1
+        mc_now += 1
+      if mc_now != mc_max:
+        # this should not happen
+        logging.warning("Warning: MaintainCandidatePool didn't manage to"
+                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
+      if mod_list:
+        self._config_data.cluster.serial_no += 1
+        self._WriteConfig()
+
+    return mod_list
+
+  def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid):
+    """Add a given node to the specified group.
+
     """
-    self._OpenConfig()
-    self._ReleaseLock()
-    return self._config_data
+    if nodegroup_uuid not in self._config_data.nodegroups:
+      # This can happen if a node group gets deleted between its lookup and
+      # when we're adding the first node to it, since we don't keep a lock in
+      # the meantime. It's ok though, as we'll fail cleanly if the node group
+      # is not found anymore.
+      raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
+    if node_name not in self._config_data.nodegroups[nodegroup_uuid].members:
+      self._config_data.nodegroups[nodegroup_uuid].members.append(node_name)
+
+  def _UnlockedRemoveNodeFromGroup(self, node):
+    """Remove a given node from its group.
+
+    """
+    nodegroup = node.group
+    if nodegroup not in self._config_data.nodegroups:
+      logging.warning("Warning: node '%s' has unknown node group '%s'"
+                      " (while being removed from it)", node.name, nodegroup)
+    nodegroup_obj = self._config_data.nodegroups[nodegroup]
+    if node.name not in nodegroup_obj.members:
+      logging.warning("Warning: node '%s' not a member of its node group '%s'"
+                      " (while being removed from it)", node.name, nodegroup)
+    else:
+      nodegroup_obj.members.remove(node.name)
 
   def _BumpSerialNo(self):
     """Bump up the serial number of the config.
 
     """
-    self._config_data.cluster.serial_no += 1
+    self._config_data.serial_no += 1
+    self._config_data.mtime = time.time()
 
-  def _OpenConfig(self):
-    """Read the config data from disk.
+  def _AllUUIDObjects(self):
+    """Returns all objects with uuid attributes.
+
+    """
+    return (self._config_data.instances.values() +
+            self._config_data.nodes.values() +
+            self._config_data.nodegroups.values() +
+            [self._config_data.cluster])
 
-    In case we already have configuration data and the config file has
-    the same mtime as when we read it, we skip the parsing of the
-    file, since de-serialisation could be slow.
+  def _OpenConfig(self, accept_foreign):
+    """Read the config data from disk.
""" + raw_data = utils.ReadFile(self._cfg_file) + try: - st = os.stat(self._cfg_file) - except OSError, err: - raise errors.ConfigurationError("Can't stat config file: %s" % err) - if (self._config_data is not None and - self._config_time is not None and - self._config_time == st.st_mtime and - self._config_size == st.st_size and - self._config_inode == st.st_ino): - # data is current, so skip loading of config file - return - f = open(self._cfg_file, 'r') - try: - try: - data = objects.ConfigData.Load(f) - except Exception, err: - raise errors.ConfigurationError(err) - finally: - f.close() + data = objects.ConfigData.FromDict(serializer.Load(raw_data)) + except Exception, err: + raise errors.ConfigurationError(err) + + # Make sure the configuration has the right version + _ValidateConfig(data) + if (not hasattr(data, 'cluster') or - not hasattr(data.cluster, 'config_version')): + not hasattr(data.cluster, 'rsahostkeypub')): raise errors.ConfigurationError("Incomplete configuration" - " (missing cluster.config_version)") - if data.cluster.config_version != constants.CONFIG_VERSION: - raise errors.ConfigurationError("Cluster configuration version" - " mismatch, got %s instead of %s" % - (data.cluster.config_version, - constants.CONFIG_VERSION)) + " (missing cluster.rsahostkeypub)") + + if data.cluster.master_node != self._my_hostname and not accept_foreign: + msg = ("The configuration denotes node %s as master, while my" + " hostname is %s; opening a foreign configuration is only" + " possible in accept_foreign mode" % + (data.cluster.master_node, self._my_hostname)) + raise errors.ConfigurationError(msg) + + # Upgrade configuration if needed + data.UpgradeConfig() + self._config_data = data - self._config_time = st.st_mtime - self._config_size = st.st_size - self._config_inode = st.st_ino + # reset the last serial as -1 so that the next write will cause + # ssconf update + self._last_cluster_serial = -1 - def _ReleaseLock(self): - """xxxx - """ + # And finally run our (custom) config upgrade sequence + self._UpgradeConfig() + + self._cfg_id = utils.GetFileID(path=self._cfg_file) + + def _UpgradeConfig(self): + """Run upgrade steps that cannot be done purely in the objects. - def _DistributeConfig(self): + This is because some data elements need uniqueness across the + whole configuration, etc. + + @warning: this function will call L{_WriteConfig()}, but also + L{DropECReservations} so it needs to be called only from a + "safe" place (the constructor). If one wanted to call it with + the lock held, a DropECReservationUnlocked would need to be + created first, to avoid causing deadlock. + + """ + modified = False + for item in self._AllUUIDObjects(): + if item.uuid is None: + item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID) + modified = True + if not self._config_data.nodegroups: + default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME + default_nodegroup = objects.NodeGroup(name=default_nodegroup_name, + members=[]) + self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True) + modified = True + for node in self._config_data.nodes.values(): + if not node.group: + node.group = self.LookupNodeGroup(None) + modified = True + # This is technically *not* an upgrade, but needs to be done both when + # nodegroups are being added, and upon normally loading the config, + # because the members list of a node group is discarded upon + # serializing/deserializing the object. 
+      self._UnlockedAddNodeToGroup(node.name, node.group)
+    if modified:
+      self._WriteConfig()
+      # This is ok even if it acquires the internal lock, as _UpgradeConfig is
+      # only called at config init time, without the lock held
+      self.DropECReservations(_UPGRADE_CONFIG_JID)
+
+  def _DistributeConfig(self, feedback_fn):
     """Distribute the configuration to the other nodes.
 
     Currently, this only copies the configuration file. In the future,
@@ -511,124 +1672,235 @@ class ConfigWriter:
     """
     if self._offline:
       return True
+
     bad = False
-    nodelist = self.GetNodeList()
-    myhostname = self._my_hostname
-    tgt_list = []
-    for node in nodelist:
-      nodeinfo = self.GetNodeInfo(node)
-      if nodeinfo.name == myhostname:
+
+    node_list = []
+    addr_list = []
+    myhostname = self._my_hostname
+    # we can skip checking whether _UnlockedGetNodeInfo returns None
+    # since the node list comes from _UnlockedGetNodeList, and we are
+    # called with the lock held, so no modifications should take place
+    # in between
+    for node_name in self._UnlockedGetNodeList():
+      if node_name == myhostname:
+        continue
+      node_info = self._UnlockedGetNodeInfo(node_name)
+      if not node_info.master_candidate:
         continue
-      tgt_list.append(node)
+      node_list.append(node_info.name)
+      addr_list.append(node_info.primary_ip)
+
+    result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
+                                            address_list=addr_list)
+    for to_node, to_result in result.items():
+      msg = to_result.fail_msg
+      if msg:
+        msg = ("Copy of file %s to node %s failed: %s" %
+               (self._cfg_file, to_node, msg))
+        logging.error(msg)
+
+        if feedback_fn:
+          feedback_fn(msg)
 
-    result = rpc.call_upload_file(tgt_list, self._cfg_file)
-    for node in tgt_list:
-      if not result[node]:
-        logger.Error("copy of file %s to node %s failed" %
-                     (self._cfg_file, node))
         bad = True
+
     return not bad
 
-  def _WriteConfig(self, destination=None):
+  def _WriteConfig(self, destination=None, feedback_fn=None):
     """Write the configuration data to persistent storage.
""" + assert feedback_fn is None or callable(feedback_fn) + + # Warn on config errors, but don't abort the save - the + # configuration has already been modified, and we can't revert; + # the best we can do is to warn the user and save as is, leaving + # recovery to the user + config_errors = self._UnlockedVerifyConfig() + if config_errors: + errmsg = ("Configuration data is not consistent: %s" % + (utils.CommaJoin(config_errors))) + logging.critical(errmsg) + if feedback_fn: + feedback_fn(errmsg) + if destination is None: destination = self._cfg_file self._BumpSerialNo() - dir_name, file_name = os.path.split(destination) - fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name) - f = os.fdopen(fd, 'w') + txt = serializer.Dump(self._config_data.ToDict()) + + getents = self._getents() try: - self._config_data.Dump(f) - os.fsync(f.fileno()) + fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt, + close=False, gid=getents.confd_gid, mode=0640) + except errors.LockError: + raise errors.ConfigurationError("The configuration file has been" + " modified since the last write, cannot" + " update") + try: + self._cfg_id = utils.GetFileID(fd=fd) finally: - f.close() - # we don't need to do os.close(fd) as f.close() did it - os.rename(name, destination) + os.close(fd) + self.write_count += 1 - # re-set our cache as not to re-read the config file - try: - st = os.stat(destination) - except OSError, err: - raise errors.ConfigurationError("Can't stat config file: %s" % err) - self._config_time = st.st_mtime - self._config_size = st.st_size - self._config_inode = st.st_ino - # and redistribute the config file - self._DistributeConfig() - - def InitConfig(self, node, primary_ip, secondary_ip, - hostkeypub, mac_prefix, vg_name, def_bridge): - """Create the initial cluster configuration. - - It will contain the current node, which will also be the master - node, and no instances or operating systmes. - - Args: - node: the nodename of the initial node - primary_ip: the IP address of the current host - secondary_ip: the secondary IP of the current host or None - hostkeypub: the public hostkey of this host - - """ - hu_port = constants.FIRST_DRBD_PORT - 1 - globalconfig = objects.Cluster(config_version=constants.CONFIG_VERSION, - serial_no=1, - rsahostkeypub=hostkeypub, - highest_used_port=hu_port, - mac_prefix=mac_prefix, - volume_group_name=vg_name, - default_bridge=def_bridge, - tcpudp_port_pool=set()) - if secondary_ip is None: - secondary_ip = primary_ip - nodeconfig = objects.Node(name=node, primary_ip=primary_ip, - secondary_ip=secondary_ip) - - self._config_data = objects.ConfigData(nodes={node: nodeconfig}, - instances={}, - cluster=globalconfig) - self._WriteConfig() + # and redistribute the config file to master candidates + self._DistributeConfig(feedback_fn) + + # Write ssconf files on all nodes (including locally) + if self._last_cluster_serial < self._config_data.cluster.serial_no: + if not self._offline: + result = rpc.RpcRunner.call_write_ssconf_files( + self._UnlockedGetOnlineNodeList(), + self._UnlockedGetSsconfValues()) + + for nname, nresu in result.items(): + msg = nresu.fail_msg + if msg: + errmsg = ("Error while uploading ssconf files to" + " node %s: %s" % (nname, msg)) + logging.warning(errmsg) + + if feedback_fn: + feedback_fn(errmsg) + + self._last_cluster_serial = self._config_data.cluster.serial_no + + def _UnlockedGetSsconfValues(self): + """Return the values needed by ssconf. 
+
+    @rtype: dict
+    @return: a dictionary mapping the ssconf names to their associated
+        values
+
+    """
+    fn = "\n".join
+    instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
+    node_names = utils.NiceSort(self._UnlockedGetNodeList())
+    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
+    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
+                    for ninfo in node_info]
+    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
+                    for ninfo in node_info]
+
+    instance_data = fn(instance_names)
+    off_data = fn(node.name for node in node_info if node.offline)
+    on_data = fn(node.name for node in node_info if not node.offline)
+    mc_data = fn(node.name for node in node_info if node.master_candidate)
+    mc_ips_data = fn(node.primary_ip for node in node_info
+                     if node.master_candidate)
+    node_data = fn(node_names)
+    node_pri_ips_data = fn(node_pri_ips)
+    node_snd_ips_data = fn(node_snd_ips)
+
+    cluster = self._config_data.cluster
+    cluster_tags = fn(cluster.GetTags())
+
+    hypervisor_list = fn(cluster.enabled_hypervisors)
+
+    uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
+
+    nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
+                  self._config_data.nodegroups.values()]
+    nodegroups_data = fn(utils.NiceSort(nodegroups))
+
+    ssconf_values = {
+      constants.SS_CLUSTER_NAME: cluster.cluster_name,
+      constants.SS_CLUSTER_TAGS: cluster_tags,
+      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
+      constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
+      constants.SS_MASTER_CANDIDATES: mc_data,
+      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
+      constants.SS_MASTER_IP: cluster.master_ip,
+      constants.SS_MASTER_NETDEV: cluster.master_netdev,
+      constants.SS_MASTER_NODE: cluster.master_node,
+      constants.SS_NODE_LIST: node_data,
+      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
+      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
+      constants.SS_OFFLINE_NODES: off_data,
+      constants.SS_ONLINE_NODES: on_data,
+      constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
+      constants.SS_INSTANCE_LIST: instance_data,
+      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
+      constants.SS_HYPERVISOR_LIST: hypervisor_list,
+      constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
+      constants.SS_UID_POOL: uid_pool,
+      constants.SS_NODEGROUPS: nodegroups_data,
+      }
+    bad_values = [(k, v) for k, v in ssconf_values.items()
+                  if not isinstance(v, (str, basestring))]
+    if bad_values:
+      err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
+      raise errors.ConfigurationError("Some ssconf key(s) have non-string"
+                                      " values: %s" % err)
+    return ssconf_values
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetSsconfValues(self):
+    """Wrapper using lock around _UnlockedGetSsconfValues().
+
+    """
+    return self._UnlockedGetSsconfValues()
+
+  @locking.ssynchronized(_config_lock, shared=1)
   def GetVGName(self):
     """Return the volume group name.

     """
-    self._OpenConfig()
-    self._ReleaseLock()
     return self._config_data.cluster.volume_group_name

-  def GetDefBridge(self):
-    """Return the default bridge.
+  @locking.ssynchronized(_config_lock)
+  def SetVGName(self, vg_name):
+    """Set the volume group name.
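+
+    Also bumps the cluster serial number and immediately rewrites and
+    redistributes the configuration.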
""" - self._OpenConfig() - self._ReleaseLock() - return self._config_data.cluster.default_bridge + self._config_data.cluster.volume_group_name = vg_name + self._config_data.cluster.serial_no += 1 + self._WriteConfig() + + @locking.ssynchronized(_config_lock, shared=1) + def GetDRBDHelper(self): + """Return DRBD usermode helper. + + """ + return self._config_data.cluster.drbd_usermode_helper + + @locking.ssynchronized(_config_lock) + def SetDRBDHelper(self, drbd_helper): + """Set DRBD usermode helper. + + """ + self._config_data.cluster.drbd_usermode_helper = drbd_helper + self._config_data.cluster.serial_no += 1 + self._WriteConfig() + @locking.ssynchronized(_config_lock, shared=1) def GetMACPrefix(self): """Return the mac prefix. """ - self._OpenConfig() - self._ReleaseLock() return self._config_data.cluster.mac_prefix + @locking.ssynchronized(_config_lock, shared=1) def GetClusterInfo(self): - """Returns informations about the cluster + """Returns information about the cluster - Returns: - the cluster object + @rtype: L{objects.Cluster} + @return: the cluster object """ - self._OpenConfig() - self._ReleaseLock() - return self._config_data.cluster - def Update(self, target): + @locking.ssynchronized(_config_lock, shared=1) + def HasAnyDiskOfType(self, dev_type): + """Check if in there is at disk of the given type in the configuration. + + """ + return self._config_data.HasAnyDiskOfType(dev_type) + + @locking.ssynchronized(_config_lock) + def Update(self, target, feedback_fn): """Notify function to be called after updates. This function must be called when an object (as returned by @@ -637,20 +1909,48 @@ class ConfigWriter: that all modified objects will be saved, but the target argument is the one the caller wants to ensure that it's saved. + @param target: an instance of either L{objects.Cluster}, + L{objects.Node} or L{objects.Instance} which is existing in + the cluster + @param feedback_fn: Callable feedback function + """ if self._config_data is None: raise errors.ProgrammerError("Configuration file not read," " cannot save.") + update_serial = False if isinstance(target, objects.Cluster): test = target == self._config_data.cluster elif isinstance(target, objects.Node): test = target in self._config_data.nodes.values() + update_serial = True elif isinstance(target, objects.Instance): test = target in self._config_data.instances.values() + elif isinstance(target, objects.NodeGroup): + test = target in self._config_data.nodegroups.values() else: raise errors.ProgrammerError("Invalid object type (%s) passed to" " ConfigWriter.Update" % type(target)) if not test: raise errors.ConfigurationError("Configuration updated since object" " has been read or unknown object") - self._WriteConfig() + target.serial_no += 1 + target.mtime = now = time.time() + + if update_serial: + # for node updates, we need to increase the cluster serial too + self._config_data.cluster.serial_no += 1 + self._config_data.cluster.mtime = now + + if isinstance(target, objects.Instance): + self._UnlockedReleaseDRBDMinors(target.name) + + self._WriteConfig(feedback_fn=feedback_fn) + + @locking.ssynchronized(_config_lock) + def DropECReservations(self, ec_id): + """Drop per-execution-context reservations + + """ + for rm in self._all_rms: + rm.DropECReservations(ec_id)