X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/8a147bba1bad0e82416ab9d5cf81b9173fed8ac8..2a6f6ef7ce23bdf1b55627785320d8086dcb77aa:/lib/config.py diff --git a/lib/config.py b/lib/config.py index 3e0b471..2ecd511 100644 --- a/lib/config.py +++ b/lib/config.py @@ -1,7 +1,7 @@ # # -# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. +# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -34,6 +34,7 @@ much memory. # pylint: disable=R0904 # R0904: Too many public methods +import copy import os import random import logging @@ -50,6 +51,8 @@ from ganeti import serializer from ganeti import uidpool from ganeti import netutils from ganeti import runtime +from ganeti import pathutils +from ganeti import network _config_lock = locking.SharedLock("ConfigWriter") @@ -106,6 +109,17 @@ class TemporaryReservationManager: all_reserved.update(holder_reserved) return all_reserved + def GetECReserved(self, ec_id): + """ Used when you want to retrieve all reservations for a specific + execution context. E.g when commiting reserved IPs for a specific + network. + + """ + ec_reserved = set() + if ec_id in self._ec_reserved: + ec_reserved.update(self._ec_reserved[ec_id]) + return ec_reserved + def Generate(self, existing, generate_one_fn, ec_id): """Generate a new resource of this type @@ -167,7 +181,7 @@ class ConfigWriter: self._config_data = None self._offline = offline if cfg_file is None: - self._cfg_file = constants.CLUSTER_CONF_FILE + self._cfg_file = pathutils.CLUSTER_CONF_FILE else: self._cfg_file = cfg_file self._getents = _getents @@ -176,8 +190,10 @@ class ConfigWriter: self._temporary_macs = TemporaryReservationManager() self._temporary_secrets = TemporaryReservationManager() self._temporary_lvs = TemporaryReservationManager() + self._temporary_ips = TemporaryReservationManager() self._all_rms = [self._temporary_ids, self._temporary_macs, - self._temporary_secrets, self._temporary_lvs] + self._temporary_secrets, self._temporary_lvs, + self._temporary_ips] # Note: in order to prevent errors when resolving our name in # _DistributeConfig, we compute it here once and reuse it; it's # better to raise an error before starting to modify the config @@ -206,18 +222,7 @@ class ConfigWriter: """Check if the cluster is configured. """ - return os.path.exists(constants.CLUSTER_CONF_FILE) - - def _GenerateOneMAC(self): - """Generate one mac address - - """ - prefix = self._config_data.cluster.mac_prefix - byte1 = random.randrange(0, 256) - byte2 = random.randrange(0, 256) - byte3 = random.randrange(0, 256) - mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3) - return mac + return os.path.exists(pathutils.CLUSTER_CONF_FILE) @locking.ssynchronized(_config_lock, shared=1) def GetNdParams(self, node): @@ -242,17 +247,70 @@ class ConfigWriter: """ node = self._UnlockedGetNodeInfo(instance.primary_node) nodegroup = self._UnlockedGetNodeGroup(node.group) - return self._config_data.cluster.SimpleFillDP(nodegroup.diskparams) + return self._UnlockedGetGroupDiskParams(nodegroup) + + @locking.ssynchronized(_config_lock, shared=1) + def GetGroupDiskParams(self, group): + """Get the disk params populated with inherit chain. 
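# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): the "inherit chain"
# referred to above is a two-level merge - cluster-wide disk parameters
# overlaid with the node group's overrides via
# cluster.SimpleFillDP(group.diskparams).  FillDictOfDicts below is a
# simplified stand-in for the real objects helpers, and the parameter values
# are made up for the example.
def FillDictOfDicts(defaults, custom):
  result = {}
  for template, params in defaults.items():
    merged = dict(params)
    merged.update(custom.get(template, {}))
    result[template] = merged
  return result

cluster_diskparams = {"drbd": {"resync-rate": 1024, "metavg": "xenvg"}}
group_diskparams = {"drbd": {"resync-rate": 61440}}
filled = FillDictOfDicts(cluster_diskparams, group_diskparams)
assert filled["drbd"] == {"resync-rate": 61440, "metavg": "xenvg"}
# ---------------------------------------------------------------------------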
+ + @type group: L{objects.NodeGroup} + @param group: The group we want to know the params for + @return: A dict with the filled in disk params + + """ + return self._UnlockedGetGroupDiskParams(group) + + def _UnlockedGetGroupDiskParams(self, group): + """Get the disk params populated with inherit chain down to node-group. + + @type group: L{objects.NodeGroup} + @param group: The group we want to know the params for + @return: A dict with the filled in disk params + + """ + return self._config_data.cluster.SimpleFillDP(group.diskparams) + + def _UnlockedGetNetworkMACPrefix(self, net_uuid): + """Return the network mac prefix if it exists or the cluster level default. + + """ + prefix = None + if net_uuid: + nobj = self._UnlockedGetNetwork(net_uuid) + if nobj.mac_prefix: + prefix = nobj.mac_prefix + + return prefix + + def _GenerateOneMAC(self, prefix=None): + """Return a function that randomly generates a MAC suffic + and appends it to the given prefix. If prefix is not given get + the cluster level default. + + """ + if not prefix: + prefix = self._config_data.cluster.mac_prefix + + def GenMac(): + byte1 = random.randrange(0, 256) + byte2 = random.randrange(0, 256) + byte3 = random.randrange(0, 256) + mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3) + return mac + + return GenMac @locking.ssynchronized(_config_lock, shared=1) - def GenerateMAC(self, ec_id): + def GenerateMAC(self, net_uuid, ec_id): """Generate a MAC for an instance. This should check the current instances for duplicates. """ existing = self._AllMACs() - return self._temporary_ids.Generate(existing, self._GenerateOneMAC, ec_id) + prefix = self._UnlockedGetNetworkMACPrefix(net_uuid) + gen_mac = self._GenerateOneMAC(prefix) + return self._temporary_ids.Generate(existing, gen_mac, ec_id) @locking.ssynchronized(_config_lock, shared=1) def ReserveMAC(self, mac, ec_id): @@ -268,6 +326,89 @@ class ConfigWriter: else: self._temporary_macs.Reserve(ec_id, mac) + def _UnlockedCommitTemporaryIps(self, ec_id): + """Commit all reserved IP address to their respective pools + + """ + for action, address, net_uuid in self._temporary_ips.GetECReserved(ec_id): + self._UnlockedCommitIp(action, net_uuid, address) + + def _UnlockedCommitIp(self, action, net_uuid, address): + """Commit a reserved IP address to an IP pool. + + The IP address is taken from the network's IP pool and marked as reserved. + + """ + nobj = self._UnlockedGetNetwork(net_uuid) + pool = network.AddressPool(nobj) + if action == constants.RESERVE_ACTION: + pool.Reserve(address) + elif action == constants.RELEASE_ACTION: + pool.Release(address) + + def _UnlockedReleaseIp(self, net_uuid, address, ec_id): + """Give a specific IP address back to an IP pool. + + The IP address is returned to the IP pool designated by pool_id and marked + as reserved. + + """ + self._temporary_ips.Reserve(ec_id, + (constants.RELEASE_ACTION, address, net_uuid)) + + @locking.ssynchronized(_config_lock, shared=1) + def ReleaseIp(self, net_uuid, address, ec_id): + """Give a specified IP address back to an IP pool. + + This is just a wrapper around _UnlockedReleaseIp. + + """ + if net_uuid: + self._UnlockedReleaseIp(net_uuid, address, ec_id) + + @locking.ssynchronized(_config_lock, shared=1) + def GenerateIp(self, net_uuid, ec_id): + """Find a free IPv4 address for an instance. 
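# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): how the temporary IP
# reservations above are meant to flow.  Each reservation is stored as an
# (action, address, net_uuid) tuple keyed by the execution context, and only
# applied to the address pool when _UnlockedCommitTemporaryIps() runs.
# ToyReservationManager and the plain-set "pools" below are stand-ins for the
# real TemporaryReservationManager and network.AddressPool.
RESERVE_ACTION = "reserve"
RELEASE_ACTION = "release"

class ToyReservationManager(object):
  def __init__(self):
    self._ec_reserved = {}
  def Reserve(self, ec_id, resource):
    self._ec_reserved.setdefault(ec_id, set()).add(resource)
  def GetECReserved(self, ec_id):
    return set(self._ec_reserved.get(ec_id, ()))

pools = {"net-uuid-1": set()}          # net_uuid -> addresses marked reserved
tmp_ips = ToyReservationManager()

# an LU reserves an address under its execution context (job id) ...
tmp_ips.Reserve("job-42", (RESERVE_ACTION, "192.0.2.10", "net-uuid-1"))

# ... and the commit step later replays every reservation of that context
for action, address, net_uuid in tmp_ips.GetECReserved("job-42"):
  if action == RESERVE_ACTION:
    pools[net_uuid].add(address)
  elif action == RELEASE_ACTION:
    pools[net_uuid].discard(address)

assert pools["net-uuid-1"] == set(["192.0.2.10"])
# ---------------------------------------------------------------------------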
+ + """ + nobj = self._UnlockedGetNetwork(net_uuid) + pool = network.AddressPool(nobj) + + def gen_one(): + try: + ip = pool.GenerateFree() + except errors.AddressPoolError: + raise errors.ReservationError("Cannot generate IP. Network is full") + return (constants.RESERVE_ACTION, ip, net_uuid) + + _, address, _ = self._temporary_ips.Generate([], gen_one, ec_id) + return address + + def _UnlockedReserveIp(self, net_uuid, address, ec_id): + """Reserve a given IPv4 address for use by an instance. + + """ + nobj = self._UnlockedGetNetwork(net_uuid) + pool = network.AddressPool(nobj) + try: + isreserved = pool.IsReserved(address) + except errors.AddressPoolError: + raise errors.ReservationError("IP address not in network") + if isreserved: + raise errors.ReservationError("IP address already in use") + + return self._temporary_ips.Reserve(ec_id, + (constants.RESERVE_ACTION, + address, net_uuid)) + + @locking.ssynchronized(_config_lock, shared=1) + def ReserveIp(self, net_uuid, address, ec_id): + """Reserve a given IPv4 address for use by an instance. + + """ + if net_uuid: + return self._UnlockedReserveIp(net_uuid, address, ec_id) + @locking.ssynchronized(_config_lock, shared=1) def ReserveLV(self, lv_name, ec_id): """Reserve an VG/LV pair for an instance. @@ -304,6 +445,35 @@ class ConfigWriter: lvnames.update(lv_list) return lvnames + def _AllDisks(self): + """Compute the list of all Disks (recursively, including children). + + """ + def DiskAndAllChildren(disk): + """Returns a list containing the given disk and all of his children. + + """ + disks = [disk] + if disk.children: + for child_disk in disk.children: + disks.extend(DiskAndAllChildren(child_disk)) + return disks + + disks = [] + for instance in self._config_data.instances.values(): + for disk in instance.disks: + disks.extend(DiskAndAllChildren(disk)) + return disks + + def _AllNICs(self): + """Compute the list of all NICs. + + """ + nics = [] + for instance in self._config_data.instances.values(): + nics.extend(instance.nics) + return nics + def _AllIDs(self, include_temporary): """Compute the list of all UUIDs and names we have. 
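# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): the recursive
# flattening used by _AllDisks() above.  A DRBD disk typically has two LVM
# children (data and metadata); walking the children recursively yields every
# Disk object exactly once.  ToyDisk is a stand-in carrying only the
# attributes the helper touches.
class ToyDisk(object):
  def __init__(self, name, children=None):
    self.name = name
    self.children = children or []

def DiskAndAllChildren(disk):
  disks = [disk]
  for child in disk.children:
    disks.extend(DiskAndAllChildren(child))
  return disks

drbd = ToyDisk("drbd0", [ToyDisk("data-lv"), ToyDisk("meta-lv")])
assert [d.name for d in DiskAndAllChildren(drbd)] == \
  ["drbd0", "data-lv", "meta-lv"]
# ---------------------------------------------------------------------------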
@@ -436,13 +606,21 @@ class ConfigWriter: invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES if invalid_hvs: result.append("enabled hypervisors contains invalid entries: %s" % - invalid_hvs) + utils.CommaJoin(invalid_hvs)) missing_hvp = (set(cluster.enabled_hypervisors) - set(cluster.hvparams.keys())) if missing_hvp: result.append("hypervisor parameters missing for the enabled" " hypervisor(s) %s" % utils.CommaJoin(missing_hvp)) + if not cluster.enabled_disk_templates: + result.append("enabled disk templates list doesn't have any entries") + invalid_disk_templates = set(cluster.enabled_disk_templates) \ + - constants.DISK_TEMPLATES + if invalid_disk_templates: + result.append("enabled disk templates list contains invalid entries:" + " %s" % utils.CommaJoin(invalid_disk_templates)) + if cluster.master_node not in data.nodes: result.append("cluster has invalid primary node '%s'" % cluster.master_node) @@ -459,17 +637,18 @@ class ConfigWriter: except errors.ConfigurationError, err: result.append("%s has invalid nicparams: %s" % (owner, err)) - def _helper_ipolicy(owner, params): + def _helper_ipolicy(owner, ipolicy, iscluster): try: - objects.InstancePolicy.CheckParameterSyntax(params) + objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster) except errors.ConfigurationError, err: result.append("%s has invalid instance policy: %s" % (owner, err)) - - def _helper_ispecs(owner, params): - for key, value in params.items(): - if key in constants.IPOLICY_ISPECS: - fullkey = "ipolicy/" + key - _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES) + for key, value in ipolicy.items(): + if key == constants.ISPECS_MINMAX: + for k in range(len(value)): + _helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k]) + elif key == constants.ISPECS_STD: + _helper(owner, "ipolicy/" + key, value, + constants.ISPECS_PARAMETER_TYPES) else: # FIXME: assuming list type if key in constants.IPOLICY_PARAMETERS: @@ -481,6 +660,11 @@ class ConfigWriter: " expecting %s, got %s" % (owner, key, exp_type.__name__, type(value))) + def _helper_ispecs(owner, parentkey, params): + for (key, value) in params.items(): + fullkey = "/".join([parentkey, key]) + _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES) + # check cluster parameters _helper("cluster", "beparams", cluster.SimpleFillBE({}), constants.BES_PARAMETER_TYPES) @@ -489,26 +673,25 @@ class ConfigWriter: _helper_nic("cluster", cluster.SimpleFillNIC({})) _helper("cluster", "ndparams", cluster.SimpleFillND({}), constants.NDS_PARAMETER_TYPES) - _helper_ipolicy("cluster", cluster.SimpleFillIPolicy({})) - _helper_ispecs("cluster", cluster.SimpleFillIPolicy({})) + _helper_ipolicy("cluster", cluster.ipolicy, True) # per-instance checks - for instance_name in data.instances: - instance = data.instances[instance_name] - if instance.name != instance_name: - result.append("instance '%s' is indexed by wrong name '%s'" % - (instance.name, instance_name)) + for instance_uuid in data.instances: + instance = data.instances[instance_uuid] + if instance.uuid != instance_uuid: + result.append("instance '%s' is indexed by wrong UUID '%s'" % + (instance.name, instance_uuid)) if instance.primary_node not in data.nodes: result.append("instance '%s' has invalid primary node '%s'" % - (instance_name, instance.primary_node)) + (instance.name, instance.primary_node)) for snode in instance.secondary_nodes: if snode not in data.nodes: result.append("instance '%s' has invalid secondary node '%s'" % - (instance_name, snode)) + (instance.name, 
snode)) for idx, nic in enumerate(instance.nics): if nic.mac in seen_macs: result.append("instance '%s' has NIC %d mac %s duplicate" % - (instance_name, idx, nic.mac)) + (instance.name, idx, nic.mac)) else: seen_macs.append(nic.mac) if nic.nicparams: @@ -518,6 +701,11 @@ class ConfigWriter: filled, constants.NICS_PARAMETER_TYPES) _helper_nic(owner, filled) + # disk template checks + if not instance.disk_template in data.cluster.enabled_disk_templates: + result.append("instance '%s' uses the disabled disk template '%s'." % + (instance.name, instance.disk_template)) + # parameter checks if instance.beparams: _helper("instance %s" % instance.name, "beparams", @@ -525,7 +713,7 @@ class ConfigWriter: # gather the drbd ports for duplicate checks for (idx, dsk) in enumerate(instance.disks): - if dsk.dev_type in constants.LDS_DRBD: + if dsk.dev_type in constants.DTS_DRBD: tcp_port = dsk.logical_id[2] if tcp_port not in ports: ports[tcp_port] = [] @@ -583,10 +771,10 @@ class ConfigWriter: (mc_now, mc_max)) # node checks - for node_name, node in data.nodes.items(): - if node.name != node_name: - result.append("Node '%s' is indexed by wrong name '%s'" % - (node.name, node_name)) + for node_uuid, node in data.nodes.items(): + if node.uuid != node_uuid: + result.append("Node '%s' is indexed by wrong UUID '%s'" % + (node.name, node_uuid)) if [node.master_candidate, node.drained, node.offline].count(True) > 1: result.append("Node %s state is invalid: master_candidate=%s," " drain=%s, offline=%s" % @@ -599,6 +787,10 @@ class ConfigWriter: _helper("node %s" % node.name, "ndparams", cluster.FillND(node, data.nodegroups[node.group]), constants.NDS_PARAMETER_TYPES) + used_globals = constants.NDC_GLOBALS.intersection(node.ndparams) + if used_globals: + result.append("Node '%s' has some global parameters set: %s" % + (node.name, utils.CommaJoin(used_globals))) # nodegroups checks nodegroups_names = set() @@ -615,8 +807,8 @@ class ConfigWriter: else: nodegroups_names.add(nodegroup.name) group_name = "group %s" % nodegroup.name - _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy)) - _helper_ispecs(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy)) + _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy), + False) if nodegroup.ndparams: _helper(group_name, "ndparams", cluster.SimpleFillND(nodegroup.ndparams), @@ -658,7 +850,7 @@ class ConfigWriter: else: raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode) - _AddIpAddress("%s/%s" % (link, nic.ip), + _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network), "instance:%s/nic:%d" % (instance.name, idx)) for ip, owners in ips.items(): @@ -681,7 +873,7 @@ class ConfigWriter: """ return self._UnlockedVerifyConfig() - def _UnlockedSetDiskID(self, disk, node_name): + def _UnlockedSetDiskID(self, disk, node_uuid): """Convert the unique ID to the ID needed on the target nodes. This is used only for drbd, which needs ip/port configuration. 
@@ -695,15 +887,15 @@ class ConfigWriter: """ if disk.children: for child in disk.children: - self._UnlockedSetDiskID(child, node_name) + self._UnlockedSetDiskID(child, node_uuid) if disk.logical_id is None and disk.physical_id is not None: return - if disk.dev_type == constants.LD_DRBD8: + if disk.dev_type == constants.DT_DRBD8: pnode, snode, port, pminor, sminor, secret = disk.logical_id - if node_name not in (pnode, snode): + if node_uuid not in (pnode, snode): raise errors.ConfigurationError("DRBD device not knowing node %s" % - node_name) + node_uuid) pnode_info = self._UnlockedGetNodeInfo(pnode) snode_info = self._UnlockedGetNodeInfo(snode) if pnode_info is None or snode_info is None: @@ -711,7 +903,7 @@ class ConfigWriter: " for %s" % str(disk)) p_data = (pnode_info.secondary_ip, port) s_data = (snode_info.secondary_ip, port) - if pnode == node_name: + if pnode == node_uuid: disk.physical_id = p_data + s_data + (pminor, secret) else: # it must be secondary, we tested above disk.physical_id = s_data + p_data + (sminor, secret) @@ -720,7 +912,7 @@ class ConfigWriter: return @locking.ssynchronized(_config_lock) - def SetDiskID(self, disk, node_name): + def SetDiskID(self, disk, node_uuid): """Convert the unique ID to the ID needed on the target nodes. This is used only for drbd, which needs ip/port configuration. @@ -730,7 +922,7 @@ class ConfigWriter: node. """ - return self._UnlockedSetDiskID(disk, node_name) + return self._UnlockedSetDiskID(disk, node_uuid) @locking.ssynchronized(_config_lock) def AddTcpUdpPort(self, port): @@ -780,39 +972,44 @@ class ConfigWriter: """Compute the used DRBD minor/nodes. @rtype: (dict, list) - @return: dictionary of node_name: dict of minor: instance_name; + @return: dictionary of node_uuid: dict of minor: instance_uuid; the returned dict will have all the nodes in it (even if with an empty list), and a list of duplicates; if the duplicates list is not empty, the configuration is corrupted and its caller should raise an exception """ - def _AppendUsedPorts(instance_name, disk, used): + def _AppendUsedMinors(get_node_name_fn, instance, disk, used): duplicates = [] - if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5: + if disk.dev_type == constants.DT_DRBD8 and len(disk.logical_id) >= 5: node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5] - for node, port in ((node_a, minor_a), (node_b, minor_b)): - assert node in used, ("Node '%s' of instance '%s' not found" - " in node list" % (node, instance_name)) - if port in used[node]: - duplicates.append((node, port, instance_name, used[node][port])) + for node_uuid, minor in ((node_a, minor_a), (node_b, minor_b)): + assert node_uuid in used, \ + ("Node '%s' of instance '%s' not found in node list" % + (get_node_name_fn(node_uuid), instance.name)) + if minor in used[node_uuid]: + duplicates.append((node_uuid, minor, instance.uuid, + used[node_uuid][minor])) else: - used[node][port] = instance_name + used[node_uuid][minor] = instance.uuid if disk.children: for child in disk.children: - duplicates.extend(_AppendUsedPorts(instance_name, child, used)) + duplicates.extend(_AppendUsedMinors(get_node_name_fn, instance, child, + used)) return duplicates duplicates = [] - my_dict = dict((node, {}) for node in self._config_data.nodes) + my_dict = dict((node_uuid, {}) for node_uuid in self._config_data.nodes) for instance in self._config_data.instances.itervalues(): for disk in instance.disks: - duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict)) - for (node, minor), instance in 
self._temporary_drbds.iteritems(): - if minor in my_dict[node] and my_dict[node][minor] != instance: - duplicates.append((node, minor, instance, my_dict[node][minor])) + duplicates.extend(_AppendUsedMinors(self._UnlockedGetNodeName, + instance, disk, my_dict)) + for (node_uuid, minor), inst_uuid in self._temporary_drbds.iteritems(): + if minor in my_dict[node_uuid] and my_dict[node_uuid][minor] != inst_uuid: + duplicates.append((node_uuid, minor, inst_uuid, + my_dict[node_uuid][minor])) else: - my_dict[node][minor] = instance + my_dict[node_uuid][minor] = inst_uuid return my_dict, duplicates @locking.ssynchronized(_config_lock) @@ -821,7 +1018,7 @@ class ConfigWriter: This is just a wrapper over L{_UnlockedComputeDRBDMap}. - @return: dictionary of node_name: dict of minor: instance_name; + @return: dictionary of node_uuid: dict of minor: instance_uuid; the returned dict will have all the nodes in it (even if with an empty list). @@ -833,7 +1030,7 @@ class ConfigWriter: return d_map @locking.ssynchronized(_config_lock) - def AllocateDRBDMinor(self, nodes, instance): + def AllocateDRBDMinor(self, node_uuids, inst_uuid): """Allocate a drbd minor. The free minor will be automatically computed from the existing @@ -841,25 +1038,25 @@ class ConfigWriter: multiple minors. The result is the list of minors, in the same order as the passed nodes. - @type instance: string - @param instance: the instance for which we allocate minors + @type inst_uuid: string + @param inst_uuid: the instance for which we allocate minors """ - assert isinstance(instance, basestring), \ - "Invalid argument '%s' passed to AllocateDRBDMinor" % instance + assert isinstance(inst_uuid, basestring), \ + "Invalid argument '%s' passed to AllocateDRBDMinor" % inst_uuid d_map, duplicates = self._UnlockedComputeDRBDMap() if duplicates: raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" % str(duplicates)) result = [] - for nname in nodes: - ndata = d_map[nname] + for nuuid in node_uuids: + ndata = d_map[nuuid] if not ndata: # no minors used, we can start at 0 result.append(0) - ndata[0] = instance - self._temporary_drbds[(nname, 0)] = instance + ndata[0] = inst_uuid + self._temporary_drbds[(nuuid, 0)] = inst_uuid continue keys = ndata.keys() keys.sort() @@ -871,39 +1068,39 @@ class ConfigWriter: else: minor = ffree # double-check minor against current instances - assert minor not in d_map[nname], \ + assert minor not in d_map[nuuid], \ ("Attempt to reuse allocated DRBD minor %d on node %s," " already allocated to instance %s" % - (minor, nname, d_map[nname][minor])) - ndata[minor] = instance + (minor, nuuid, d_map[nuuid][minor])) + ndata[minor] = inst_uuid # double-check minor against reservation - r_key = (nname, minor) + r_key = (nuuid, minor) assert r_key not in self._temporary_drbds, \ ("Attempt to reuse reserved DRBD minor %d on node %s," " reserved for instance %s" % - (minor, nname, self._temporary_drbds[r_key])) - self._temporary_drbds[r_key] = instance + (minor, nuuid, self._temporary_drbds[r_key])) + self._temporary_drbds[r_key] = inst_uuid result.append(minor) logging.debug("Request to allocate drbd minors, input: %s, returning %s", - nodes, result) + node_uuids, result) return result - def _UnlockedReleaseDRBDMinors(self, instance): + def _UnlockedReleaseDRBDMinors(self, inst_uuid): """Release temporary drbd minors allocated for a given instance. 
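# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): the "first free minor"
# choice made by AllocateDRBDMinor() above.  Given the minors already used on
# one node, the sorted keys are scanned for the first gap; when the sequence
# is dense, the next value after the highest key is used (the real code
# delegates the gap search to a shared utility).
def FirstFreeMinor(used_minors):
  keys = sorted(used_minors)
  for idx, minor in enumerate(keys):
    if minor != idx:
      return idx                 # hole in the sequence
  if keys:
    return keys[-1] + 1          # dense sequence, append at the end
  return 0                       # nothing used yet

assert FirstFreeMinor([]) == 0
assert FirstFreeMinor([0, 1, 2]) == 3
assert FirstFreeMinor([0, 2, 3]) == 1
# ---------------------------------------------------------------------------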
- @type instance: string - @param instance: the instance for which temporary minors should be - released + @type inst_uuid: string + @param inst_uuid: the instance for which temporary minors should be + released """ - assert isinstance(instance, basestring), \ + assert isinstance(inst_uuid, basestring), \ "Invalid argument passed to ReleaseDRBDMinors" - for key, name in self._temporary_drbds.items(): - if name == instance: + for key, uuid in self._temporary_drbds.items(): + if uuid == inst_uuid: del self._temporary_drbds[key] @locking.ssynchronized(_config_lock) - def ReleaseDRBDMinors(self, instance): + def ReleaseDRBDMinors(self, inst_uuid): """Release temporary drbd minors allocated for a given instance. This should be called on the error paths, on the success paths @@ -912,12 +1109,12 @@ class ConfigWriter: This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}. - @type instance: string - @param instance: the instance for which temporary minors should be - released + @type inst_uuid: string + @param inst_uuid: the instance for which temporary minors should be + released """ - self._UnlockedReleaseDRBDMinors(instance) + self._UnlockedReleaseDRBDMinors(inst_uuid) @locking.ssynchronized(_config_lock, shared=1) def GetConfigVersion(self): @@ -939,14 +1136,23 @@ class ConfigWriter: @locking.ssynchronized(_config_lock, shared=1) def GetMasterNode(self): - """Get the hostname of the master node for this cluster. + """Get the UUID of the master node for this cluster. - @return: Master hostname + @return: Master node UUID """ return self._config_data.cluster.master_node @locking.ssynchronized(_config_lock, shared=1) + def GetMasterNodeName(self): + """Get the hostname of the master node for this cluster. + + @return: Master node hostname + + """ + return self._UnlockedGetNodeName(self._config_data.cluster.master_node) + + @locking.ssynchronized(_config_lock, shared=1) def GetMasterIP(self): """Get the IP of the master node for this cluster. @@ -998,7 +1204,7 @@ class ConfigWriter: return self._config_data.cluster.enabled_hypervisors[0] @locking.ssynchronized(_config_lock, shared=1) - def GetHostKey(self): + def GetRsaHostKey(self): """Return the rsa hostkey from the config. @rtype: string @@ -1008,6 +1214,16 @@ class ConfigWriter: return self._config_data.cluster.rsahostkeypub @locking.ssynchronized(_config_lock, shared=1) + def GetDsaHostKey(self): + """Return the dsa hostkey from the config. + + @rtype: string + @return: the dsa hostkey + + """ + return self._config_data.cluster.dsahostkeypub + + @locking.ssynchronized(_config_lock, shared=1) def GetDefaultIAllocator(self): """Get the default instance allocator for this cluster. @@ -1032,10 +1248,9 @@ class ConfigWriter: """ cluster = self._config_data.cluster - result = objects.MasterNetworkParameters(name=cluster.master_node, - ip=cluster.master_ip, - netmask=cluster.master_netmask, - netdev=cluster.master_netdev, + result = objects.MasterNetworkParameters( + uuid=cluster.master_node, ip=cluster.master_ip, + netmask=cluster.master_netmask, netdev=cluster.master_netdev, ip_family=cluster.primary_ip_family) return result @@ -1122,7 +1337,7 @@ class ConfigWriter: if target is None: if len(self._config_data.nodegroups) != 1: raise errors.OpPrereqError("More than one node group exists. 
Target" - " group must be specified explicitely.") + " group must be specified explicitly.") else: return self._config_data.nodegroups.keys()[0] if target in self._config_data.nodegroups: @@ -1192,11 +1407,11 @@ class ConfigWriter: """Get nodes which are member in the same nodegroups as the given nodes. """ - ngfn = lambda node_name: self._UnlockedGetNodeInfo(node_name).group - return frozenset(member_name - for node_name in nodes - for member_name in - self._UnlockedGetNodeGroup(ngfn(node_name)).members) + ngfn = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group + return frozenset(member_uuid + for node_uuid in nodes + for member_uuid in + self._UnlockedGetNodeGroup(ngfn(node_uuid)).members) @locking.ssynchronized(_config_lock, shared=1) def GetMultiNodeGroupInfo(self, group_uuids): @@ -1233,13 +1448,14 @@ class ConfigWriter: " MAC address '%s' already in use." % (instance.name, nic.mac)) - self._EnsureUUID(instance, ec_id) + self._CheckUniqueUUID(instance, include_temporary=False) instance.serial_no = 1 instance.ctime = instance.mtime = time.time() - self._config_data.instances[instance.name] = instance + self._config_data.instances[instance.uuid] = instance self._config_data.cluster.serial_no += 1 - self._UnlockedReleaseDRBDMinors(instance.name) + self._UnlockedReleaseDRBDMinors(instance.uuid) + self._UnlockedCommitTemporaryIps(ec_id) self._WriteConfig() def _EnsureUUID(self, item, ec_id): @@ -1251,62 +1467,95 @@ class ConfigWriter: """ if not item.uuid: item.uuid = self._GenerateUniqueID(ec_id) - elif item.uuid in self._AllIDs(include_temporary=True): + else: + self._CheckUniqueUUID(item, include_temporary=True) + + def _CheckUniqueUUID(self, item, include_temporary): + """Checks that the UUID of the given object is unique. + + @param item: the instance or node to be checked + @param include_temporary: whether temporarily generated UUID's should be + included in the check. If the UUID of the item to be checked is + a temporarily generated one, this has to be C{False}. + + """ + if not item.uuid: + raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,)) + if item.uuid in self._AllIDs(include_temporary=include_temporary): raise errors.ConfigurationError("Cannot add '%s': UUID %s already" " in use" % (item.name, item.uuid)) - def _SetInstanceStatus(self, instance_name, status): + def _SetInstanceStatus(self, inst_uuid, status, disks_active): """Set the instance's status to a given value. """ + if inst_uuid not in self._config_data.instances: + raise errors.ConfigurationError("Unknown instance '%s'" % + inst_uuid) + instance = self._config_data.instances[inst_uuid] + + if status is None: + status = instance.admin_state + if disks_active is None: + disks_active = instance.disks_active + assert status in constants.ADMINST_ALL, \ "Invalid status '%s' passed to SetInstanceStatus" % (status,) - if instance_name not in self._config_data.instances: - raise errors.ConfigurationError("Unknown instance '%s'" % - instance_name) - instance = self._config_data.instances[instance_name] - if instance.admin_state != status: + if instance.admin_state != status or \ + instance.disks_active != disks_active: instance.admin_state = status + instance.disks_active = disks_active instance.serial_no += 1 instance.mtime = time.time() self._WriteConfig() @locking.ssynchronized(_config_lock) - def MarkInstanceUp(self, instance_name): + def MarkInstanceUp(self, inst_uuid): """Mark the instance status to up in the config. + This also sets the instance disks active flag. 
+ """ - self._SetInstanceStatus(instance_name, constants.ADMINST_UP) + self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True) @locking.ssynchronized(_config_lock) - def MarkInstanceOffline(self, instance_name): + def MarkInstanceOffline(self, inst_uuid): """Mark the instance status to down in the config. + This also clears the instance disks active flag. + """ - self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE) + self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False) @locking.ssynchronized(_config_lock) - def RemoveInstance(self, instance_name): + def RemoveInstance(self, inst_uuid): """Remove the instance from the configuration. """ - if instance_name not in self._config_data.instances: - raise errors.ConfigurationError("Unknown instance '%s'" % instance_name) + if inst_uuid not in self._config_data.instances: + raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) # If a network port has been allocated to the instance, # return it to the pool of free ports. - inst = self._config_data.instances[instance_name] + inst = self._config_data.instances[inst_uuid] network_port = getattr(inst, "network_port", None) if network_port is not None: self._config_data.cluster.tcpudp_port_pool.add(network_port) - del self._config_data.instances[instance_name] + instance = self._UnlockedGetInstanceInfo(inst_uuid) + + for nic in instance.nics: + if nic.network and nic.ip: + # Return all IP addresses to the respective address pools + self._UnlockedCommitIp(constants.RELEASE_ACTION, nic.network, nic.ip) + + del self._config_data.instances[inst_uuid] self._config_data.cluster.serial_no += 1 self._WriteConfig() @locking.ssynchronized(_config_lock) - def RenameInstance(self, old_name, new_name): + def RenameInstance(self, inst_uuid, new_name): """Rename an instance. This needs to be done in ConfigWriter and not by RemoveInstance @@ -1314,37 +1563,49 @@ class ConfigWriter: rename. """ - if old_name not in self._config_data.instances: - raise errors.ConfigurationError("Unknown instance '%s'" % old_name) + if inst_uuid not in self._config_data.instances: + raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) - # Operate on a copy to not loose instance object in case of a failure - inst = self._config_data.instances[old_name].Copy() + inst = self._config_data.instances[inst_uuid] inst.name = new_name - for (idx, disk) in enumerate(inst.disks): - if disk.dev_type == constants.LD_FILE: + for (_, disk) in enumerate(inst.disks): + if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]: # rename the file paths in logical and physical id file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1])) disk.logical_id = (disk.logical_id[0], utils.PathJoin(file_storage_dir, inst.name, - "disk%s" % idx)) + os.path.basename(disk.logical_id[1]))) disk.physical_id = disk.logical_id - # Actually replace instance object - del self._config_data.instances[old_name] - self._config_data.instances[inst.name] = inst - # Force update of ssconf files self._config_data.cluster.serial_no += 1 self._WriteConfig() @locking.ssynchronized(_config_lock) - def MarkInstanceDown(self, instance_name): + def MarkInstanceDown(self, inst_uuid): """Mark the status of an instance to down in the configuration. + This does not touch the instance disks active flag, as shut down instances + can still have active disks. 
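# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): the path rewrite
# performed by RenameInstance() above for file-based disks.  logical_id is
# (driver, path); the per-instance directory component is replaced with the
# new name, while the file name itself is now preserved instead of being
# regenerated from the disk index.  os.path.join stands in for utils.PathJoin
# and the storage path is made up for the example.
import os

def RenameDiskPath(logical_id, new_name):
  driver, path = logical_id
  file_storage_dir = os.path.dirname(os.path.dirname(path))
  return (driver, os.path.join(file_storage_dir, new_name,
                               os.path.basename(path)))

old_id = ("loop", "/srv/ganeti/file-storage/inst1.example.com/disk0")
assert RenameDiskPath(old_id, "inst2.example.com") == \
  ("loop", "/srv/ganeti/file-storage/inst2.example.com/disk0")
# ---------------------------------------------------------------------------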
+ + """ + self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None) + + @locking.ssynchronized(_config_lock) + def MarkInstanceDisksActive(self, inst_uuid): + """Mark the status of instance disks active. + + """ + self._SetInstanceStatus(inst_uuid, None, True) + + @locking.ssynchronized(_config_lock) + def MarkInstanceDisksInactive(self, inst_uuid): + """Mark the status of instance disks inactive. + """ - self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN) + self._SetInstanceStatus(inst_uuid, None, False) def _UnlockedGetInstanceList(self): """Get the list of instances. @@ -1358,8 +1619,7 @@ class ConfigWriter: def GetInstanceList(self): """Get the list of instances. - @return: array of instances, ex. ['instance2.example.com', - 'instance1.example.com'] + @return: array of instances, ex. ['instance2-uuid', 'instance1-uuid'] """ return self._UnlockedGetInstanceList() @@ -1368,67 +1628,110 @@ class ConfigWriter: """Attempt to expand an incomplete instance name. """ - # Locking is done in L{ConfigWriter.GetInstanceList} - return _MatchNameComponentIgnoreCase(short_name, self.GetInstanceList()) + # Locking is done in L{ConfigWriter.GetAllInstancesInfo} + all_insts = self.GetAllInstancesInfo().values() + expanded_name = _MatchNameComponentIgnoreCase( + short_name, [inst.name for inst in all_insts]) + + if expanded_name is not None: + # there has to be exactly one instance with that name + inst = (filter(lambda n: n.name == expanded_name, all_insts)[0]) + return (inst.uuid, inst.name) + else: + return (None, None) - def _UnlockedGetInstanceInfo(self, instance_name): + def _UnlockedGetInstanceInfo(self, inst_uuid): """Returns information about an instance. This function is for internal use, when the config lock is already held. """ - if instance_name not in self._config_data.instances: + if inst_uuid not in self._config_data.instances: return None - return self._config_data.instances[instance_name] + return self._config_data.instances[inst_uuid] @locking.ssynchronized(_config_lock, shared=1) - def GetInstanceInfo(self, instance_name): + def GetInstanceInfo(self, inst_uuid): """Returns information about an instance. It takes the information from the configuration file. Other information of an instance are taken from the live systems. - @param instance_name: name of the instance, e.g. - I{instance1.example.com} + @param inst_uuid: UUID of the instance @rtype: L{objects.Instance} @return: the instance object """ - return self._UnlockedGetInstanceInfo(instance_name) + return self._UnlockedGetInstanceInfo(inst_uuid) @locking.ssynchronized(_config_lock, shared=1) - def GetInstanceNodeGroups(self, instance_name, primary_only=False): + def GetInstanceNodeGroups(self, inst_uuid, primary_only=False): """Returns set of node group UUIDs for instance's nodes. @rtype: frozenset """ - instance = self._UnlockedGetInstanceInfo(instance_name) + instance = self._UnlockedGetInstanceInfo(inst_uuid) if not instance: - raise errors.ConfigurationError("Unknown instance '%s'" % instance_name) + raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) if primary_only: nodes = [instance.primary_node] else: nodes = instance.all_nodes - return frozenset(self._UnlockedGetNodeInfo(node_name).group - for node_name in nodes) + return frozenset(self._UnlockedGetNodeInfo(node_uuid).group + for node_uuid in nodes) + + @locking.ssynchronized(_config_lock, shared=1) + def GetInstanceNetworks(self, inst_uuid): + """Returns set of network UUIDs for instance's nics. 
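# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): ExpandInstanceName()
# above now matches a shortened name against instance *names* but returns the
# (uuid, name) pair, because the configuration dictionaries are keyed by UUID.
# A plain prefix match stands in for _MatchNameComponentIgnoreCase here, and
# the instance data is made up for the example.
def ExpandName(short_name, insts):
  # insts: dict of uuid -> full instance name
  matches = [(uuid, name) for (uuid, name) in insts.items()
             if name == short_name or name.startswith(short_name + ".")]
  if len(matches) == 1:
    return matches[0]
  return (None, None)

insts = {"inst1-uuid": "web1.example.com", "inst2-uuid": "db1.example.com"}
assert ExpandName("web1", insts) == ("inst1-uuid", "web1.example.com")
assert ExpandName("missing", insts) == (None, None)
# ---------------------------------------------------------------------------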
+ + @rtype: frozenset + + """ + instance = self._UnlockedGetInstanceInfo(inst_uuid) + if not instance: + raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) + + networks = set() + for nic in instance.nics: + if nic.network: + networks.add(nic.network) + + return frozenset(networks) + + @locking.ssynchronized(_config_lock, shared=1) + def GetMultiInstanceInfo(self, inst_uuids): + """Get the configuration of multiple instances. + + @param inst_uuids: list of instance UUIDs + @rtype: list + @return: list of tuples (instance UUID, instance_info), where + instance_info is what would GetInstanceInfo return for the + node, while keeping the original order + + """ + return [(uuid, self._UnlockedGetInstanceInfo(uuid)) for uuid in inst_uuids] @locking.ssynchronized(_config_lock, shared=1) - def GetMultiInstanceInfo(self, instances): + def GetMultiInstanceInfoByName(self, inst_names): """Get the configuration of multiple instances. - @param instances: list of instance names + @param inst_names: list of instance names @rtype: list @return: list of tuples (instance, instance_info), where instance_info is what would GetInstanceInfo return for the node, while keeping the original order """ - return [(name, self._UnlockedGetInstanceInfo(name)) for name in instances] + result = [] + for name in inst_names: + instance = self._UnlockedGetInstanceInfoByName(name) + result.append((instance.uuid, instance)) + return result @locking.ssynchronized(_config_lock, shared=1) def GetAllInstancesInfo(self): @@ -1439,8 +1742,11 @@ class ConfigWriter: would GetInstanceInfo return for the node """ - my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance)) - for instance in self._UnlockedGetInstanceList()]) + return self._UnlockedGetAllInstancesInfo() + + def _UnlockedGetAllInstancesInfo(self): + my_dict = dict([(inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid)) + for inst_uuid in self._UnlockedGetInstanceList()]) return my_dict @locking.ssynchronized(_config_lock, shared=1) @@ -1455,10 +1761,61 @@ class ConfigWriter: other functions and just compares instance attributes. """ - return dict((name, inst) - for (name, inst) in self._config_data.instances.items() + return dict((uuid, inst) + for (uuid, inst) in self._config_data.instances.items() if filter_fn(inst)) + @locking.ssynchronized(_config_lock, shared=1) + def GetInstanceInfoByName(self, inst_name): + """Get the L{objects.Instance} object for a named instance. + + @param inst_name: name of the instance to get information for + @type inst_name: string + @return: the corresponding L{objects.Instance} instance or None if no + information is available + + """ + return self._UnlockedGetInstanceInfoByName(inst_name) + + def _UnlockedGetInstanceInfoByName(self, inst_name): + for inst in self._UnlockedGetAllInstancesInfo().values(): + if inst.name == inst_name: + return inst + return None + + def _UnlockedGetInstanceName(self, inst_uuid): + inst_info = self._UnlockedGetInstanceInfo(inst_uuid) + if inst_info is None: + raise errors.OpExecError("Unknown instance: %s" % inst_uuid) + return inst_info.name + + @locking.ssynchronized(_config_lock, shared=1) + def GetInstanceName(self, inst_uuid): + """Gets the instance name for the passed instance. 
+ + @param inst_uuid: instance UUID to get name for + @type inst_uuid: string + @rtype: string + @return: instance name + + """ + return self._UnlockedGetInstanceName(inst_uuid) + + @locking.ssynchronized(_config_lock, shared=1) + def GetInstanceNames(self, inst_uuids): + """Gets the instance names for the passed list of nodes. + + @param inst_uuids: list of instance UUIDs to get names for + @type inst_uuids: list of strings + @rtype: list of strings + @return: list of instance names + + """ + return self._UnlockedGetInstanceNames(inst_uuids) + + def _UnlockedGetInstanceNames(self, inst_uuids): + return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids] + @locking.ssynchronized(_config_lock) def AddNode(self, node, ec_id): """Add a node to the configuration. @@ -1473,69 +1830,78 @@ class ConfigWriter: node.serial_no = 1 node.ctime = node.mtime = time.time() - self._UnlockedAddNodeToGroup(node.name, node.group) - self._config_data.nodes[node.name] = node + self._UnlockedAddNodeToGroup(node.uuid, node.group) + self._config_data.nodes[node.uuid] = node self._config_data.cluster.serial_no += 1 self._WriteConfig() @locking.ssynchronized(_config_lock) - def RemoveNode(self, node_name): + def RemoveNode(self, node_uuid): """Remove a node from the configuration. """ - logging.info("Removing node %s from configuration", node_name) + logging.info("Removing node %s from configuration", node_uuid) - if node_name not in self._config_data.nodes: - raise errors.ConfigurationError("Unknown node '%s'" % node_name) + if node_uuid not in self._config_data.nodes: + raise errors.ConfigurationError("Unknown node '%s'" % node_uuid) - self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name]) - del self._config_data.nodes[node_name] + self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_uuid]) + del self._config_data.nodes[node_uuid] self._config_data.cluster.serial_no += 1 self._WriteConfig() def ExpandNodeName(self, short_name): - """Attempt to expand an incomplete node name. + """Attempt to expand an incomplete node name into a node UUID. """ - # Locking is done in L{ConfigWriter.GetNodeList} - return _MatchNameComponentIgnoreCase(short_name, self.GetNodeList()) + # Locking is done in L{ConfigWriter.GetAllNodesInfo} + all_nodes = self.GetAllNodesInfo().values() + expanded_name = _MatchNameComponentIgnoreCase( + short_name, [node.name for node in all_nodes]) + + if expanded_name is not None: + # there has to be exactly one node with that name + node = (filter(lambda n: n.name == expanded_name, all_nodes)[0]) + return (node.uuid, node.name) + else: + return (None, None) - def _UnlockedGetNodeInfo(self, node_name): + def _UnlockedGetNodeInfo(self, node_uuid): """Get the configuration of a node, as stored in the config. This function is for internal use, when the config lock is already held. - @param node_name: the node name, e.g. I{node1.example.com} + @param node_uuid: the node UUID @rtype: L{objects.Node} @return: the node object """ - if node_name not in self._config_data.nodes: + if node_uuid not in self._config_data.nodes: return None - return self._config_data.nodes[node_name] + return self._config_data.nodes[node_uuid] @locking.ssynchronized(_config_lock, shared=1) - def GetNodeInfo(self, node_name): + def GetNodeInfo(self, node_uuid): """Get the configuration of a node, as stored in the config. This is just a locked wrapper over L{_UnlockedGetNodeInfo}. - @param node_name: the node name, e.g. 
I{node1.example.com} + @param node_uuid: the node UUID @rtype: L{objects.Node} @return: the node object """ - return self._UnlockedGetNodeInfo(node_name) + return self._UnlockedGetNodeInfo(node_uuid) @locking.ssynchronized(_config_lock, shared=1) - def GetNodeInstances(self, node_name): + def GetNodeInstances(self, node_uuid): """Get the instances of a node, as stored in the config. - @param node_name: the node name, e.g. I{node1.example.com} + @param node_uuid: the node UUID @rtype: (list, list) @return: a tuple with two lists: the primary and the secondary instances @@ -1544,10 +1910,10 @@ class ConfigWriter: pri = [] sec = [] for inst in self._config_data.instances.values(): - if inst.primary_node == node_name: - pri.append(inst.name) - if node_name in inst.secondary_nodes: - sec.append(inst.name) + if inst.primary_node == node_uuid: + pri.append(inst.uuid) + if node_uuid in inst.secondary_nodes: + sec.append(inst.uuid) return (pri, sec) @locking.ssynchronized(_config_lock, shared=1) @@ -1557,7 +1923,7 @@ class ConfigWriter: @param uuid: Node group UUID @param primary_only: Whether to only consider primary nodes @rtype: frozenset - @return: List of instance names in node group + @return: List of instance UUIDs in node group """ if primary_only: @@ -1565,10 +1931,36 @@ class ConfigWriter: else: nodes_fn = lambda inst: inst.all_nodes - return frozenset(inst.name + return frozenset(inst.uuid for inst in self._config_data.instances.values() - for node_name in nodes_fn(inst) - if self._UnlockedGetNodeInfo(node_name).group == uuid) + for node_uuid in nodes_fn(inst) + if self._UnlockedGetNodeInfo(node_uuid).group == uuid) + + def _UnlockedGetHvparamsString(self, hvname): + """Return the string representation of the list of hyervisor parameters of + the given hypervisor. + + @see: C{GetHvparams} + + """ + result = "" + hvparams = self._config_data.cluster.hvparams[hvname] + for key in hvparams: + result += "%s=%s\n" % (key, hvparams[key]) + return result + + @locking.ssynchronized(_config_lock, shared=1) + def GetHvparamsString(self, hvname): + """Return the hypervisor parameters of the given hypervisor. + + @type hvname: string + @param hvname: name of a hypervisor + @rtype: string + @return: string containing key-value-pairs, one pair on each line; + format: KEY=VALUE + + """ + return self._UnlockedGetHvparamsString(hvname) def _UnlockedGetNodeList(self): """Return the list of nodes which are in the configuration. 
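# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream patch): the KEY=VALUE
# rendering produced by _UnlockedGetHvparamsString() above; the result later
# ends up in per-hypervisor ssconf files.  The parameter names and values are
# made up for the example, and no particular key order is guaranteed.
def HvparamsToString(hvparams):
  result = ""
  for key in hvparams:
    result += "%s=%s\n" % (key, hvparams[key])
  return result

rendered = HvparamsToString({"kernel_path": "/boot/vmlinuz",
                             "root_path": "/dev/vda1"})
assert "kernel_path=/boot/vmlinuz\n" in rendered
assert "root_path=/dev/vda1\n" in rendered
# ---------------------------------------------------------------------------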
@@ -1594,7 +1986,7 @@ class ConfigWriter: """ all_nodes = [self._UnlockedGetNodeInfo(node) for node in self._UnlockedGetNodeList()] - return [node.name for node in all_nodes if not node.offline] + return [node.uuid for node in all_nodes if not node.offline] @locking.ssynchronized(_config_lock, shared=1) def GetOnlineNodeList(self): @@ -1610,7 +2002,7 @@ class ConfigWriter: """ all_nodes = [self._UnlockedGetNodeInfo(node) for node in self._UnlockedGetNodeList()] - return [node.name for node in all_nodes if node.vm_capable] + return [node.uuid for node in all_nodes if node.vm_capable] @locking.ssynchronized(_config_lock, shared=1) def GetNonVmCapableNodeList(self): @@ -1619,20 +2011,29 @@ class ConfigWriter: """ all_nodes = [self._UnlockedGetNodeInfo(node) for node in self._UnlockedGetNodeList()] - return [node.name for node in all_nodes if not node.vm_capable] + return [node.uuid for node in all_nodes if not node.vm_capable] @locking.ssynchronized(_config_lock, shared=1) - def GetMultiNodeInfo(self, nodes): + def GetMultiNodeInfo(self, node_uuids): """Get the configuration of multiple nodes. - @param nodes: list of node names + @param node_uuids: list of node UUIDs @rtype: list @return: list of tuples of (node, node_info), where node_info is what would GetNodeInfo return for the node, in the original order """ - return [(name, self._UnlockedGetNodeInfo(name)) for name in nodes] + return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids] + + def _UnlockedGetAllNodesInfo(self): + """Gets configuration of all nodes. + + @note: See L{GetAllNodesInfo} + + """ + return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid)) + for node_uuid in self._UnlockedGetNodeList()]) @locking.ssynchronized(_config_lock, shared=1) def GetAllNodesInfo(self): @@ -1645,25 +2046,73 @@ class ConfigWriter: """ return self._UnlockedGetAllNodesInfo() - def _UnlockedGetAllNodesInfo(self): - """Gets configuration of all nodes. + def _UnlockedGetNodeInfoByName(self, node_name): + for node in self._UnlockedGetAllNodesInfo().values(): + if node.name == node_name: + return node + return None - @note: See L{GetAllNodesInfo} + @locking.ssynchronized(_config_lock, shared=1) + def GetNodeInfoByName(self, node_name): + """Get the L{objects.Node} object for a named node. + + @param node_name: name of the node to get information for + @type node_name: string + @return: the corresponding L{objects.Node} instance or None if no + information is available + + """ + return self._UnlockedGetNodeInfoByName(node_name) + + def _UnlockedGetNodeName(self, node_spec): + if isinstance(node_spec, objects.Node): + return node_spec.name + elif isinstance(node_spec, basestring): + node_info = self._UnlockedGetNodeInfo(node_spec) + if node_info is None: + raise errors.OpExecError("Unknown node: %s" % node_spec) + return node_info.name + else: + raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec) + + @locking.ssynchronized(_config_lock, shared=1) + def GetNodeName(self, node_spec): + """Gets the node name for the passed node. + + @param node_spec: node to get names for + @type node_spec: either node UUID or a L{objects.Node} object + @rtype: string + @return: node name + + """ + return self._UnlockedGetNodeName(node_spec) + + def _UnlockedGetNodeNames(self, node_specs): + return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs] + + @locking.ssynchronized(_config_lock, shared=1) + def GetNodeNames(self, node_specs): + """Gets the node names for the passed list of nodes. 
+ + @param node_specs: list of nodes to get names for + @type node_specs: list of either node UUIDs or L{objects.Node} objects + @rtype: list of strings + @return: list of node names """ - return dict([(node, self._UnlockedGetNodeInfo(node)) - for node in self._UnlockedGetNodeList()]) + return self._UnlockedGetNodeNames(node_specs) @locking.ssynchronized(_config_lock, shared=1) - def GetNodeGroupsFromNodes(self, nodes): + def GetNodeGroupsFromNodes(self, node_uuids): """Returns groups for a list of nodes. - @type nodes: list of string - @param nodes: List of node names + @type node_uuids: list of string + @param node_uuids: List of node UUIDs @rtype: frozenset """ - return frozenset(self._UnlockedGetNodeInfo(name).group for name in nodes) + return frozenset(self._UnlockedGetNodeInfo(uuid).group + for uuid in node_uuids) def _UnlockedGetMasterCandidateStats(self, exceptions=None): """Get the number of current and maximum desired and possible candidates. @@ -1676,7 +2125,7 @@ class ConfigWriter: """ mc_now = mc_should = mc_max = 0 for node in self._config_data.nodes.values(): - if exceptions and node.name in exceptions: + if exceptions and node.uuid in exceptions: continue if not (node.offline or node.drained) and node.master_capable: mc_max += 1 @@ -1700,26 +2149,27 @@ class ConfigWriter: return self._UnlockedGetMasterCandidateStats(exceptions) @locking.ssynchronized(_config_lock) - def MaintainCandidatePool(self, exceptions): + def MaintainCandidatePool(self, exception_node_uuids): """Try to grow the candidate pool to the desired size. - @type exceptions: list - @param exceptions: if passed, list of nodes that should be ignored + @type exception_node_uuids: list + @param exception_node_uuids: if passed, list of nodes that should be ignored @rtype: list @return: list with the adjusted nodes (L{objects.Node} instances) """ - mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions) + mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats( + exception_node_uuids) mod_list = [] if mc_now < mc_max: node_list = self._config_data.nodes.keys() random.shuffle(node_list) - for name in node_list: + for uuid in node_list: if mc_now >= mc_max: break - node = self._config_data.nodes[name] + node = self._config_data.nodes[uuid] if (node.master_candidate or node.offline or node.drained or - node.name in exceptions or not node.master_capable): + node.uuid in exception_node_uuids or not node.master_capable): continue mod_list.append(node) node.master_candidate = True @@ -1735,7 +2185,7 @@ class ConfigWriter: return mod_list - def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid): + def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid): """Add a given node to the specified group. """ @@ -1745,8 +2195,8 @@ class ConfigWriter: # the meantime. It's ok though, as we'll fail cleanly if the node group # is not found anymore. raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid) - if node_name not in self._config_data.nodegroups[nodegroup_uuid].members: - self._config_data.nodegroups[nodegroup_uuid].members.append(node_name) + if node_uuid not in self._config_data.nodegroups[nodegroup_uuid].members: + self._config_data.nodegroups[nodegroup_uuid].members.append(node_uuid) def _UnlockedRemoveNodeFromGroup(self, node): """Remove a given node from its group. 
@@ -1755,13 +2205,13 @@ class ConfigWriter: nodegroup = node.group if nodegroup not in self._config_data.nodegroups: logging.warning("Warning: node '%s' has unknown node group '%s'" - " (while being removed from it)", node.name, nodegroup) + " (while being removed from it)", node.uuid, nodegroup) nodegroup_obj = self._config_data.nodegroups[nodegroup] - if node.name not in nodegroup_obj.members: + if node.uuid not in nodegroup_obj.members: logging.warning("Warning: node '%s' not a member of its node group '%s'" - " (while being removed from it)", node.name, nodegroup) + " (while being removed from it)", node.uuid, nodegroup) else: - nodegroup_obj.members.remove(node.name) + nodegroup_obj.members.remove(node.uuid) @locking.ssynchronized(_config_lock) def AssignGroupNodes(self, mods): @@ -1776,17 +2226,17 @@ class ConfigWriter: resmod = [] - # Try to resolve names/UUIDs first - for (node_name, new_group_uuid) in mods: + # Try to resolve UUIDs first + for (node_uuid, new_group_uuid) in mods: try: - node = nodes[node_name] + node = nodes[node_uuid] except KeyError: - raise errors.ConfigurationError("Unable to find node '%s'" % node_name) + raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid) if node.group == new_group_uuid: # Node is being assigned to its current group logging.debug("Node '%s' was assigned to its current group (%s)", - node_name, node.group) + node_uuid, node.group) continue # Try to find current group of node @@ -1803,12 +2253,12 @@ class ConfigWriter: raise errors.ConfigurationError("Unable to find new group '%s'" % new_group_uuid) - assert node.name in old_group.members, \ + assert node.uuid in old_group.members, \ ("Inconsistent configuration: node '%s' not listed in members for its" - " old group '%s'" % (node.name, old_group.uuid)) - assert node.name not in new_group.members, \ + " old group '%s'" % (node.uuid, old_group.uuid)) + assert node.uuid not in new_group.members, \ ("Inconsistent configuration: node '%s' already listed in members for" - " its new group '%s'" % (node.name, new_group.uuid)) + " its new group '%s'" % (node.uuid, new_group.uuid)) resmod.append((node, old_group, new_group)) @@ -1820,10 +2270,10 @@ class ConfigWriter: node.group = new_group.uuid # Update members of involved groups - if node.name in old_group.members: - old_group.members.remove(node.name) - if node.name not in new_group.members: - new_group.members.append(node.name) + if node.uuid in old_group.members: + old_group.members.remove(node.uuid) + if node.uuid not in new_group.members: + new_group.members.append(node.uuid) # Update timestamps and serials (only once per node/group object) now = time.time() @@ -1850,6 +2300,9 @@ class ConfigWriter: return (self._config_data.instances.values() + self._config_data.nodes.values() + self._config_data.nodegroups.values() + + self._config_data.networks.values() + + self._AllDisks() + + self._AllNICs() + [self._config_data.cluster]) def _OpenConfig(self, accept_foreign): @@ -1871,31 +2324,36 @@ class ConfigWriter: raise errors.ConfigurationError("Incomplete configuration" " (missing cluster.rsahostkeypub)") - if data.cluster.master_node != self._my_hostname and not accept_foreign: + if not data.cluster.master_node in data.nodes: + msg = ("The configuration denotes node %s as master, but does not" + " contain information about this node" % + data.cluster.master_node) + raise errors.ConfigurationError(msg) + + master_info = data.nodes[data.cluster.master_node] + if master_info.name != self._my_hostname and not accept_foreign: msg = 
("The configuration denotes node %s as master, while my" " hostname is %s; opening a foreign configuration is only" " possible in accept_foreign mode" % - (data.cluster.master_node, self._my_hostname)) + (master_info.name, self._my_hostname)) raise errors.ConfigurationError(msg) - # Upgrade configuration if needed - data.UpgradeConfig() - self._config_data = data # reset the last serial as -1 so that the next write will cause # ssconf update self._last_cluster_serial = -1 - # And finally run our (custom) config upgrade sequence + # Upgrade configuration if needed self._UpgradeConfig() self._cfg_id = utils.GetFileID(path=self._cfg_file) def _UpgradeConfig(self): - """Run upgrade steps that cannot be done purely in the objects. + """Run any upgrade steps. - This is because some data elements need uniqueness across the - whole configuration, etc. + This method performs both in-object upgrades and also update some data + elements that need uniqueness across the whole configuration or interact + with other objects. @warning: this function will call L{_WriteConfig()}, but also L{DropECReservations} so it needs to be called only from a @@ -1904,31 +2362,42 @@ class ConfigWriter: created first, to avoid causing deadlock. """ - modified = False + # Keep a copy of the persistent part of _config_data to check for changes + # Serialization doesn't guarantee order in dictionaries + oldconf = copy.deepcopy(self._config_data.ToDict()) + + # In-object upgrades + self._config_data.UpgradeConfig() + for item in self._AllUUIDObjects(): if item.uuid is None: item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID) - modified = True if not self._config_data.nodegroups: default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME default_nodegroup = objects.NodeGroup(name=default_nodegroup_name, members=[]) self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True) - modified = True for node in self._config_data.nodes.values(): if not node.group: node.group = self.LookupNodeGroup(None) - modified = True # This is technically *not* an upgrade, but needs to be done both when # nodegroups are being added, and upon normally loading the config, # because the members list of a node group is discarded upon # serializing/deserializing the object. - self._UnlockedAddNodeToGroup(node.name, node.group) + self._UnlockedAddNodeToGroup(node.uuid, node.group) + + modified = (oldconf != self._config_data.ToDict()) if modified: self._WriteConfig() # This is ok even if it acquires the internal lock, as _UpgradeConfig is # only called at config init time, without the lock held self.DropECReservations(_UPGRADE_CONFIG_JID) + else: + config_errors = self._UnlockedVerifyConfig() + if config_errors: + errmsg = ("Loaded configuration data is not consistent: %s" % + (utils.CommaJoin(config_errors))) + logging.critical(errmsg) def _DistributeConfig(self, feedback_fn): """Distribute the configuration to the other nodes. 
@@ -1949,11 +2418,9 @@ class ConfigWriter: # since the node list comes from _UnlocketGetNodeList, and we are # called with the lock held, so no modifications should take place # in between - for node_name in self._UnlockedGetNodeList(): - if node_name == myhostname: - continue - node_info = self._UnlockedGetNodeInfo(node_name) - if not node_info.master_candidate: + for node_uuid in self._UnlockedGetNodeList(): + node_info = self._UnlockedGetNodeInfo(node_uuid) + if node_info.name == myhostname or not node_info.master_candidate: continue node_list.append(node_info.name) addr_list.append(node_info.primary_ip) @@ -2020,7 +2487,7 @@ class ConfigWriter: if self._last_cluster_serial < self._config_data.cluster.serial_no: if not self._offline: result = self._GetRpc(None).call_write_ssconf_files( - self._UnlockedGetOnlineNodeList(), + self._UnlockedGetNodeNames(self._UnlockedGetOnlineNodeList()), self._UnlockedGetSsconfValues()) for nname, nresu in result.items(): @@ -2035,6 +2502,40 @@ class ConfigWriter: self._last_cluster_serial = self._config_data.cluster.serial_no + def _GetAllHvparamsStrings(self, hypervisors): + """Get the hvparams of all given hypervisors from the config. + + @type hypervisors: list of string + @param hypervisors: list of hypervisor names + @rtype: dict of strings + @returns: dictionary mapping the hypervisor name to a string representation + of the hypervisor's hvparams + + """ + hvparams = {} + for hv in hypervisors: + hvparams[hv] = self._UnlockedGetHvparamsString(hv) + return hvparams + + @staticmethod + def _ExtendByAllHvparamsStrings(ssconf_values, all_hvparams): + """Extends the ssconf_values dictionary by hvparams. + + @type ssconf_values: dict of strings + @param ssconf_values: dictionary mapping ssconf_keys to strings + representing the content of ssconf files + @type all_hvparams: dict of strings + @param all_hvparams: dictionary mapping hypervisor names to a string + representation of their hvparams + @rtype: same as ssconf_values + @returns: the ssconf_values dictionary extended by hvparams + + """ + for hv in all_hvparams: + ssconf_key = constants.SS_HVPARAMS_PREF + hv + ssconf_values[ssconf_key] = all_hvparams[hv] + return ssconf_values + def _UnlockedGetSsconfValues(self): """Return the values needed by ssconf. 
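The two helpers above, _GetAllHvparamsStrings and _ExtendByAllHvparamsStrings, publish per-hypervisor parameters through ssconf by deriving one key per hypervisor from a common prefix. The sketch below mirrors that pattern in isolation; the value of SS_HVPARAMS_PREF and the FormatHvparams helper are assumptions standing in for the real constants and serializer.

    SS_HVPARAMS_PREF = "hvparams_"   # assumed stand-in for constants.SS_HVPARAMS_PREF

    def FormatHvparams(params):
        # the real code serializes the hvparams dict; "key=value" per line
        # is enough for this sketch
        return "\n".join("%s=%s" % (k, v) for k, v in sorted(params.items()))

    def ExtendByAllHvparamsStrings(ssconf_values, all_hvparams):
        # one ssconf key per hypervisor, named after a common prefix
        for hv, params_str in all_hvparams.items():
            ssconf_values[SS_HVPARAMS_PREF + hv] = params_str
        return ssconf_values

    hvparams = {"kvm": {"kernel_path": "/boot/vmlinuz"},
                "xen-pvm": {"bootloader_path": ""}}
    all_hvparams = dict((hv, FormatHvparams(p)) for hv, p in hvparams.items())
    ssconf = ExtendByAllHvparamsStrings({"cluster_name": "cluster.example.com"},
                                        all_hvparams)
    print(sorted(ssconf))  # ['cluster_name', 'hvparams_kvm', 'hvparams_xen-pvm']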
@@ -2044,19 +2545,21 @@ class ConfigWriter: """ fn = "\n".join - instance_names = utils.NiceSort(self._UnlockedGetInstanceList()) - node_names = utils.NiceSort(self._UnlockedGetNodeList()) - node_info = [self._UnlockedGetNodeInfo(name) for name in node_names] + instance_names = utils.NiceSort( + [inst.name for inst in + self._UnlockedGetAllInstancesInfo().values()]) + node_infos = self._UnlockedGetAllNodesInfo().values() + node_names = [node.name for node in node_infos] node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip) - for ninfo in node_info] + for ninfo in node_infos] node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip) - for ninfo in node_info] + for ninfo in node_infos] instance_data = fn(instance_names) - off_data = fn(node.name for node in node_info if node.offline) - on_data = fn(node.name for node in node_info if not node.offline) - mc_data = fn(node.name for node in node_info if node.master_candidate) - mc_ips_data = fn(node.primary_ip for node in node_info + off_data = fn(node.name for node in node_infos if node.offline) + on_data = fn(node.name for node in node_infos if not node.offline) + mc_data = fn(node.name for node in node_infos if node.master_candidate) + mc_ips_data = fn(node.primary_ip for node in node_infos if node.master_candidate) node_data = fn(node_names) node_pri_ips_data = fn(node_pri_ips) @@ -2066,12 +2569,16 @@ class ConfigWriter: cluster_tags = fn(cluster.GetTags()) hypervisor_list = fn(cluster.enabled_hypervisors) + all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES) uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n") nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in self._config_data.nodegroups.values()] nodegroups_data = fn(utils.NiceSort(nodegroups)) + networks = ["%s %s" % (net.uuid, net.name) for net in + self._config_data.networks.values()] + networks_data = fn(utils.NiceSort(networks)) ssconf_values = { constants.SS_CLUSTER_NAME: cluster.cluster_name, @@ -2083,7 +2590,7 @@ class ConfigWriter: constants.SS_MASTER_IP: cluster.master_ip, constants.SS_MASTER_NETDEV: cluster.master_netdev, constants.SS_MASTER_NETMASK: str(cluster.master_netmask), - constants.SS_MASTER_NODE: cluster.master_node, + constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node), constants.SS_NODE_LIST: node_data, constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data, constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data, @@ -2096,7 +2603,10 @@ class ConfigWriter: constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health), constants.SS_UID_POOL: uid_pool, constants.SS_NODEGROUPS: nodegroups_data, + constants.SS_NETWORKS: networks_data, } + ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values, + all_hvparams) bad_values = [(k, v) for k, v in ssconf_values.items() if not isinstance(v, (str, basestring))] if bad_values: @@ -2169,7 +2679,7 @@ class ConfigWriter: return self._config_data.HasAnyDiskOfType(dev_type) @locking.ssynchronized(_config_lock) - def Update(self, target, feedback_fn): + def Update(self, target, feedback_fn, ec_id=None): """Notify function to be called after updates. 
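Since nodes are now keyed by UUID, _UnlockedGetSsconfValues above builds every name- and IP-based ssconf value from the node objects themselves rather than from dictionary keys, and resolves the master node UUID back to a name for SS_MASTER_NODE. A rough sketch of that list-building step, with a hypothetical Node stand-in and made-up dictionary keys in place of objects.Node and the SS_* constants:

    from collections import namedtuple

    # Hypothetical stand-in for objects.Node; only the fields used below.
    Node = namedtuple("Node", ["name", "primary_ip", "secondary_ip",
                               "offline", "master_candidate"])

    def BuildNodeSsconf(node_infos):
        fn = "\n".join
        return {
            "node_list": fn(node.name for node in node_infos),
            "online_nodes": fn(node.name for node in node_infos
                               if not node.offline),
            "offline_nodes": fn(node.name for node in node_infos
                                if node.offline),
            "master_candidates": fn(node.name for node in node_infos
                                    if node.master_candidate),
            "master_candidates_ips": fn(node.primary_ip for node in node_infos
                                        if node.master_candidate),
            "node_primary_ips": fn("%s %s" % (node.name, node.primary_ip)
                                   for node in node_infos),
        }

    nodes = [Node("node1.example.com", "192.0.2.1", "198.51.100.1",
                  False, True),
             Node("node2.example.com", "192.0.2.2", "198.51.100.2",
                  True, False)]
    print(BuildNodeSsconf(nodes)["master_candidates"])  # node1.example.com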
This function must be called when an object (as returned by @@ -2197,6 +2707,8 @@ class ConfigWriter: test = target in self._config_data.instances.values() elif isinstance(target, objects.NodeGroup): test = target in self._config_data.nodegroups.values() + elif isinstance(target, objects.Network): + test = target in self._config_data.networks.values() else: raise errors.ProgrammerError("Invalid object type (%s) passed to" " ConfigWriter.Update" % type(target)) @@ -2212,7 +2724,11 @@ class ConfigWriter: self._config_data.cluster.mtime = now if isinstance(target, objects.Instance): - self._UnlockedReleaseDRBDMinors(target.name) + self._UnlockedReleaseDRBDMinors(target.uuid) + + if ec_id is not None: + # Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams + self._UnlockedCommitTemporaryIps(ec_id) self._WriteConfig(feedback_fn=feedback_fn) @@ -2223,3 +2739,193 @@ class ConfigWriter: """ for rm in self._all_rms: rm.DropECReservations(ec_id) + + @locking.ssynchronized(_config_lock, shared=1) + def GetAllNetworksInfo(self): + """Get configuration info of all the networks. + + """ + return dict(self._config_data.networks) + + def _UnlockedGetNetworkList(self): + """Get the list of networks. + + This function is for internal use, when the config lock is already held. + + """ + return self._config_data.networks.keys() + + @locking.ssynchronized(_config_lock, shared=1) + def GetNetworkList(self): + """Get the list of networks. + + @return: array of networks, ex. ["main", "vlan100", "200] + + """ + return self._UnlockedGetNetworkList() + + @locking.ssynchronized(_config_lock, shared=1) + def GetNetworkNames(self): + """Get a list of network names + + """ + names = [net.name + for net in self._config_data.networks.values()] + return names + + def _UnlockedGetNetwork(self, uuid): + """Returns information about a network. + + This function is for internal use, when the config lock is already held. + + """ + if uuid not in self._config_data.networks: + return None + + return self._config_data.networks[uuid] + + @locking.ssynchronized(_config_lock, shared=1) + def GetNetwork(self, uuid): + """Returns information about a network. + + It takes the information from the configuration file. + + @param uuid: UUID of the network + + @rtype: L{objects.Network} + @return: the network object + + """ + return self._UnlockedGetNetwork(uuid) + + @locking.ssynchronized(_config_lock) + def AddNetwork(self, net, ec_id, check_uuid=True): + """Add a network to the configuration. + + @type net: L{objects.Network} + @param net: the Network object to add + @type ec_id: string + @param ec_id: unique id for the job to use when creating a missing UUID + + """ + self._UnlockedAddNetwork(net, ec_id, check_uuid) + self._WriteConfig() + + def _UnlockedAddNetwork(self, net, ec_id, check_uuid): + """Add a network to the configuration. + + """ + logging.info("Adding network %s to configuration", net.name) + + if check_uuid: + self._EnsureUUID(net, ec_id) + + net.serial_no = 1 + net.ctime = net.mtime = time.time() + self._config_data.networks[net.uuid] = net + self._config_data.cluster.serial_no += 1 + + def _UnlockedLookupNetwork(self, target): + """Lookup a network's UUID. 
+ + @type target: string + @param target: network name or UUID + @rtype: string + @return: network UUID + @raises errors.OpPrereqError: when the target network cannot be found + + """ + if target is None: + return None + if target in self._config_data.networks: + return target + for net in self._config_data.networks.values(): + if net.name == target: + return net.uuid + raise errors.OpPrereqError("Network '%s' not found" % target, + errors.ECODE_NOENT) + + @locking.ssynchronized(_config_lock, shared=1) + def LookupNetwork(self, target): + """Lookup a network's UUID. + + This function is just a wrapper over L{_UnlockedLookupNetwork}. + + @type target: string + @param target: network name or UUID + @rtype: string + @return: network UUID + + """ + return self._UnlockedLookupNetwork(target) + + @locking.ssynchronized(_config_lock) + def RemoveNetwork(self, network_uuid): + """Remove a network from the configuration. + + @type network_uuid: string + @param network_uuid: the UUID of the network to remove + + """ + logging.info("Removing network %s from configuration", network_uuid) + + if network_uuid not in self._config_data.networks: + raise errors.ConfigurationError("Unknown network '%s'" % network_uuid) + + del self._config_data.networks[network_uuid] + self._config_data.cluster.serial_no += 1 + self._WriteConfig() + + def _UnlockedGetGroupNetParams(self, net_uuid, node_uuid): + """Get the netparams (mode, link) of a network. + + Get a network's netparams for a given node. + + @type net_uuid: string + @param net_uuid: network uuid + @type node_uuid: string + @param node_uuid: node UUID + @rtype: dict or None + @return: netparams + + """ + node_info = self._UnlockedGetNodeInfo(node_uuid) + nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) + netparams = nodegroup_info.networks.get(net_uuid, None) + + return netparams + + @locking.ssynchronized(_config_lock, shared=1) + def GetGroupNetParams(self, net_uuid, node_uuid): + """Locking wrapper of _UnlockedGetGroupNetParams() + + """ + return self._UnlockedGetGroupNetParams(net_uuid, node_uuid) + + @locking.ssynchronized(_config_lock, shared=1) + def CheckIPInNodeGroup(self, ip, node_uuid): + """Check IP uniqueness in nodegroup. + + Check networks that are connected in the node's node group + if ip is contained in any of them. Used when creating/adding + a NIC to ensure uniqueness among nodegroups. + + @type ip: string + @param ip: ip address + @type node_uuid: string + @param node_uuid: node UUID + @rtype: (string, dict) or (None, None) + @return: (network name, netparams) + + """ + if ip is None: + return (None, None) + node_info = self._UnlockedGetNodeInfo(node_uuid) + nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) + for net_uuid in nodegroup_info.networks.keys(): + net_info = self._UnlockedGetNetwork(net_uuid) + pool = network.AddressPool(net_info) + if pool.Contains(ip): + return (net_info.name, nodegroup_info.networks[net_uuid]) + + return (None, None)
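The network registry added above consistently accepts either a human-readable name or a UUID and normalises it to a UUID before touching the configuration; RemoveNetwork, GetGroupNetParams and CheckIPInNodeGroup then operate purely on UUIDs. A condensed sketch of that resolution step, assuming simplified stand-ins for objects.Network and errors.OpPrereqError rather than the real classes:

    class OpPrereqError(Exception):
        """Stand-in for errors.OpPrereqError."""

    class Net(object):
        """Stand-in for objects.Network; only uuid and name are needed."""
        def __init__(self, uuid, name):
            self.uuid = uuid
            self.name = name

    def LookupNetwork(networks, target):
        """Resolve target, given as either a UUID or a name, to a UUID."""
        if target is None:
            return None
        if target in networks:            # already a UUID known to the registry
            return target
        for net in networks.values():     # otherwise scan for a matching name
            if net.name == target:
                return net.uuid
        raise OpPrereqError("Network '%s' not found" % target)

    nets = {"7a1d42": Net("7a1d42", "vlan100"),
            "9c3f17": Net("9c3f17", "main")}
    assert LookupNetwork(nets, "vlan100") == "7a1d42"
    assert LookupNetwork(nets, "9c3f17") == "9c3f17"

Normalising early keeps the rest of the config code free of name/UUID ambiguity, which is the same convention the node and node-group code follows after the UUID conversion in this patch.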