X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/0389c42aadc7b929a1f984626a82f39c8481ed6d..2492231f7381ce33f2164322f799c84b2d7cceef:/lib/config.py diff --git a/lib/config.py b/lib/config.py index 6cec7c4..7d81b37 100644 --- a/lib/config.py +++ b/lib/config.py @@ -125,6 +125,13 @@ class TemporaryReservationManager: return new_resource +def _MatchNameComponentIgnoreCase(short_name, names): + """Wrapper around L{utils.text.MatchNameComponent}. + + """ + return utils.MatchNameComponent(short_name, names, case_sensitive=False) + + class ConfigWriter: """The interface to the cluster configuration. @@ -212,7 +219,7 @@ class ConfigWriter: if mac in all_macs: raise errors.ReservationError("mac already in use") else: - self._temporary_macs.Reserve(mac, ec_id) + self._temporary_macs.Reserve(ec_id, mac) @locking.ssynchronized(_config_lock, shared=1) def ReserveLV(self, lv_name, ec_id): @@ -226,7 +233,7 @@ class ConfigWriter: if lv_name in all_lvs: raise errors.ReservationError("LV already in use") else: - self._temporary_lvs.Reserve(lv_name, ec_id) + self._temporary_lvs.Reserve(ec_id, lv_name) @locking.ssynchronized(_config_lock, shared=1) def GenerateDRBDSecret(self, ec_id): @@ -367,29 +374,52 @@ class ConfigWriter: configuration errors """ + # pylint: disable-msg=R0914 result = [] seen_macs = [] ports = {} data = self._config_data + cluster = data.cluster seen_lids = [] seen_pids = [] # global cluster checks - if not data.cluster.enabled_hypervisors: + if not cluster.enabled_hypervisors: result.append("enabled hypervisors list doesn't have any entries") - invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES + invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES if invalid_hvs: result.append("enabled hypervisors contains invalid entries: %s" % invalid_hvs) - missing_hvp = (set(data.cluster.enabled_hypervisors) - - set(data.cluster.hvparams.keys())) + missing_hvp = (set(cluster.enabled_hypervisors) - + set(cluster.hvparams.keys())) if missing_hvp: result.append("hypervisor parameters missing for the enabled" " hypervisor(s) %s" % utils.CommaJoin(missing_hvp)) - if data.cluster.master_node not in data.nodes: + if cluster.master_node not in data.nodes: result.append("cluster has invalid primary node '%s'" % - data.cluster.master_node) + cluster.master_node) + + def _helper(owner, attr, value, template): + try: + utils.ForceDictType(value, template) + except errors.GenericError, err: + result.append("%s has invalid %s: %s" % (owner, attr, err)) + + def _helper_nic(owner, params): + try: + objects.NIC.CheckParameterSyntax(params) + except errors.ConfigurationError, err: + result.append("%s has invalid nicparams: %s" % (owner, err)) + + # check cluster parameters + _helper("cluster", "beparams", cluster.SimpleFillBE({}), + constants.BES_PARAMETER_TYPES) + _helper("cluster", "nicparams", cluster.SimpleFillNIC({}), + constants.NICS_PARAMETER_TYPES) + _helper_nic("cluster", cluster.SimpleFillNIC({})) + _helper("cluster", "ndparams", cluster.SimpleFillND({}), + constants.NDS_PARAMETER_TYPES) # per-instance checks for instance_name in data.instances: @@ -410,6 +440,17 @@ class ConfigWriter: (instance_name, idx, nic.mac)) else: seen_macs.append(nic.mac) + if nic.nicparams: + filled = cluster.SimpleFillNIC(nic.nicparams) + owner = "instance %s nic %d" % (instance.name, idx) + _helper(owner, "nicparams", + filled, constants.NICS_PARAMETER_TYPES) + _helper_nic(owner, filled) + + # parameter checks + if instance.beparams: + _helper("instance %s" % 
instance.name, "beparams", + cluster.FillBE(instance), constants.BES_PARAMETER_TYPES) # gather the drbd ports for duplicate checks for dsk in instance.disks: @@ -432,7 +473,7 @@ class ConfigWriter: result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids)) # cluster-wide pool of free ports - for free_port in data.cluster.tcpudp_port_pool: + for free_port in cluster.tcpudp_port_pool: if free_port not in ports: ports[free_port] = [] ports[free_port].append(("cluster", "port marked as free")) @@ -448,11 +489,11 @@ class ConfigWriter: # highest used tcp port check if keys: - if keys[-1] > data.cluster.highest_used_port: + if keys[-1] > cluster.highest_used_port: result.append("Highest used port mismatch, saved %s, computed %s" % - (data.cluster.highest_used_port, keys[-1])) + (cluster.highest_used_port, keys[-1])) - if not data.nodes[data.cluster.master_node].master_candidate: + if not data.nodes[cluster.master_node].master_candidate: result.append("Master node is not a master candidate") # master candidate checks @@ -471,6 +512,13 @@ class ConfigWriter: " drain=%s, offline=%s" % (node.name, node.master_candidate, node.drained, node.offline)) + if node.group not in data.nodegroups: + result.append("Node '%s' has invalid group '%s'" % + (node.name, node.group)) + else: + _helper("node %s" % node.name, "ndparams", + cluster.FillND(node, data.nodegroups[node.group]), + constants.NDS_PARAMETER_TYPES) # nodegroups checks nodegroups_names = set() @@ -486,6 +534,11 @@ class ConfigWriter: result.append("duplicate node group name '%s'" % nodegroup.name) else: nodegroups_names.add(nodegroup.name) + if nodegroup.ndparams: + _helper("group %s" % nodegroup.name, "ndparams", + cluster.SimpleFillND(nodegroup.ndparams), + constants.NDS_PARAMETER_TYPES) + # drbd minors check _, duplicates = self._UnlockedComputeDRBDMap() @@ -494,13 +547,13 @@ class ConfigWriter: " %s and %s" % (minor, node, instance_a, instance_b)) # IP checks - default_nicparams = data.cluster.nicparams[constants.PP_DEFAULT] + default_nicparams = cluster.nicparams[constants.PP_DEFAULT] ips = {} def _AddIpAddress(ip, name): ips.setdefault(ip, []).append(name) - _AddIpAddress(data.cluster.master_ip, "cluster_ip") + _AddIpAddress(cluster.master_ip, "cluster_ip") for node in data.nodes.values(): _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name) @@ -832,6 +885,13 @@ class ConfigWriter: return self._config_data.cluster.file_storage_dir @locking.ssynchronized(_config_lock, shared=1) + def GetSharedFileStorageDir(self): + """Get the shared file storage dir for this cluster. + + """ + return self._config_data.cluster.shared_file_storage_dir + + @locking.ssynchronized(_config_lock, shared=1) def GetHypervisorType(self): """Get the hypervisor type for this cluster. @@ -896,6 +956,16 @@ class ConfigWriter: if check_uuid: self._EnsureUUID(group, ec_id) + try: + existing_uuid = self._UnlockedLookupNodeGroup(group.name) + except errors.OpPrereqError: + pass + else: + raise errors.OpPrereqError("Desired group name '%s' already exists as a" + " node group (UUID: %s)" % + (group.name, existing_uuid), + errors.ECODE_EXISTS) + group.serial_no = 1 group.ctime = group.mtime = time.time() group.UpgradeConfig() @@ -923,8 +993,7 @@ class ConfigWriter: self._config_data.cluster.serial_no += 1 self._WriteConfig() - @locking.ssynchronized(_config_lock, shared=1) - def LookupNodeGroup(self, target): + def _UnlockedLookupNodeGroup(self, target): """Lookup a node group's UUID. 
@type target: string or None @@ -948,6 +1017,20 @@ class ConfigWriter: raise errors.OpPrereqError("Node group '%s' not found" % target, errors.ECODE_NOENT) + @locking.ssynchronized(_config_lock, shared=1) + def LookupNodeGroup(self, target): + """Lookup a node group's UUID. + + This function is just a wrapper over L{_UnlockedLookupNodeGroup}. + + @type target: string or None + @param target: group name or UUID or None to look for the default + @rtype: string + @return: nodegroup UUID + + """ + return self._UnlockedLookupNodeGroup(target) + def _UnlockedGetNodeGroup(self, uuid): """Lookup a node group. @@ -988,6 +1071,17 @@ class ConfigWriter: """ return self._config_data.nodegroups.keys() + @locking.ssynchronized(_config_lock, shared=1) + def GetNodeGroupMembersByNodes(self, nodes): + """Get nodes which are member in the same nodegroups as the given nodes. + + """ + ngfn = lambda node_name: self._UnlockedGetNodeInfo(node_name).group + return frozenset(member_name + for node_name in nodes + for member_name in + self._UnlockedGetNodeGroup(ngfn(node_name)).members) + @locking.ssynchronized(_config_lock) def AddInstance(self, instance, ec_id): """Add an instance to the config. @@ -1125,14 +1219,12 @@ class ConfigWriter: """ return self._UnlockedGetInstanceList() - @locking.ssynchronized(_config_lock, shared=1) def ExpandInstanceName(self, short_name): """Attempt to expand an incomplete instance name. """ - return utils.MatchNameComponent(short_name, - self._config_data.instances.keys(), - case_sensitive=False) + # Locking is done in L{ConfigWriter.GetInstanceList} + return _MatchNameComponentIgnoreCase(short_name, self.GetInstanceList()) def _UnlockedGetInstanceInfo(self, instance_name): """Returns information about an instance. @@ -1162,6 +1254,25 @@ class ConfigWriter: return self._UnlockedGetInstanceInfo(instance_name) @locking.ssynchronized(_config_lock, shared=1) + def GetInstanceNodeGroups(self, instance_name, primary_only=False): + """Returns set of node group UUIDs for instance's nodes. + + @rtype: frozenset + + """ + instance = self._UnlockedGetInstanceInfo(instance_name) + if not instance: + raise errors.ConfigurationError("Unknown instance '%s'" % instance_name) + + if primary_only: + nodes = [instance.primary_node] + else: + nodes = instance.all_nodes + + return frozenset(self._UnlockedGetNodeInfo(node_name).group + for node_name in nodes) + + @locking.ssynchronized(_config_lock, shared=1) def GetAllInstancesInfo(self): """Get the configuration of all instances. @@ -1208,14 +1319,12 @@ class ConfigWriter: self._config_data.cluster.serial_no += 1 self._WriteConfig() - @locking.ssynchronized(_config_lock, shared=1) def ExpandNodeName(self, short_name): - """Attempt to expand an incomplete instance name. + """Attempt to expand an incomplete node name. """ - return utils.MatchNameComponent(short_name, - self._config_data.nodes.keys(), - case_sensitive=False) + # Locking is done in L{ConfigWriter.GetNodeList} + return _MatchNameComponentIgnoreCase(short_name, self.GetNodeList()) def _UnlockedGetNodeInfo(self, node_name): """Get the configuration of a node, as stored in the config. @@ -1267,6 +1376,26 @@ class ConfigWriter: sec.append(inst.name) return (pri, sec) + @locking.ssynchronized(_config_lock, shared=1) + def GetNodeGroupInstances(self, uuid, primary_only=False): + """Get the instances of a node group. 
+ + @param uuid: Node group UUID + @param primary_only: Whether to only consider primary nodes + @rtype: frozenset + @return: List of instance names in node group + + """ + if primary_only: + nodes_fn = lambda inst: [inst.primary_node] + else: + nodes_fn = lambda inst: inst.all_nodes + + return frozenset(inst.name + for inst in self._config_data.instances.values() + for node_name in nodes_fn(inst) + if self._UnlockedGetNodeInfo(node_name).group == uuid) + def _UnlockedGetNodeList(self): """Return the list of nodes which are in the configuration. @@ -1331,6 +1460,17 @@ class ConfigWriter: for node in self._UnlockedGetNodeList()]) return my_dict + @locking.ssynchronized(_config_lock, shared=1) + def GetNodeGroupsFromNodes(self, nodes): + """Returns groups for a list of nodes. + + @type nodes: list of string + @param nodes: List of node names + @rtype: frozenset + + """ + return frozenset(self._UnlockedGetNodeInfo(name).group for name in nodes) + def _UnlockedGetMasterCandidateStats(self, exceptions=None): """Get the number of current and maximum desired and possible candidates. @@ -1665,10 +1805,11 @@ class ConfigWriter: self._config_data.nodegroups.values()] nodegroups_data = fn(utils.NiceSort(nodegroups)) - return { + ssconf_values = { constants.SS_CLUSTER_NAME: cluster.cluster_name, constants.SS_CLUSTER_TAGS: cluster_tags, constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir, + constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir, constants.SS_MASTER_CANDIDATES: mc_data, constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data, constants.SS_MASTER_IP: cluster.master_ip, @@ -1687,6 +1828,13 @@ class ConfigWriter: constants.SS_UID_POOL: uid_pool, constants.SS_NODEGROUPS: nodegroups_data, } + bad_values = [(k, v) for k, v in ssconf_values.items() + if not isinstance(v, (str, basestring))] + if bad_values: + err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values) + raise errors.ConfigurationError("Some ssconf key(s) have non-string" + " values: %s" % err) + return ssconf_values @locking.ssynchronized(_config_lock, shared=1) def GetSsconfValues(self):
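
Illustrative note (not part of the patch): the hunks above add several shared-lock, group-aware query helpers to ConfigWriter — LookupNodeGroup, GetNodeGroupInstances, GetInstanceNodeGroups, GetNodeGroupMembersByNodes and GetNodeGroupsFromNodes. A minimal usage sketch follows, assuming "cfg" is an already-initialised ConfigWriter; the group, node and instance names are purely hypothetical and not taken from the patch:

    # Hypothetical usage sketch; "cfg" and every name below are illustrative only.
    group_uuid = cfg.LookupNodeGroup("group1")          # accepts a name or a UUID, returns the UUID
    primaries = cfg.GetNodeGroupInstances(group_uuid, primary_only=True)
    peer_nodes = cfg.GetNodeGroupMembersByNodes(["node1.example.com"])
    inst_groups = cfg.GetInstanceNodeGroups("instance1.example.com")

All of these take the config lock in shared mode via the existing @locking.ssynchronized decorator, matching the pattern the rest of ConfigWriter already uses for read-only queries.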