X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/5d60b3bd4b31ada6c00d845d659d4938049b8635..fc8a6b8f3334c1352caafcede1e31d801fc146ba:/lib/config.py

diff --git a/lib/config.py b/lib/config.py
index 8917f4d..712f599 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -148,7 +148,7 @@ class ConfigWriter:
       raise errors.ConfigurationError("Can't generate unique DRBD secret")
     return secret
 
-  def _ComputeAllLVs(self):
+  def _AllLVs(self):
     """Compute the list of all LVs.
 
     """
@@ -159,6 +159,23 @@ class ConfigWriter:
         lvnames.update(lv_list)
     return lvnames
 
+  def _AllIDs(self, include_temporary):
+    """Compute the list of all UUIDs and names we have.
+
+    @type include_temporary: boolean
+    @param include_temporary: whether to include the _temporary_ids set
+    @rtype: set
+    @return: a set of IDs
+
+    """
+    existing = set()
+    if include_temporary:
+      existing.update(self._temporary_ids)
+    existing.update(self._AllLVs())
+    existing.update(self._config_data.instances.keys())
+    existing.update(self._config_data.nodes.keys())
+    return existing
+
   @locking.ssynchronized(_config_lock, shared=1)
   def GenerateUniqueID(self, exceptions=None):
     """Generate an unique disk name.
@@ -175,11 +192,7 @@ class ConfigWriter:
     @return: the unique id
 
     """
-    existing = set()
-    existing.update(self._temporary_ids)
-    existing.update(self._ComputeAllLVs())
-    existing.update(self._config_data.instances.keys())
-    existing.update(self._config_data.nodes.keys())
+    existing = self._AllIDs(include_temporary=True)
     if exceptions is not None:
       existing.update(exceptions)
     retries = 64
@@ -193,6 +206,13 @@ class ConfigWriter:
       self._temporary_ids.add(unique_id)
     return unique_id
 
+  def _CleanupTemporaryIDs(self):
+    """Cleanups the _temporary_ids structure.
+
+    """
+    existing = self._AllIDs(include_temporary=False)
+    self._temporary_ids = self._temporary_ids - existing
+
   def _AllMACs(self):
     """Return all MACs present in the config.
 
@@ -273,6 +293,20 @@ class ConfigWriter:
     data = self._config_data
     seen_lids = []
     seen_pids = []
+
+    # global cluster checks
+    if not data.cluster.enabled_hypervisors:
+      result.append("enabled hypervisors list doesn't have any entries")
+    invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
+    if invalid_hvs:
+      result.append("enabled hypervisors contains invalid entries: %s" %
+                    invalid_hvs)
+
+    if data.cluster.master_node not in data.nodes:
+      result.append("cluster has invalid primary node '%s'" %
+                    data.cluster.master_node)
+
+    # per-instance checks
     for instance_name in data.instances:
       instance = data.instances[instance_name]
       if instance.primary_node not in data.nodes:
@@ -474,8 +508,8 @@ class ConfigWriter:
     def _AppendUsedPorts(instance_name, disk, used):
       duplicates = []
       if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
-        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
-        for node, port in ((nodeA, minorA), (nodeB, minorB)):
+        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
+        for node, port in ((node_a, minor_a), (node_b, minor_b)):
           assert node in used, ("Node '%s' of instance '%s' not found"
                                 " in node list" % (node, instance_name))
           if port in used[node]:
@@ -796,7 +830,7 @@ class ConfigWriter:
                                    self._config_data.instances.keys())
 
   def _UnlockedGetInstanceInfo(self, instance_name):
-    """Returns informations about an instance.
+    """Returns information about an instance.
 
     This function is for internal use, when the config lock is already held.
 
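The hunks above replace ad-hoc ID collection with a single _AllIDs() helper that GenerateUniqueID() consults before reserving a fresh UUID in _temporary_ids. The standalone sketch below illustrates that pattern outside of Ganeti; the IdPool class, its method names and the use of uuid.uuid4() are illustrative assumptions, not the real ConfigWriter API.

import uuid


class IdPool(object):
  """Toy stand-in for the ID bookkeeping shown in the hunks above."""

  def __init__(self, lvs, instances, nodes):
    self._lvs = set(lvs)
    self._instances = set(instances)
    self._nodes = set(nodes)
    self._temporary_ids = set()

  def _all_ids(self, include_temporary):
    # union of every name/UUID currently known, mirroring _AllIDs()
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids)
    existing.update(self._lvs)
    existing.update(self._instances)
    existing.update(self._nodes)
    return existing

  def generate_unique_id(self, exceptions=None, retries=64):
    # retry until a UUID not already in use is found, then reserve it
    existing = self._all_ids(include_temporary=True)
    if exceptions is not None:
      existing.update(exceptions)
    for _ in range(retries):
      unique_id = str(uuid.uuid4())
      if unique_id not in existing:
        self._temporary_ids.add(unique_id)
        return unique_id
    raise RuntimeError("Can't generate a unique ID")

  def cleanup_temporary_ids(self):
    # drop reservations that have since become permanent objects
    self._temporary_ids -= self._all_ids(include_temporary=False)


pool = IdPool(lvs=["xenvg/disk0"], instances=["inst1.example.com"],
              nodes=["node1.example.com"])
print(pool.generate_unique_id())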
@@ -808,9 +842,9 @@ class ConfigWriter:
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceInfo(self, instance_name):
-    """Returns informations about an instance.
+    """Returns information about an instance.
 
-    It takes the information from the configuration file. Other informations of
+    It takes the information from the configuration file. Other information of
     an instance are taken from the live systems.
 
     @param instance_name: name of the instance, e.g.
@@ -945,15 +979,19 @@ class ConfigWriter:
                     for node in self._UnlockedGetNodeList()])
     return my_dict
 
-  def _UnlockedGetMasterCandidateStats(self):
+  def _UnlockedGetMasterCandidateStats(self, exceptions=None):
     """Get the number of current and maximum desired and possible candidates.
 
+    @type exceptions: list
+    @param exceptions: if passed, list of nodes that should be ignored
     @rtype: tuple
     @return: tuple of (current, desired and possible)
 
     """
     mc_now = mc_max = 0
-    for node in self._config_data.nodes.itervalues():
+    for node in self._config_data.nodes.values():
+      if exceptions and node.name in exceptions:
+        continue
       if not (node.offline or node.drained):
         mc_max += 1
       if node.master_candidate:
@@ -962,16 +1000,18 @@ class ConfigWriter:
     return (mc_now, mc_max)
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GetMasterCandidateStats(self):
+  def GetMasterCandidateStats(self, exceptions=None):
     """Get the number of current and maximum possible candidates.
 
     This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
 
+    @type exceptions: list
+    @param exceptions: if passed, list of nodes that should be ignored
     @rtype: tuple
     @return: tuple of (current, max)
 
     """
-    return self._UnlockedGetMasterCandidateStats()
+    return self._UnlockedGetMasterCandidateStats(exceptions)
 
   @locking.ssynchronized(_config_lock)
   def MaintainCandidatePool(self):
@@ -1032,6 +1072,10 @@ class ConfigWriter:
         not hasattr(data.cluster, 'rsahostkeypub')):
       raise errors.ConfigurationError("Incomplete configuration"
                                       " (missing cluster.rsahostkeypub)")
+
+    # Upgrade configuration if needed
+    data.UpgradeConfig()
+
     self._config_data = data
     # reset the last serial as -1 so that the next write will cause
     # ssconf update
@@ -1077,6 +1121,10 @@ class ConfigWriter:
     """Write the configuration data to persistent storage.
 
     """
+    # first, cleanup the _temporary_ids set, if an ID is now in the
+    # other objects it should be discarded to prevent unbounded growth
+    # of that structure
+    self._CleanupTemporaryIDs()
     config_errors = self._UnlockedVerifyConfig()
     if config_errors:
       raise errors.ConfigurationError("Configuration data is not"
@@ -1144,32 +1192,6 @@ class ConfigWriter:
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      }
 
-  @locking.ssynchronized(_config_lock)
-  def InitConfig(self, version, cluster_config, master_node_config):
-    """Create the initial cluster configuration.
-
-    It will contain the current node, which will also be the master
-    node, and no instances.
-
-    @type version: int
-    @param version: Configuration version
-    @type cluster_config: objects.Cluster
-    @param cluster_config: Cluster configuration
-    @type master_node_config: objects.Node
-    @param master_node_config: Master node configuration
-
-    """
-    nodes = {
-      master_node_config.name: master_node_config,
-      }
-
-    self._config_data = objects.ConfigData(version=version,
-                                           cluster=cluster_config,
-                                           nodes=nodes,
-                                           instances={},
-                                           serial_no=1)
-    self._WriteConfig()
-
   @locking.ssynchronized(_config_lock, shared=1)
   def GetVGName(self):
     """Return the volume group name.
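The _UnlockedGetMasterCandidateStats() change above adds an exceptions list so that nodes being removed or evacuated can be left out of the pool accounting. Below is a minimal sketch of that counting logic, assuming a stand-in Node record rather than objects.Node:

import collections

Node = collections.namedtuple("Node", ["name", "offline", "drained",
                                       "master_candidate"])


def master_candidate_stats(nodes, exceptions=None):
  """Return (current, maximum possible) master candidates."""
  mc_now = mc_max = 0
  for node in nodes:
    if exceptions and node.name in exceptions:
      continue
    if not (node.offline or node.drained):
      mc_max += 1
    if node.master_candidate:
      mc_now += 1
  return (mc_now, mc_max)


nodes = [Node("node1", offline=False, drained=False, master_candidate=True),
         Node("node2", offline=False, drained=True, master_candidate=False),
         Node("node3", offline=True, drained=False, master_candidate=False)]
print(master_candidate_stats(nodes))                        # (1, 1)
print(master_candidate_stats(nodes, exceptions=["node1"]))  # (0, 0)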
@@ -1202,7 +1224,7 @@ class ConfigWriter:
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetClusterInfo(self):
-    """Returns informations about the cluster
+    """Returns information about the cluster
 
     @rtype: L{objects.Cluster}
     @return: the cluster object
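Earlier in the diff, _WriteConfig() gains a _CleanupTemporaryIDs() call so that reserved IDs which have since become real objects are dropped before the configuration is verified and written. A rough, self-contained sketch of that write-path ordering, using a hypothetical MiniConfig class rather than Ganeti's ConfigWriter:

import json


class MiniConfig(object):
  """Hypothetical, simplified configuration store (not ConfigWriter)."""

  def __init__(self, path):
    self._path = path
    self._temporary_ids = set()
    self.instances = {}
    self.nodes = {}

  def _all_ids(self):
    return set(self.instances) | set(self.nodes)

  def _cleanup_temporary_ids(self):
    # reservations that now exist as real objects are no longer temporary
    self._temporary_ids -= self._all_ids()

  def _verify(self):
    errors = []
    for name, inst in self.instances.items():
      if inst["primary_node"] not in self.nodes:
        errors.append("instance %s has invalid primary node" % name)
    return errors

  def write(self):
    # same ordering as the _WriteConfig hunk: cleanup, verify, then persist
    self._cleanup_temporary_ids()
    errors = self._verify()
    if errors:
      raise ValueError("Configuration data is not consistent: %s" % errors)
    with open(self._path, "w") as fh:
      json.dump({"instances": self.instances, "nodes": self.nodes}, fh)


cfg = MiniConfig("mini-config.json")
cfg.nodes["node1.example.com"] = {}
cfg.instances["inst1.example.com"] = {"primary_node": "node1.example.com"}
cfg.write()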