"""
import os
-import tempfile
import random
import logging
import time
raise errors.ConfigurationError("Can't generate unique DRBD secret")
return secret
- def _ComputeAllLVs(self):
+ def _AllLVs(self):
"""Compute the list of all LVs.
"""
lvnames.update(lv_list)
return lvnames
- @locking.ssynchronized(_config_lock, shared=1)
- def GenerateUniqueID(self, exceptions=None):
- """Generate an unique disk name.
+ def _AllIDs(self, include_temporary):
+ """Compute the list of all UUIDs and names we have.
+
+ @type include_temporary: boolean
+ @param include_temporary: whether to include the _temporary_ids set
+ @rtype: set
+ @return: a set of IDs
+
+ """
+ existing = set()
+ if include_temporary:
+ existing.update(self._temporary_ids)
+ existing.update(self._AllLVs())
+ existing.update(self._config_data.instances.keys())
+ existing.update(self._config_data.nodes.keys())
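+ # plus the UUIDs of every object that carries one (instances, nodes, cluster)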
+ existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
+ return existing
+
+ def _GenerateUniqueID(self, exceptions=None):
+ """Generate an unique UUID.
This checks the current node, instances and disk names for
duplicates.
- @param exceptions: a list with some other names which should be checked
- for uniqueness (used for example when you want to get
- more than one id at one time without adding each one in
- turn to the config file)
+ @param exceptions: a list with some other names which should be
+ checked for uniqueness (used for example when you want to get
+ more than one id at one time without adding each one in turn
+ to the config file)
@rtype: string
@return: the unique id
"""
- existing = set()
- existing.update(self._temporary_ids)
- existing.update(self._ComputeAllLVs())
- existing.update(self._config_data.instances.keys())
- existing.update(self._config_data.nodes.keys())
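+ # include IDs already handed out but not yet attached to any config object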
+ existing = self._AllIDs(include_temporary=True)
if exceptions is not None:
existing.update(exceptions)
retries = 64
self._temporary_ids.add(unique_id)
return unique_id
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GenerateUniqueID(self, exceptions=None):
+ """Generate an unique ID.
+
+ This is just a wrapper over the unlocked version.
+
+ """
+ return self._GenerateUniqueID(exceptions=exceptions)
+
+ def _CleanupTemporaryIDs(self):
+ """Cleanups the _temporary_ids structure.
+
+ """
+ existing = self._AllIDs(include_temporary=False)
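+ # drop reserved IDs that are now part of the configuration proper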
+ self._temporary_ids = self._temporary_ids - existing
+
def _AllMACs(self):
"""Return all MACs present in the config.
result.append("Master node is not a master candidate")
# master candidate checks
- mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+ mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
if mc_now < mc_max:
result.append("Not enough master candidates: actual %d, target %d" %
(mc_now, mc_max))
result.append("DRBD minor %d on node %s is assigned twice to instances"
" %s and %s" % (minor, node, instance_a, instance_b))
+ # IP checks
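+ # (each address may only have a single owner; a node's secondary IP is
+ # only checked when it differs from its primary IP)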
+ ips = { data.cluster.master_ip: ["cluster_ip"] }
+ def _helper(ip, name):
+ if ip in ips:
+ ips[ip].append(name)
+ else:
+ ips[ip] = [name]
+
+ for node in data.nodes.values():
+ _helper(node.primary_ip, "node:%s/primary" % node.name)
+ if node.secondary_ip != node.primary_ip:
+ _helper(node.secondary_ip, "node:%s/secondary" % node.name)
+
+ for ip, owners in ips.items():
+ if len(owners) > 1:
+ result.append("IP address %s is used by multiple owners: %s" %
+ (ip, ", ".join(owners)))
return result
@locking.ssynchronized(_config_lock, shared=1)
for nic in instance.nics:
if nic.mac in all_macs:
raise errors.ConfigurationError("Cannot add instance %s:"
- " MAC address '%s' already in use." % (instance.name, nic.mac))
+ " MAC address '%s' already in use." %
+ (instance.name, nic.mac))
+
+ self._EnsureUUID(instance)
instance.serial_no = 1
instance.ctime = instance.mtime = time.time()
self._temporary_macs.discard(nic.mac)
self._WriteConfig()
+ def _EnsureUUID(self, item):
+ """Ensures a given object has a valid UUID.
+
+ @param item: the instance or node to be checked
+
+ """
+ if not item.uuid:
+ item.uuid = self._GenerateUniqueID()
+ elif item.uuid in self._AllIDs(include_temporary=True):
+ raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
+ " in use" % (item.name, item.uuid))
+
def _SetInstanceStatus(self, instance_name, status):
"""Set the instance's status to a given value.
"""
return utils.MatchNameComponent(short_name,
- self._config_data.instances.keys())
+ self._config_data.instances.keys(),
+ case_sensitive=False)
def _UnlockedGetInstanceInfo(self, instance_name):
"""Returns information about an instance.
@param node: a Node instance
"""
- logging.info("Adding node %s to configuration" % node.name)
+ logging.info("Adding node %s to configuration", node.name)
+
+ self._EnsureUUID(node)
node.serial_no = 1
node.ctime = node.mtime = time.time()
"""Remove a node from the configuration.
"""
- logging.info("Removing node %s from configuration" % node_name)
+ logging.info("Removing node %s from configuration", node_name)
if node_name not in self._config_data.nodes:
raise errors.ConfigurationError("Unknown node '%s'" % node_name)
"""
return utils.MatchNameComponent(short_name,
- self._config_data.nodes.keys())
+ self._config_data.nodes.keys(),
+ case_sensitive=False)
def _UnlockedGetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
@type exceptions: list
@param exceptions: if passed, list of nodes that should be ignored
@rtype: tuple
- @return: tuple of (current, desired and possible)
+ @return: tuple of (current, desired and possible, maximum possible)
"""
- mc_now = mc_max = 0
+ mc_now = mc_should = mc_max = 0
for node in self._config_data.nodes.values():
if exceptions and node.name in exceptions:
continue
mc_max += 1
if node.master_candidate:
mc_now += 1
- mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
- return (mc_now, mc_max)
+ mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
+ return (mc_now, mc_should, mc_max)
@locking.ssynchronized(_config_lock, shared=1)
def GetMasterCandidateStats(self, exceptions=None):
return self._UnlockedGetMasterCandidateStats(exceptions)
@locking.ssynchronized(_config_lock)
- def MaintainCandidatePool(self):
+ def MaintainCandidatePool(self, exceptions):
"""Try to grow the candidate pool to the desired size.
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
@rtype: list
@return: list with the adjusted nodes (L{objects.Node} instances)
"""
- mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+ mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
mod_list = []
if mc_now < mc_max:
node_list = self._config_data.nodes.keys()
if mc_now >= mc_max:
break
node = self._config_data.nodes[name]
- if node.master_candidate or node.offline or node.drained:
+ if (node.master_candidate or node.offline or node.drained or
+ node.name in exceptions):
continue
mod_list.append(node)
node.master_candidate = True
self._config_data.serial_no += 1
self._config_data.mtime = time.time()
+ def _AllUUIDObjects(self):
+ """Returns all objects with uuid attributes.
+
+ """
+ return (self._config_data.instances.values() +
+ self._config_data.nodes.values() +
+ [self._config_data.cluster])
+
def _OpenConfig(self):
"""Read the config data from disk.
"""
- f = open(self._cfg_file, 'r')
+ raw_data = utils.ReadFile(self._cfg_file)
+
try:
- try:
- data = objects.ConfigData.FromDict(serializer.Load(f.read()))
- except Exception, err:
- raise errors.ConfigurationError(err)
- finally:
- f.close()
+ data = objects.ConfigData.FromDict(serializer.Load(raw_data))
+ except Exception, err:
+ raise errors.ConfigurationError(err)
# Make sure the configuration has the right version
_ValidateConfig(data)
not hasattr(data.cluster, 'rsahostkeypub')):
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
+
+ # Upgrade configuration if needed
+ data.UpgradeConfig()
+
self._config_data = data
# reset the last serial as -1 so that the next write will cause
# ssconf update
self._last_cluster_serial = -1
- def _DistributeConfig(self):
+ # And finally run our (custom) config upgrade sequence
+ self._UpgradeConfig()
+
+ def _UpgradeConfig(self):
+ """Run upgrade steps that cannot be done purely in the objects.
+
+ Some data elements (e.g. the UUIDs assigned below) need uniqueness
+ across the whole configuration, so they cannot be handled by the
+ per-object upgrade code.
+
+ @warning: this function will call L{_WriteConfig()}, so it needs
+ to either be called with the lock held or from a safe place
+ (the constructor)
+
+ """
+ modified = False
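+ # assign a UUID to every object that does not have one yet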
+ for item in self._AllUUIDObjects():
+ if item.uuid is None:
+ item.uuid = self._GenerateUniqueID()
+ modified = True
+ if modified:
+ self._WriteConfig()
+
+ def _DistributeConfig(self, feedback_fn):
"""Distribute the configuration to the other nodes.
Currently, this only copies the configuration file. In the future,
"""
if self._offline:
return True
+
bad = False
node_list = []
result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
address_list=addr_list)
for to_node, to_result in result.items():
- msg = to_result.RemoteFailMsg()
+ msg = to_result.fail_msg
if msg:
msg = ("Copy of file %s to node %s failed: %s" %
(self._cfg_file, to_node, msg))
logging.error(msg)
+
+ if feedback_fn:
+ feedback_fn(msg)
+
bad = True
+
return not bad
- def _WriteConfig(self, destination=None):
+ def _WriteConfig(self, destination=None, feedback_fn=None):
"""Write the configuration data to persistent storage.
"""
+ assert feedback_fn is None or callable(feedback_fn)
+
+ # First, cleanup the _temporary_ids set, if an ID is now in the
+ # other objects it should be discarded to prevent unbounded growth
+ # of that structure
+ self._CleanupTemporaryIDs()
+
+ # Warn on config errors, but don't abort the save - the
+ # configuration has already been modified, and we can't revert;
+ # the best we can do is to warn the user and save as is, leaving
+ # recovery to the user
config_errors = self._UnlockedVerifyConfig()
if config_errors:
- raise errors.ConfigurationError("Configuration data is not"
- " consistent: %s" %
- (", ".join(config_errors)))
+ errmsg = ("Configuration data is not consistent: %s" %
+ (", ".join(config_errors)))
+ logging.critical(errmsg)
+ if feedback_fn:
+ feedback_fn(errmsg)
+
if destination is None:
destination = self._cfg_file
self._BumpSerialNo()
txt = serializer.Dump(self._config_data.ToDict())
- dir_name, file_name = os.path.split(destination)
- fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
- f = os.fdopen(fd, 'w')
- try:
- f.write(txt)
- os.fsync(f.fileno())
- finally:
- f.close()
- # we don't need to do os.close(fd) as f.close() did it
- os.rename(name, destination)
+
+ utils.WriteFile(destination, data=txt)
+
self.write_count += 1
# and redistribute the config file to master candidates
- self._DistributeConfig()
+ self._DistributeConfig(feedback_fn)
# Write ssconf files on all nodes (including locally)
if self._last_cluster_serial < self._config_data.cluster.serial_no:
if not self._offline:
- result = rpc.RpcRunner.call_write_ssconf_files(\
+ result = rpc.RpcRunner.call_write_ssconf_files(
self._UnlockedGetNodeList(),
self._UnlockedGetSsconfValues())
+
for nname, nresu in result.items():
- msg = nresu.RemoteFailMsg()
+ msg = nresu.fail_msg
if msg:
- logging.warning("Error while uploading ssconf files to"
- " node %s: %s", nname, msg)
+ errmsg = ("Error while uploading ssconf files to"
+ " node %s: %s" % (nname, msg))
+ logging.warning(errmsg)
+
+ if feedback_fn:
+ feedback_fn(errmsg)
+
self._last_cluster_serial = self._config_data.cluster.serial_no
def _UnlockedGetSsconfValues(self):
return self._config_data.cluster
@locking.ssynchronized(_config_lock)
- def Update(self, target):
+ def Update(self, target, feedback_fn):
"""Notify function to be called after updates.
This function must be called when an object (as returned by
@param target: an instance of either L{objects.Cluster},
L{objects.Node} or L{objects.Instance} which is existing in
the cluster
+ @param feedback_fn: Callable feedback function
"""
if self._config_data is None:
for nic in target.nics:
self._temporary_macs.discard(nic.mac)
- self._WriteConfig()
+ self._WriteConfig(feedback_fn=feedback_fn)