"""
import os
-import tempfile
import random
import logging
+import time
from ganeti import errors
from ganeti import locking
def _ValidateConfig(data):
+ """Verifies that a configuration objects looks valid.
+
+ This only verifies the version of the configuration.
+
+ @raise errors.ConfigurationError: if the version differs from what
+ we expect
+
+ """
if data.version != constants.CONFIG_VERSION:
raise errors.ConfigurationError("Cluster configuration version"
" mismatch, got %s instead of %s" %
self._cfg_file = cfg_file
self._temporary_ids = set()
self._temporary_drbds = {}
+ self._temporary_macs = set()
# Note: in order to prevent errors when resolving our name in
# _DistributeConfig, we compute it here once and reuse it; it's
# better to raise an error before starting to modify the config
# file than after it was modified
self._my_hostname = utils.HostInfo().name
+ self._last_cluster_serial = -1
self._OpenConfig()
# this method needs to be static, so that we can call it on the class
byte2 = random.randrange(0, 256)
byte3 = random.randrange(0, 256)
mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
- if mac not in all_macs:
+ if mac not in all_macs and mac not in self._temporary_macs:
break
retries -= 1
else:
raise errors.ConfigurationError("Can't generate unique MAC")
+ self._temporary_macs.add(mac)
return mac
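+ # A minimal usage sketch, assuming cfg is a ConfigWriter instance: every
+ # generated address is parked in self._temporary_macs until AddInstance()
+ # or Update() discards it, so two back-to-back calls cannot hand out the
+ # same MAC even before anything is written to the config file.
+ #   mac_a = cfg.GenerateMAC()
+ #   mac_b = cfg.GenerateMAC()   # guaranteed to differ from mac_a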
@locking.ssynchronized(_config_lock, shared=1)
"""
all_macs = self._AllMACs()
- return mac in all_macs
+ return mac in all_macs or mac in self._temporary_macs
@locking.ssynchronized(_config_lock, shared=1)
def GenerateDRBDSecret(self):
raise errors.ConfigurationError("Can't generate unique DRBD secret")
return secret
- def _ComputeAllLVs(self):
+ def _AllLVs(self):
"""Compute the list of all LVs.
"""
lvnames.update(lv_list)
return lvnames
- @locking.ssynchronized(_config_lock, shared=1)
- def GenerateUniqueID(self, exceptions=None):
- """Generate an unique disk name.
+ def _AllIDs(self, include_temporary):
+ """Compute the list of all UUIDs and names we have.
+
+ @type include_temporary: boolean
+ @param include_temporary: whether to include the _temporary_ids set
+ @rtype: set
+ @return: a set of IDs
+
+ """
+ existing = set()
+ if include_temporary:
+ existing.update(self._temporary_ids)
+ existing.update(self._AllLVs())
+ existing.update(self._config_data.instances.keys())
+ existing.update(self._config_data.nodes.keys())
+ existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
+ return existing
+
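+ # Rough shape of the returned set, with hypothetical entries: LV names,
+ # instance names, node names and the UUIDs of all UUID-carrying objects,
+ # plus the not-yet-committed IDs when include_temporary=True:
+ #   set(['instance1.example.com', 'node1.example.com',
+ #        '<lv name>', '<object uuid>'])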
+ def _GenerateUniqueID(self, exceptions=None):
+ """Generate an unique UUID.
This checks the current node, instances and disk names for
duplicates.
- Args:
- - exceptions: a list with some other names which should be checked
- for uniqueness (used for example when you want to get
- more than one id at one time without adding each one in
- turn to the config file
+ @param exceptions: a list with some other names which should be
+ checked for uniqueness (used for example when you want to get
+ more than one id at one time without adding each one in turn
+ to the config file)
- Returns: the unique id as a string
+ @rtype: string
+ @return: the unique id
"""
- existing = set()
- existing.update(self._temporary_ids)
- existing.update(self._ComputeAllLVs())
- existing.update(self._config_data.instances.keys())
- existing.update(self._config_data.nodes.keys())
+ existing = self._AllIDs(include_temporary=True)
if exceptions is not None:
existing.update(exceptions)
retries = 64
self._temporary_ids.add(unique_id)
return unique_id
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GenerateUniqueID(self, exceptions=None):
+ """Generate an unique ID.
+
+ This is just a wrapper over the unlocked version.
+
+ """
+ return self._GenerateUniqueID(exceptions=exceptions)
+
+ def _CleanupTemporaryIDs(self):
+ """Cleanups the _temporary_ids structure.
+
+ """
+ existing = self._AllIDs(include_temporary=False)
+ self._temporary_ids = self._temporary_ids - existing
+
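+ # Illustrative effect, assuming "uuid-1" was handed out earlier and has
+ # since been committed to the config, while "uuid-2" is still only reserved:
+ #   self._temporary_ids == set(["uuid-1", "uuid-2"])
+ #   self._CleanupTemporaryIDs()
+ #   self._temporary_ids == set(["uuid-2"])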
def _AllMACs(self):
"""Return all MACs present in the config.
+ @rtype: list
+ @return: the list of all MACs
+
"""
result = []
for instance in self._config_data.instances.values():
def _AllDRBDSecrets(self):
"""Return all DRBD secrets present in the config.
+ @rtype: list
+ @return: the list of all DRBD secrets
+
"""
def helper(disk, result):
"""Recursively gather secrets from this disk."""
return result
- @locking.ssynchronized(_config_lock, shared=1)
- def VerifyConfig(self):
- """Stub verify function.
+ def _CheckDiskIDs(self, disk, l_ids, p_ids):
+ """Compute duplicate disk IDs
+
+ @type disk: L{objects.Disk}
+ @param disk: the disk at which to start searching
+ @type l_ids: list
+ @param l_ids: list of current logical ids
+ @type p_ids: list
+ @param p_ids: list of current physical ids
+ @rtype: list
+ @return: a list of error messages
+
+ """
+ result = []
+ if disk.logical_id is not None:
+ if disk.logical_id in l_ids:
+ result.append("duplicate logical id %s" % str(disk.logical_id))
+ else:
+ l_ids.append(disk.logical_id)
+ if disk.physical_id is not None:
+ if disk.physical_id in p_ids:
+ result.append("duplicate physical id %s" % str(disk.physical_id))
+ else:
+ p_ids.append(disk.physical_id)
+
+ if disk.children:
+ for child in disk.children:
+ result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
+ return result
+
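+ # Hedged walk-through: for a DRBD8 disk with two LV children, the call
+ # above first records the DRBD logical/physical ids, then recurses into
+ # each child and records the LV ids; any id already seen for an earlier
+ # disk of any instance ends up as a "duplicate ..." message in the result.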
+ def _UnlockedVerifyConfig(self):
+ """Verify function.
+
+ @rtype: list
+ @return: a list of error messages; a non-empty list signifies
+ configuration errors
+
"""
result = []
seen_macs = []
ports = {}
data = self._config_data
+ seen_lids = []
+ seen_pids = []
+
+ # global cluster checks
+ if not data.cluster.enabled_hypervisors:
+ result.append("enabled hypervisors list doesn't have any entries")
+ invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
+ if invalid_hvs:
+ result.append("enabled hypervisors contains invalid entries: %s" %
+ invalid_hvs)
+
+ if data.cluster.master_node not in data.nodes:
+ result.append("cluster has invalid primary node '%s'" %
+ data.cluster.master_node)
+
+ # per-instance checks
for instance_name in data.instances:
instance = data.instances[instance_name]
if instance.primary_node not in data.nodes:
ports[net_port] = []
ports[net_port].append((instance.name, "network port"))
+ # instance disk verify
+ for idx, disk in enumerate(instance.disks):
+ result.extend(["instance '%s' disk %d error: %s" %
+ (instance.name, idx, msg) for msg in disk.Verify()])
+ result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+
# cluster-wide pool of free ports
- for free_port in self._config_data.cluster.tcpudp_port_pool:
+ for free_port in data.cluster.tcpudp_port_pool:
if free_port not in ports:
ports[free_port] = []
ports[free_port].append(("cluster", "port marked as free"))
# highest used tcp port check
if keys:
- if keys[-1] > self._config_data.cluster.highest_used_port:
+ if keys[-1] > data.cluster.highest_used_port:
result.append("Highest used port mismatch, saved %s, computed %s" %
- (self._config_data.cluster.highest_used_port,
- keys[-1]))
+ (data.cluster.highest_used_port, keys[-1]))
+
+ if not data.nodes[data.cluster.master_node].master_candidate:
+ result.append("Master node is not a master candidate")
+
+ # master candidate checks
+ mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
+ if mc_now < mc_max:
+ result.append("Not enough master candidates: actual %d, target %d" %
+ (mc_now, mc_max))
+
+ # node checks
+ for node in data.nodes.values():
+ if [node.master_candidate, node.drained, node.offline].count(True) > 1:
+ result.append("Node %s state is invalid: master_candidate=%s,"
+ " drain=%s, offline=%s" %
+ (node.name, node.master_candidate, node.drained,
+ node.offline))
+
+ # drbd minors check
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ for node, minor, instance_a, instance_b in duplicates:
+ result.append("DRBD minor %d on node %s is assigned twice to instances"
+ " %s and %s" % (minor, node, instance_a, instance_b))
+
+ # IP checks
+ ips = { data.cluster.master_ip: ["cluster_ip"] }
+ def _helper(ip, name):
+ if ip in ips:
+ ips[ip].append(name)
+ else:
+ ips[ip] = [name]
+
+ for node in data.nodes.values():
+ _helper(node.primary_ip, "node:%s/primary" % node.name)
+ if node.secondary_ip != node.primary_ip:
+ _helper(node.secondary_ip, "node:%s/secondary" % node.name)
+ for ip, owners in ips.items():
+ if len(owners) > 1:
+ result.append("IP address %s is used by multiple owners: %s" %
+ (ip, ", ".join(owners)))
return result
+ @locking.ssynchronized(_config_lock, shared=1)
+ def VerifyConfig(self):
+ """Verify function.
+
+ This is just a wrapper over L{_UnlockedVerifyConfig}.
+
+ @rtype: list
+ @return: a list of error messages; a non-empty list signifies
+ configuration errors
+
+ """
+ return self._UnlockedVerifyConfig()
+
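+ # Typical call-site sketch (hypothetical): an empty list means the
+ # configuration passed all checks.
+ #   errs = cfg.VerifyConfig()
+ #   if errs:
+ #     raise errors.ConfigurationError("; ".join(errs))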
def _UnlockedSetDiskID(self, disk, node_name):
"""Convert the unique ID to the ID needed on the target nodes.
self._WriteConfig()
return port
- def _ComputeDRBDMap(self, instance):
+ def _UnlockedComputeDRBDMap(self):
"""Compute the used DRBD minor/nodes.
- Return: dictionary of node_name: dict of minor: instance_name. The
- returned dict will have all the nodes in it (even if with an empty
- list).
+ @rtype: (dict, list)
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty dict), and a list of duplicates; if the duplicates
+ list is not empty, the configuration is corrupted and the caller
+ should raise an exception
"""
def _AppendUsedPorts(instance_name, disk, used):
+ duplicates = []
if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
- nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
- for node, port in ((nodeA, minorA), (nodeB, minorB)):
- assert node in used, "Instance node not found in node list"
+ node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
+ for node, port in ((node_a, minor_a), (node_b, minor_b)):
+ assert node in used, ("Node '%s' of instance '%s' not found"
+ " in node list" % (node, instance_name))
if port in used[node]:
- raise errors.ProgrammerError("DRBD minor already used:"
- " %s/%s, %s/%s" %
- (node, port, instance_name,
- used[node][port]))
-
- used[node][port] = instance_name
+ duplicates.append((node, port, instance_name, used[node][port]))
+ else:
+ used[node][port] = instance_name
if disk.children:
for child in disk.children:
- _AppendUsedPorts(instance_name, child, used)
+ duplicates.extend(_AppendUsedPorts(instance_name, child, used))
+ return duplicates
+ duplicates = []
my_dict = dict((node, {}) for node in self._config_data.nodes)
- for (node, minor), instance in self._temporary_drbds.iteritems():
- my_dict[node][minor] = instance
for instance in self._config_data.instances.itervalues():
for disk in instance.disks:
- _AppendUsedPorts(instance.name, disk, my_dict)
- return my_dict
+ duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
+ for (node, minor), instance in self._temporary_drbds.iteritems():
+ if minor in my_dict[node] and my_dict[node][minor] != instance:
+ duplicates.append((node, minor, instance, my_dict[node][minor]))
+ else:
+ my_dict[node][minor] = instance
+ return my_dict, duplicates
+
+ @locking.ssynchronized(_config_lock)
+ def ComputeDRBDMap(self):
+ """Compute the used DRBD minor/nodes.
+
+ This is just a wrapper over L{_UnlockedComputeDRBDMap}.
+
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty dict).
+
+ """
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ if duplicates:
+ raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
+ str(duplicates))
+ return d_map
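+ # Shape of the returned mapping, with made-up node and instance names; a
+ # node without any DRBD-based instance still appears, mapped to an empty dict:
+ #   {"node1.example.com": {0: "instance1.example.com",
+ #                          1: "instance2.example.com"},
+ #    "node2.example.com": {}}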
@locking.ssynchronized(_config_lock)
def AllocateDRBDMinor(self, nodes, instance):
multiple minors. The result is the list of minors, in the same
order as the passed nodes.
+ @type instance: string
+ @param instance: the instance for which we allocate minors
+
"""
- d_map = self._ComputeDRBDMap(instance)
+ assert isinstance(instance, basestring), \
+ "Invalid argument '%s' passed to AllocateDRBDMinor" % instance
+
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ if duplicates:
+ raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
+ str(duplicates))
result = []
for nname in nodes:
ndata = d_map[nname]
minor = keys[-1] + 1
else:
minor = ffree
- result.append(minor)
+ # double-check minor against current instances
+ assert minor not in d_map[nname], \
+ ("Attempt to reuse allocated DRBD minor %d on node %s,"
+ " already allocated to instance %s" %
+ (minor, nname, d_map[nname][minor]))
ndata[minor] = instance
- assert (nname, minor) not in self._temporary_drbds, \
- "Attempt to reuse reserved DRBD minor"
- self._temporary_drbds[(nname, minor)] = instance
+ # double-check minor against reservation
+ r_key = (nname, minor)
+ assert r_key not in self._temporary_drbds, \
+ ("Attempt to reuse reserved DRBD minor %d on node %s,"
+ " reserved for instance %s" %
+ (minor, nname, self._temporary_drbds[r_key]))
+ self._temporary_drbds[r_key] = instance
+ result.append(minor)
logging.debug("Request to allocate drbd minors, input: %s, returning %s",
nodes, result)
return result
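+ # Usage sketch, with hypothetical names: the minors come back in the same
+ # order as the nodes passed in, and each (node, minor) pair stays reserved
+ # in self._temporary_drbds until the instance is added or updated.
+ #   minors = cfg.AllocateDRBDMinor(["node1.example.com",
+ #                                   "node2.example.com"],
+ #                                  "instance1.example.com")
+ #   # e.g. minors == [0, 3]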
- @locking.ssynchronized(_config_lock)
- def ReleaseDRBDMinors(self, instance):
+ def _UnlockedReleaseDRBDMinors(self, instance):
"""Release temporary drbd minors allocated for a given instance.
- This should be called on both the error paths and on the success
- paths (after the instance has been added or updated).
-
@type instance: string
@param instance: the instance for which temporary minors should be
released
"""
+ assert isinstance(instance, basestring), \
+ "Invalid argument passed to ReleaseDRBDMinors"
for key, name in self._temporary_drbds.items():
if name == instance:
del self._temporary_drbds[key]
+ @locking.ssynchronized(_config_lock)
+ def ReleaseDRBDMinors(self, instance):
+ """Release temporary drbd minors allocated for a given instance.
+
+ This should be called on the error paths, on the success paths
+ it's automatically called by the ConfigWriter add and update
+ functions.
+
+ This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
+
+ @type instance: string
+ @param instance: the instance for which temporary minors should be
+ released
+
+ """
+ self._UnlockedReleaseDRBDMinors(instance)
+
@locking.ssynchronized(_config_lock, shared=1)
def GetConfigVersion(self):
"""Get the configuration version.
"""Get the hypervisor type for this cluster.
"""
- return self._config_data.cluster.default_hypervisor
+ return self._config_data.cluster.enabled_hypervisors[0]
@locking.ssynchronized(_config_lock, shared=1)
def GetHostKey(self):
"""Return the rsa hostkey from the config.
- Args: None
+ @rtype: string
+ @return: the rsa hostkey
- Returns: rsa hostkey
"""
return self._config_data.cluster.rsahostkeypub
This should be used after creating a new instance.
- Args:
- instance: the instance object
+ @type instance: L{objects.Instance}
+ @param instance: the instance object
+
"""
if not isinstance(instance, objects.Instance):
raise errors.ProgrammerError("Invalid type passed to AddInstance")
all_lvs = instance.MapLVsByNode()
logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
+ all_macs = self._AllMACs()
+ for nic in instance.nics:
+ if nic.mac in all_macs:
+ raise errors.ConfigurationError("Cannot add instance %s:"
+ " MAC address '%s' already in use." %
+ (instance.name, nic.mac))
+
+ self._EnsureUUID(instance)
+
instance.serial_no = 1
+ instance.ctime = instance.mtime = time.time()
self._config_data.instances[instance.name] = instance
self._config_data.cluster.serial_no += 1
+ self._UnlockedReleaseDRBDMinors(instance.name)
+ for nic in instance.nics:
+ self._temporary_macs.discard(nic.mac)
self._WriteConfig()
+ def _EnsureUUID(self, item):
+ """Ensures a given object has a valid UUID.
+
+ @param item: the instance or node to be checked
+
+ """
+ if not item.uuid:
+ item.uuid = self._GenerateUniqueID()
+ elif item.uuid in self._AllIDs(include_temporary=True):
+ raise errors.ConfigurationError("Cannot add '%s': UUID %s already in use" %
+ (item.name, item.uuid))
+
def _SetInstanceStatus(self, instance_name, status):
"""Set the instance's status to a given value.
"""
- if status not in ("up", "down"):
- raise errors.ProgrammerError("Invalid status '%s' passed to"
- " ConfigWriter._SetInstanceStatus()" %
- status)
+ assert isinstance(status, bool), \
+ "Invalid status '%s' passed to SetInstanceStatus" % (status,)
if instance_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" %
instance_name)
instance = self._config_data.instances[instance_name]
- if instance.status != status:
- instance.status = status
+ if instance.admin_up != status:
+ instance.admin_up = status
instance.serial_no += 1
+ instance.mtime = time.time()
self._WriteConfig()
@locking.ssynchronized(_config_lock)
"""Mark the instance status to up in the config.
"""
- self._SetInstanceStatus(instance_name, "up")
+ self._SetInstanceStatus(instance_name, True)
@locking.ssynchronized(_config_lock)
def RemoveInstance(self, instance_name):
disk.iv_name))
self._config_data.instances[inst.name] = inst
- self._config_data.cluster.serial_no += 1
self._WriteConfig()
@locking.ssynchronized(_config_lock)
"""Mark the status of an instance to down in the configuration.
"""
- self._SetInstanceStatus(instance_name, "down")
+ self._SetInstanceStatus(instance_name, False)
def _UnlockedGetInstanceList(self):
"""Get the list of instances.
def GetInstanceList(self):
"""Get the list of instances.
- Returns:
- array of instances, ex. ['instance2.example.com','instance1.example.com']
- these contains all the instances, also the ones in Admin_down state
+ @return: array of instances, e.g. ['instance2.example.com',
+ 'instance1.example.com']
"""
return self._UnlockedGetInstanceList()
"""
return utils.MatchNameComponent(short_name,
- self._config_data.instances.keys())
+ self._config_data.instances.keys(),
+ case_sensitive=False)
def _UnlockedGetInstanceInfo(self, instance_name):
- """Returns informations about an instance.
+ """Returns information about an instance.
This function is for internal use, when the config lock is already held.
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceInfo(self, instance_name):
- """Returns informations about an instance.
+ """Returns information about an instance.
- It takes the information from the configuration file. Other informations of
+ It takes the information from the configuration file. Other information about
an instance is taken from the live systems.
- Args:
- instance: name of the instance, ex instance1.example.com
+ @param instance_name: name of the instance, e.g.
+ I{instance1.example.com}
- Returns:
- the instance object
+ @rtype: L{objects.Instance}
+ @return: the instance object
"""
return self._UnlockedGetInstanceInfo(instance_name)
"""Get the configuration of all instances.
@rtype: dict
- @returns: dict of (instance, instance_info), where instance_info is what
+ @return: dict of (instance, instance_info), where instance_info is what
would GetInstanceInfo return for the node
"""
def AddNode(self, node):
"""Add a node to the configuration.
- Args:
- node: an object.Node instance
+ @type node: L{objects.Node}
+ @param node: a Node instance
"""
- logging.info("Adding node %s to configuration" % node.name)
+ logging.info("Adding node %s to configuration", node.name)
+
+ self._EnsureUUID(node)
node.serial_no = 1
+ node.ctime = node.mtime = time.time()
self._config_data.nodes[node.name] = node
self._config_data.cluster.serial_no += 1
self._WriteConfig()
"""Remove a node from the configuration.
"""
- logging.info("Removing node %s from configuration" % node_name)
+ logging.info("Removing node %s from configuration", node_name)
if node_name not in self._config_data.nodes:
raise errors.ConfigurationError("Unknown node '%s'" % node_name)
"""
return utils.MatchNameComponent(short_name,
- self._config_data.nodes.keys())
+ self._config_data.nodes.keys(),
+ case_sensitive=False)
def _UnlockedGetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
- This function is for internal use, when the config lock is already held.
+ This function is for internal use, when the config lock is already
+ held.
- Args: node: nodename (tuple) of the node
+ @param node_name: the node name, e.g. I{node1.example.com}
- Returns: the node object
+ @rtype: L{objects.Node}
+ @return: the node object
"""
if node_name not in self._config_data.nodes:
def GetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
- Args: node: nodename (tuple) of the node
+ This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
- Returns: the node object
+ @param node_name: the node name, e.g. I{node1.example.com}
+
+ @rtype: L{objects.Node}
+ @return: the node object
"""
return self._UnlockedGetNodeInfo(node_name)
def _UnlockedGetNodeList(self):
"""Return the list of nodes which are in the configuration.
- This function is for internal use, when the config lock is already held.
+ This function is for internal use, when the config lock is already
+ held.
+
+ @rtype: list
"""
return self._config_data.nodes.keys()
return self._UnlockedGetNodeList()
@locking.ssynchronized(_config_lock, shared=1)
+ def GetOnlineNodeList(self):
+ """Return the list of nodes which are online.
+
+ """
+ all_nodes = [self._UnlockedGetNodeInfo(node)
+ for node in self._UnlockedGetNodeList()]
+ return [node.name for node in all_nodes if not node.offline]
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GetAllNodesInfo(self):
"""Get the configuration of all nodes.
@rtype: dict
- @returns: dict of (node, node_info), where node_info is what
+ @return: dict of (node, node_info), where node_info is what
would GetNodeInfo return for the node
"""
for node in self._UnlockedGetNodeList()])
return my_dict
+ def _UnlockedGetMasterCandidateStats(self, exceptions=None):
+ """Get the number of current and maximum desired and possible candidates.
+
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
+ @rtype: tuple
+ @return: tuple of (current, min(desired, possible), possible)
+
+ """
+ mc_now = mc_should = mc_max = 0
+ for node in self._config_data.nodes.values():
+ if exceptions and node.name in exceptions:
+ continue
+ if not (node.offline or node.drained):
+ mc_max += 1
+ if node.master_candidate:
+ mc_now += 1
+ mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
+ return (mc_now, mc_should, mc_max)
+
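+ # Worked example on a hypothetical cluster: 5 nodes, one offline and one
+ # drained, 2 nodes flagged master_candidate, candidate_pool_size = 10:
+ #   mc_max    = 3               # nodes that could be candidates
+ #   mc_now    = 2               # nodes currently flagged master_candidate
+ #   mc_should = min(3, 10) == 3 # desired size, capped by what is possible
+ # so the method returns (2, 3, 3).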
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterCandidateStats(self, exceptions=None):
+ """Get the number of current and maximum possible candidates.
+
+ This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
+
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
+ @rtype: tuple
+ @return: tuple of (current, min(desired, possible), possible)
+
+ """
+ return self._UnlockedGetMasterCandidateStats(exceptions)
+
+ @locking.ssynchronized(_config_lock)
+ def MaintainCandidatePool(self, exceptions):
+ """Try to grow the candidate pool to the desired size.
+
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
+ @rtype: list
+ @return: list with the adjusted nodes (L{objects.Node} instances)
+
+ """
+ mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
+ mod_list = []
+ if mc_now < mc_max:
+ node_list = self._config_data.nodes.keys()
+ random.shuffle(node_list)
+ for name in node_list:
+ if mc_now >= mc_max:
+ break
+ node = self._config_data.nodes[name]
+ if (node.master_candidate or node.offline or node.drained or
+ node.name in exceptions):
+ continue
+ mod_list.append(node)
+ node.master_candidate = True
+ node.serial_no += 1
+ mc_now += 1
+ if mc_now != mc_max:
+ # this should not happen
+ logging.warning("MaintainCandidatePool didn't manage to"
+ " fill the candidate pool (%d/%d)", mc_now, mc_max)
+ if mod_list:
+ self._config_data.cluster.serial_no += 1
+ self._WriteConfig()
+
+ return mod_list
+
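+ # Continuing the example above (2 candidates, target 3): one more eligible,
+ # non-excluded node would be promoted, its serial_no bumped, the cluster
+ # serial increased and the config written; mod_list would hold that one node.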
def _BumpSerialNo(self):
"""Bump up the serial number of the config.
"""
self._config_data.serial_no += 1
+ self._config_data.mtime = time.time()
+
+ def _AllUUIDObjects(self):
+ """Returns all objects with uuid attributes.
+
+ """
+ return (self._config_data.instances.values() +
+ self._config_data.nodes.values() +
+ [self._config_data.cluster])
def _OpenConfig(self):
"""Read the config data from disk.
- In case we already have configuration data and the config file has
- the same mtime as when we read it, we skip the parsing of the
- file, since de-serialisation could be slow.
-
"""
- f = open(self._cfg_file, 'r')
+ raw_data = utils.ReadFile(self._cfg_file)
+
try:
- try:
- data = objects.ConfigData.FromDict(serializer.Load(f.read()))
- except Exception, err:
- raise errors.ConfigurationError(err)
- finally:
- f.close()
+ data = objects.ConfigData.FromDict(serializer.Load(raw_data))
+ except Exception, err:
+ raise errors.ConfigurationError(err)
# Make sure the configuration has the right version
_ValidateConfig(data)
not hasattr(data.cluster, 'rsahostkeypub')):
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
+
+ # Upgrade configuration if needed
+ data.UpgradeConfig()
+
self._config_data = data
+ # reset the last serial as -1 so that the next write will cause
+ # ssconf update
+ self._last_cluster_serial = -1
+
+ # And finally run our (custom) config upgrade sequence
+ self._UpgradeConfig()
+
+ def _UpgradeConfig(self):
+ """Run upgrade steps that cannot be done purely in the objects.
+
+ This is because some data elements need uniqueness across the
+ whole configuration, etc.
+
+ @warning: this function will call L{_WriteConfig()}, so it needs
+ to either be called with the lock held or from a safe place
+ (the constructor)
+
+ """
+ modified = False
+ for item in self._AllUUIDObjects():
+ if item.uuid is None:
+ item.uuid = self._GenerateUniqueID()
+ modified = True
+ if modified:
+ self._WriteConfig()
- def _DistributeConfig(self):
+ def _DistributeConfig(self, feedback_fn):
"""Distribute the configuration to the other nodes.
Currently, this only copies the configuration file. In the future,
"""
if self._offline:
return True
+
bad = False
- nodelist = self._UnlockedGetNodeList()
- myhostname = self._my_hostname
- try:
- nodelist.remove(myhostname)
- except ValueError:
- pass
+ node_list = []
+ addr_list = []
+ myhostname = self._my_hostname
# we can skip checking whether _UnlockedGetNodeInfo returns None
# since the node list comes from _UnlockedGetNodeList, and we are
# called with the lock held, so no modifications should take place
# in between
- address_list = [self._UnlockedGetNodeInfo(name).primary_ip
- for name in nodelist]
-
- result = rpc.RpcRunner.call_upload_file(nodelist, self._cfg_file,
- address_list=address_list)
- for node in nodelist:
- if not result[node]:
- logging.error("copy of file %s to node %s failed",
- self._cfg_file, node)
+ for node_name in self._UnlockedGetNodeList():
+ if node_name == myhostname:
+ continue
+ node_info = self._UnlockedGetNodeInfo(node_name)
+ if not node_info.master_candidate:
+ continue
+ node_list.append(node_info.name)
+ addr_list.append(node_info.primary_ip)
+
+ result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
+ address_list=addr_list)
+ for to_node, to_result in result.items():
+ msg = to_result.fail_msg
+ if msg:
+ msg = ("Copy of file %s to node %s failed: %s" %
+ (self._cfg_file, to_node, msg))
+ logging.error(msg)
+
+ if feedback_fn:
+ feedback_fn(msg)
+
bad = True
+
return not bad
- def _WriteConfig(self, destination=None):
+ def _WriteConfig(self, destination=None, feedback_fn=None):
"""Write the configuration data to persistent storage.
"""
+ assert feedback_fn is None or callable(feedback_fn)
+
+ # First, cleanup the _temporary_ids set, if an ID is now in the
+ # other objects it should be discarded to prevent unbounded growth
+ # of that structure
+ self._CleanupTemporaryIDs()
+
+ # Warn on config errors, but don't abort the save - the
+ # configuration has already been modified, and we can't revert;
+ # the best we can do is to warn the user and save as is, leaving
+ # recovery to the user
+ config_errors = self._UnlockedVerifyConfig()
+ if config_errors:
+ errmsg = ("Configuration data is not consistent: %s" %
+ (", ".join(config_errors)))
+ logging.critical(errmsg)
+ if feedback_fn:
+ feedback_fn(errmsg)
+
if destination is None:
destination = self._cfg_file
self._BumpSerialNo()
txt = serializer.Dump(self._config_data.ToDict())
- dir_name, file_name = os.path.split(destination)
- fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
- f = os.fdopen(fd, 'w')
- try:
- f.write(txt)
- os.fsync(f.fileno())
- finally:
- f.close()
- # we don't need to do os.close(fd) as f.close() did it
- os.rename(name, destination)
+
+ utils.WriteFile(destination, data=txt)
+
self.write_count += 1
- # and redistribute the config file
- self._DistributeConfig()
+ # and redistribute the config file to master candidates
+ self._DistributeConfig(feedback_fn)
# Write ssconf files on all nodes (including locally)
- rpc.RpcRunner.call_write_ssconf_files(self._UnlockedGetNodeList())
+ if self._last_cluster_serial < self._config_data.cluster.serial_no:
+ if not self._offline:
+ result = rpc.RpcRunner.call_write_ssconf_files(
+ self._UnlockedGetNodeList(),
+ self._UnlockedGetSsconfValues())
- @locking.ssynchronized(_config_lock)
- def InitConfig(self, version, cluster_config, master_node_config):
- """Create the initial cluster configuration.
+ for nname, nresu in result.items():
+ msg = nresu.fail_msg
+ if msg:
+ errmsg = ("Error while uploading ssconf files to"
+ " node %s: %s" % (nname, msg))
+ logging.warning(errmsg)
+
+ if feedback_fn:
+ feedback_fn(errmsg)
- It will contain the current node, which will also be the master
- node, and no instances.
+ self._last_cluster_serial = self._config_data.cluster.serial_no
- @type version: int
- @param version: Configuration version
- @type cluster_config: objects.Cluster
- @param cluster_config: Cluster configuration
- @type master_node_config: objects.Node
- @param master_node_config: Master node configuration
+ def _UnlockedGetSsconfValues(self):
+ """Return the values needed by ssconf.
+
+ @rtype: dict
+ @return: a dictionary with keys the ssconf names and values their
+ associated value
"""
- nodes = {
- master_node_config.name: master_node_config,
+ fn = "\n".join
+ instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
+ node_names = utils.NiceSort(self._UnlockedGetNodeList())
+ node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
+ node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
+ for ninfo in node_info]
+ node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
+ for ninfo in node_info]
+
+ instance_data = fn(instance_names)
+ off_data = fn(node.name for node in node_info if node.offline)
+ on_data = fn(node.name for node in node_info if not node.offline)
+ mc_data = fn(node.name for node in node_info if node.master_candidate)
+ mc_ips_data = fn(node.primary_ip for node in node_info
+ if node.master_candidate)
+ node_data = fn(node_names)
+ node_pri_ips_data = fn(node_pri_ips)
+ node_snd_ips_data = fn(node_snd_ips)
+
+ cluster = self._config_data.cluster
+ cluster_tags = fn(cluster.GetTags())
+ return {
+ constants.SS_CLUSTER_NAME: cluster.cluster_name,
+ constants.SS_CLUSTER_TAGS: cluster_tags,
+ constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
+ constants.SS_MASTER_CANDIDATES: mc_data,
+ constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
+ constants.SS_MASTER_IP: cluster.master_ip,
+ constants.SS_MASTER_NETDEV: cluster.master_netdev,
+ constants.SS_MASTER_NODE: cluster.master_node,
+ constants.SS_NODE_LIST: node_data,
+ constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
+ constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
+ constants.SS_OFFLINE_NODES: off_data,
+ constants.SS_ONLINE_NODES: on_data,
+ constants.SS_INSTANCE_LIST: instance_data,
+ constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
}
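+ # Hedged example of a few generated values (made-up names and addresses);
+ # every value is a plain string, multi-entry ones joined with newlines:
+ #   constants.SS_MASTER_NODE       -> "node1.example.com"
+ #   constants.SS_NODE_LIST         -> "node1.example.com\nnode2.example.com"
+ #   constants.SS_NODE_PRIMARY_IPS  -> "node1.example.com 192.0.2.1\n..."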
- self._config_data = objects.ConfigData(version=version,
- cluster=cluster_config,
- nodes=nodes,
- instances={},
- serial_no=1)
- self._WriteConfig()
-
@locking.ssynchronized(_config_lock, shared=1)
def GetVGName(self):
"""Return the volume group name.
self._WriteConfig()
@locking.ssynchronized(_config_lock, shared=1)
- def GetDefBridge(self):
- """Return the default bridge.
-
- """
- return self._config_data.cluster.default_bridge
-
- @locking.ssynchronized(_config_lock, shared=1)
def GetMACPrefix(self):
"""Return the mac prefix.
@locking.ssynchronized(_config_lock, shared=1)
def GetClusterInfo(self):
- """Returns informations about the cluster
+ """Returns information about the cluster
- Returns:
- the cluster object
+ @rtype: L{objects.Cluster}
+ @return: the cluster object
"""
return self._config_data.cluster
@locking.ssynchronized(_config_lock)
- def Update(self, target):
+ def Update(self, target, feedback_fn):
"""Notify function to be called after updates.
This function must be called when an object (as returned by
that all modified objects will be saved, but the target argument
is the one the caller wants to ensure that it's saved.
+ @param target: an instance of either L{objects.Cluster},
+ L{objects.Node} or L{objects.Instance} which is existing in
+ the cluster
+ @param feedback_fn: Callable feedback function
+
"""
if self._config_data is None:
raise errors.ProgrammerError("Configuration file not read,"
" cannot save.")
+ update_serial = False
if isinstance(target, objects.Cluster):
test = target == self._config_data.cluster
elif isinstance(target, objects.Node):
test = target in self._config_data.nodes.values()
+ update_serial = True
elif isinstance(target, objects.Instance):
test = target in self._config_data.instances.values()
else:
raise errors.ConfigurationError("Configuration updated since object"
" has been read or unknown object")
target.serial_no += 1
+ target.mtime = now = time.time()
- self._WriteConfig()
+ if update_serial:
+ # for node updates, we need to increase the cluster serial too
+ self._config_data.cluster.serial_no += 1
+ self._config_data.cluster.mtime = now
+
+ if isinstance(target, objects.Instance):
+ self._UnlockedReleaseDRBDMinors(target.name)
+ for nic in target.nics:
+ self._temporary_macs.discard(nic.mac)
+
+ self._WriteConfig(feedback_fn=feedback_fn)