#
#
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
"""
+# pylint: disable-msg=R0904
+# R0904: Too many public methods
+
import os
-import tempfile
import random
import logging
+import time
from ganeti import errors
from ganeti import locking
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
+from ganeti import uidpool
+from ganeti import netutils
+from ganeti import runtime
+
+_config_lock = locking.SharedLock("ConfigWriter")
-_config_lock = locking.SharedLock()
+# job id used for resource management at config upgrade time
+_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
def _ValidateConfig(data):
"""
if data.version != constants.CONFIG_VERSION:
- raise errors.ConfigurationError("Cluster configuration version"
- " mismatch, got %s instead of %s" %
- (data.version,
- constants.CONFIG_VERSION))
+ raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION, data.version)
+
+
+class TemporaryReservationManager:
+ """A temporary resource reservation manager.
+
+ This is used to reserve resources in a job, before using them, making sure
+ other jobs cannot get them in the meantime.
+
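+  A minimal usage sketch (the ec_id would typically be a job id):
+
+    trm = TemporaryReservationManager()
+    trm.Reserve("job-42", "aa:00:00:12:34:56")
+    trm.Reserved("aa:00:00:12:34:56")  # True
+    trm.DropECReservations("job-42")   # releases all of job-42's holds
+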
+ """
+ def __init__(self):
+ self._ec_reserved = {}
+
+ def Reserved(self, resource):
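+    """Check whether a resource is already reserved, by any context.
+
+    """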
+ for holder_reserved in self._ec_reserved.values():
+ if resource in holder_reserved:
+ return True
+ return False
+
+ def Reserve(self, ec_id, resource):
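+    """Reserve a resource on behalf of the given execution context.
+
+    @raises errors.ReservationError: if the resource is already reserved
+
+    """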
+ if self.Reserved(resource):
+ raise errors.ReservationError("Duplicate reservation for resource '%s'"
+ % str(resource))
+ if ec_id not in self._ec_reserved:
+ self._ec_reserved[ec_id] = set([resource])
+ else:
+ self._ec_reserved[ec_id].add(resource)
+
+ def DropECReservations(self, ec_id):
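+    """Drop all reservations held by the given execution context.
+
+    """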
+ if ec_id in self._ec_reserved:
+ del self._ec_reserved[ec_id]
+
+ def GetReserved(self):
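+    """Return the set of resources reserved by all contexts.
+
+    """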
+ all_reserved = set()
+ for holder_reserved in self._ec_reserved.values():
+ all_reserved.update(holder_reserved)
+ return all_reserved
+
+ def Generate(self, existing, generate_one_fn, ec_id):
+ """Generate a new resource of this type
+
+ """
+ assert callable(generate_one_fn)
+
+ all_elems = self.GetReserved()
+ all_elems.update(existing)
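+    # bound the number of attempts, so that a generator that keeps
+    # returning duplicates (or None) cannot loop forever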
+ retries = 64
+    while retries > 0:
+      new_resource = generate_one_fn()
+      if new_resource is not None and new_resource not in all_elems:
+        break
+      retries -= 1
+    else:
+      raise errors.ConfigurationError("Not able to generate new resource"
+                                      " (last tried: %s)" % new_resource)
+ self.Reserve(ec_id, new_resource)
+ return new_resource
class ConfigWriter:
"""The interface to the cluster configuration.
+ @ivar _temporary_lvs: reservation manager for temporary LVs
+ @ivar _all_rms: a list of all temporary reservation managers
+
"""
- def __init__(self, cfg_file=None, offline=False):
+ def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
+ accept_foreign=False):
self.write_count = 0
self._lock = _config_lock
self._config_data = None
self._cfg_file = constants.CLUSTER_CONF_FILE
else:
self._cfg_file = cfg_file
- self._temporary_ids = set()
+ self._getents = _getents
+ self._temporary_ids = TemporaryReservationManager()
self._temporary_drbds = {}
- self._temporary_macs = set()
+ self._temporary_macs = TemporaryReservationManager()
+ self._temporary_secrets = TemporaryReservationManager()
+ self._temporary_lvs = TemporaryReservationManager()
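+    # keep all reservation managers in one list, so that a job's
+    # reservations of every resource type can be dropped in one pass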
+ self._all_rms = [self._temporary_ids, self._temporary_macs,
+ self._temporary_secrets, self._temporary_lvs]
# Note: in order to prevent errors when resolving our name in
# _DistributeConfig, we compute it here once and reuse it; it's
# better to raise an error before starting to modify the config
# file than after it was modified
- self._my_hostname = utils.HostInfo().name
+ self._my_hostname = netutils.Hostname.GetSysName()
self._last_cluster_serial = -1
- self._OpenConfig()
+ self._cfg_id = None
+ self._OpenConfig(accept_foreign)
# this method needs to be static, so that we can call it on the class
@staticmethod
"""
return os.path.exists(constants.CLUSTER_CONF_FILE)
+ def _GenerateOneMAC(self):
+ """Generate one mac address
+
+ """
+ prefix = self._config_data.cluster.mac_prefix
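+    # the cluster-wide MAC prefix supplies the leading octets; only the
+    # trailing three are randomized here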
+ byte1 = random.randrange(0, 256)
+ byte2 = random.randrange(0, 256)
+ byte3 = random.randrange(0, 256)
+ mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
+ return mac
+
@locking.ssynchronized(_config_lock, shared=1)
- def GenerateMAC(self):
+ def GenerateMAC(self, ec_id):
"""Generate a MAC for an instance.
This should check the current instances for duplicates.
"""
- prefix = self._config_data.cluster.mac_prefix
- all_macs = self._AllMACs()
- retries = 64
- while retries > 0:
- byte1 = random.randrange(0, 256)
- byte2 = random.randrange(0, 256)
- byte3 = random.randrange(0, 256)
- mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
- if mac not in all_macs and mac not in self._temporary_macs:
- break
- retries -= 1
- else:
- raise errors.ConfigurationError("Can't generate unique MAC")
- self._temporary_macs.add(mac)
- return mac
+ existing = self._AllMACs()
+    return self._temporary_macs.Generate(existing, self._GenerateOneMAC,
+                                          ec_id)
@locking.ssynchronized(_config_lock, shared=1)
- def IsMacInUse(self, mac):
- """Predicate: check if the specified MAC is in use in the Ganeti cluster.
+ def ReserveMAC(self, mac, ec_id):
+ """Reserve a MAC for an instance.
This only checks instances managed by this cluster, it does not
check for potential collisions elsewhere.
"""
all_macs = self._AllMACs()
- return mac in all_macs or mac in self._temporary_macs
+ if mac in all_macs:
+ raise errors.ReservationError("mac already in use")
+ else:
+ self._temporary_macs.Reserve(mac, ec_id)
@locking.ssynchronized(_config_lock, shared=1)
- def GenerateDRBDSecret(self):
+ def ReserveLV(self, lv_name, ec_id):
+ """Reserve an VG/LV pair for an instance.
+
+ @type lv_name: string
+ @param lv_name: the logical volume name to reserve
+
+ """
+ all_lvs = self._AllLVs()
+ if lv_name in all_lvs:
+ raise errors.ReservationError("LV already in use")
+ else:
+      self._temporary_lvs.Reserve(ec_id, lv_name)
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GenerateDRBDSecret(self, ec_id):
"""Generate a DRBD secret.
This checks the current disks for duplicates.
"""
- all_secrets = self._AllDRBDSecrets()
- retries = 64
- while retries > 0:
- secret = utils.GenerateSecret()
- if secret not in all_secrets:
- break
- retries -= 1
- else:
- raise errors.ConfigurationError("Can't generate unique DRBD secret")
- return secret
+ return self._temporary_secrets.Generate(self._AllDRBDSecrets(),
+ utils.GenerateSecret,
+ ec_id)
def _AllLVs(self):
"""Compute the list of all LVs.
"""
existing = set()
if include_temporary:
- existing.update(self._temporary_ids)
+ existing.update(self._temporary_ids.GetReserved())
existing.update(self._AllLVs())
existing.update(self._config_data.instances.keys())
existing.update(self._config_data.nodes.keys())
+ existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
return existing
- @locking.ssynchronized(_config_lock, shared=1)
- def GenerateUniqueID(self, exceptions=None):
- """Generate an unique disk name.
+ def _GenerateUniqueID(self, ec_id):
+ """Generate an unique UUID.
This checks the current node, instances and disk names for
duplicates.
- @param exceptions: a list with some other names which should be checked
- for uniqueness (used for example when you want to get
- more than one id at one time without adding each one in
- turn to the config file)
-
@rtype: string
@return: the unique id
"""
- existing = self._AllIDs(include_temporary=True)
- if exceptions is not None:
- existing.update(exceptions)
- retries = 64
- while retries > 0:
- unique_id = utils.NewUUID()
- if unique_id not in existing and unique_id is not None:
- break
- else:
- raise errors.ConfigurationError("Not able generate an unique ID"
- " (last tried ID: %s" % unique_id)
- self._temporary_ids.add(unique_id)
- return unique_id
+ existing = self._AllIDs(include_temporary=False)
+ return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GenerateUniqueID(self, ec_id):
+ """Generate an unique ID.
+
+ This is just a wrapper over the unlocked version.
+
+ @type ec_id: string
+    @param ec_id: the execution context id that will hold the reservation
+
+ """
+ return self._GenerateUniqueID(ec_id)
def _AllMACs(self):
"""Return all MACs present in the config.
if invalid_hvs:
result.append("enabled hypervisors contains invalid entries: %s" %
invalid_hvs)
+ missing_hvp = (set(data.cluster.enabled_hypervisors) -
+ set(data.cluster.hvparams.keys()))
+ if missing_hvp:
+ result.append("hypervisor parameters missing for the enabled"
+ " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))
if data.cluster.master_node not in data.nodes:
result.append("cluster has invalid primary node '%s'" %
# per-instance checks
for instance_name in data.instances:
instance = data.instances[instance_name]
+ if instance.name != instance_name:
+ result.append("instance '%s' is indexed by wrong name '%s'" %
+ (instance.name, instance_name))
if instance.primary_node not in data.nodes:
result.append("instance '%s' has invalid primary node '%s'" %
(instance_name, instance.primary_node))
for pnum in keys:
pdata = ports[pnum]
if len(pdata) > 1:
- txt = ", ".join(["%s/%s" % val for val in pdata])
+ txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
# highest used tcp port check
result.append("Master node is not a master candidate")
# master candidate checks
- mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+ mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
if mc_now < mc_max:
result.append("Not enough master candidates: actual %d, target %d" %
(mc_now, mc_max))
# node checks
- for node in data.nodes.values():
+ for node_name, node in data.nodes.items():
+ if node.name != node_name:
+ result.append("Node '%s' is indexed by wrong name '%s'" %
+ (node.name, node_name))
if [node.master_candidate, node.drained, node.offline].count(True) > 1:
result.append("Node %s state is invalid: master_candidate=%s,"
" drain=%s, offline=%s" %
- (node.name, node.master_candidate, node.drain,
+ (node.name, node.master_candidate, node.drained,
node.offline))
+ # nodegroups checks
+ nodegroups_names = set()
+ for nodegroup_uuid in data.nodegroups:
+ nodegroup = data.nodegroups[nodegroup_uuid]
+ if nodegroup.uuid != nodegroup_uuid:
+ result.append("nodegroup '%s' (uuid: '%s') indexed by wrong uuid '%s'"
+ % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
+ if utils.UUID_RE.match(nodegroup.name.lower()):
+ result.append("nodegroup '%s' (uuid: '%s') has uuid-like name" %
+ (nodegroup.name, nodegroup.uuid))
+ if nodegroup.name in nodegroups_names:
+ result.append("duplicate nodegroup name '%s'" % nodegroup.name)
+ else:
+ nodegroups_names.add(nodegroup.name)
+
# drbd minors check
- d_map, duplicates = self._UnlockedComputeDRBDMap()
+ _, duplicates = self._UnlockedComputeDRBDMap()
for node, minor, instance_a, instance_b in duplicates:
result.append("DRBD minor %d on node %s is assigned twice to instances"
" %s and %s" % (minor, node, instance_a, instance_b))
+ # IP checks
+ default_nicparams = data.cluster.nicparams[constants.PP_DEFAULT]
+ ips = {}
+
+ def _AddIpAddress(ip, name):
+ ips.setdefault(ip, []).append(name)
+
+ _AddIpAddress(data.cluster.master_ip, "cluster_ip")
+
+ for node in data.nodes.values():
+ _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
+ if node.secondary_ip != node.primary_ip:
+ _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)
+
+ for instance in data.instances.values():
+ for idx, nic in enumerate(instance.nics):
+ if nic.ip is None:
+ continue
+
+ nicparams = objects.FillDict(default_nicparams, nic.nicparams)
+ nic_mode = nicparams[constants.NIC_MODE]
+ nic_link = nicparams[constants.NIC_LINK]
+
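+        # key the address by its link, so that identical IPs on
+        # unrelated networks are not reported as duplicates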
+ if nic_mode == constants.NIC_MODE_BRIDGED:
+ link = "bridge:%s" % nic_link
+ elif nic_mode == constants.NIC_MODE_ROUTED:
+ link = "route:%s" % nic_link
+ else:
+ raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)
+
+ _AddIpAddress("%s/%s" % (link, nic.ip),
+ "instance:%s/nic:%d" % (instance.name, idx))
+
+ for ip, owners in ips.items():
+ if len(owners) > 1:
+ result.append("IP address %s is used by multiple owners: %s" %
+ (ip, utils.CommaJoin(owners)))
+
return result
@locking.ssynchronized(_config_lock, shared=1)
"""Get the hypervisor type for this cluster.
"""
- return self._config_data.cluster.default_hypervisor
+ return self._config_data.cluster.enabled_hypervisors[0]
@locking.ssynchronized(_config_lock, shared=1)
def GetHostKey(self):
"""
return self._config_data.cluster.rsahostkeypub
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetDefaultIAllocator(self):
+ """Get the default instance allocator for this cluster.
+
+ """
+ return self._config_data.cluster.default_iallocator
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetPrimaryIPFamily(self):
+ """Get cluster primary ip family.
+
+ @return: primary ip family
+
+ """
+ return self._config_data.cluster.primary_ip_family
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def LookupNodeGroup(self, target):
+ """Lookup a node group's UUID.
+
+ @type target: string or None
+ @param target: group name or UUID or None to look for the default
+ @rtype: string
+ @return: nodegroup UUID
+ @raises errors.OpPrereqError: when the target group cannot be found
+
+ """
+ if target is None:
+ if len(self._config_data.nodegroups) != 1:
+ raise errors.OpPrereqError("More than one nodegroup exists. Target"
+                                   " group must be specified explicitly.")
+ else:
+ return self._config_data.nodegroups.keys()[0]
+ if target in self._config_data.nodegroups:
+ return target
+ for nodegroup in self._config_data.nodegroups.values():
+ if nodegroup.name == target:
+ return nodegroup.uuid
+ raise errors.OpPrereqError("Nodegroup '%s' not found" % target)
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetNodeGroup(self, uuid):
+ """Lookup a node group.
+
+ @type uuid: string
+ @param uuid: group UUID
+ @rtype: L{objects.NodeGroup} or None
+ @return: nodegroup object, or None if not found
+
+ """
+ if uuid not in self._config_data.nodegroups:
+ return None
+
+ return self._config_data.nodegroups[uuid]
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetAllNodeGroupsInfo(self):
+ """Get the configuration of all node groups.
+
+ """
+ return dict(self._config_data.nodegroups)
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetNodeGroupList(self):
+ """Get a list of node groups.
+
+ """
+ return self._config_data.nodegroups.keys()
+
@locking.ssynchronized(_config_lock)
- def AddInstance(self, instance):
+ def AddInstance(self, instance, ec_id):
"""Add an instance to the config.
This should be used after creating a new instance.
for nic in instance.nics:
if nic.mac in all_macs:
raise errors.ConfigurationError("Cannot add instance %s:"
- " MAC address '%s' already in use." % (instance.name, nic.mac))
+ " MAC address '%s' already in use." %
+ (instance.name, nic.mac))
+
+ self._EnsureUUID(instance, ec_id)
instance.serial_no = 1
+ instance.ctime = instance.mtime = time.time()
self._config_data.instances[instance.name] = instance
self._config_data.cluster.serial_no += 1
self._UnlockedReleaseDRBDMinors(instance.name)
- for nic in instance.nics:
- self._temporary_macs.discard(nic.mac)
self._WriteConfig()
+ def _EnsureUUID(self, item, ec_id):
+ """Ensures a given object has a valid UUID.
+
+ @param item: the instance or node to be checked
+ @param ec_id: the execution context id for the uuid reservation
+
+ """
+ if not item.uuid:
+ item.uuid = self._GenerateUniqueID(ec_id)
+ elif item.uuid in self._AllIDs(include_temporary=True):
+ raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
+ " in use" % (item.name, item.uuid))
+
def _SetInstanceStatus(self, instance_name, status):
"""Set the instance's status to a given value.
if instance.admin_up != status:
instance.admin_up = status
instance.serial_no += 1
+ instance.mtime = time.time()
self._WriteConfig()
@locking.ssynchronized(_config_lock)
if disk.dev_type == constants.LD_FILE:
# rename the file paths in logical and physical id
file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
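+      # iv_name has the form "disk/N"; reuse the index to build a
+      # stable file name under the instance's new directory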
+ disk_fname = "disk%s" % disk.iv_name.split("/")[1]
disk.physical_id = disk.logical_id = (disk.logical_id[0],
- os.path.join(file_storage_dir,
- inst.name,
- disk.iv_name))
+ utils.PathJoin(file_storage_dir,
+ inst.name,
+ disk_fname))
+
+ # Force update of ssconf files
+ self._config_data.cluster.serial_no += 1
self._config_data.instances[inst.name] = inst
self._WriteConfig()
"""
return utils.MatchNameComponent(short_name,
- self._config_data.instances.keys())
+ self._config_data.instances.keys(),
+ case_sensitive=False)
def _UnlockedGetInstanceInfo(self, instance_name):
"""Returns information about an instance.
return my_dict
@locking.ssynchronized(_config_lock)
- def AddNode(self, node):
+ def AddNode(self, node, ec_id):
"""Add a node to the configuration.
@type node: L{objects.Node}
@param node: a Node instance
"""
- logging.info("Adding node %s to configuration" % node.name)
+ logging.info("Adding node %s to configuration", node.name)
+
+ self._EnsureUUID(node, ec_id)
node.serial_no = 1
+ node.ctime = node.mtime = time.time()
+ self._UnlockedAddNodeToGroup(node.name, node.group)
self._config_data.nodes[node.name] = node
self._config_data.cluster.serial_no += 1
self._WriteConfig()
"""Remove a node from the configuration.
"""
- logging.info("Removing node %s from configuration" % node_name)
+ logging.info("Removing node %s from configuration", node_name)
if node_name not in self._config_data.nodes:
raise errors.ConfigurationError("Unknown node '%s'" % node_name)
+ self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
del self._config_data.nodes[node_name]
self._config_data.cluster.serial_no += 1
self._WriteConfig()
"""
return utils.MatchNameComponent(short_name,
- self._config_data.nodes.keys())
+ self._config_data.nodes.keys(),
+ case_sensitive=False)
def _UnlockedGetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
return self._config_data.nodes[node_name]
-
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
"""
return self._UnlockedGetNodeInfo(node_name)
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetNodeInstances(self, node_name):
+ """Get the instances of a node, as stored in the config.
+
+ @param node_name: the node name, e.g. I{node1.example.com}
+
+ @rtype: (list, list)
+ @return: a tuple with two lists: the primary and the secondary instances
+
+ """
+ pri = []
+ sec = []
+ for inst in self._config_data.instances.values():
+ if inst.primary_node == node_name:
+ pri.append(inst.name)
+ if node_name in inst.secondary_nodes:
+ sec.append(inst.name)
+ return (pri, sec)
+
def _UnlockedGetNodeList(self):
"""Return the list of nodes which are in the configuration.
"""
return self._config_data.nodes.keys()
-
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeList(self):
"""Return the list of nodes which are in the configuration.
"""
return self._UnlockedGetNodeList()
+ def _UnlockedGetOnlineNodeList(self):
+ """Return the list of nodes which are online.
+
+ """
+ all_nodes = [self._UnlockedGetNodeInfo(node)
+ for node in self._UnlockedGetNodeList()]
+ return [node.name for node in all_nodes if not node.offline]
+
@locking.ssynchronized(_config_lock, shared=1)
def GetOnlineNodeList(self):
"""Return the list of nodes which are online.
"""
+ return self._UnlockedGetOnlineNodeList()
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetNonVmCapableNodeList(self):
+ """Return the list of nodes which are not vm capable.
+
+ """
all_nodes = [self._UnlockedGetNodeInfo(node)
for node in self._UnlockedGetNodeList()]
- return [node.name for node in all_nodes if not node.offline]
+ return [node.name for node in all_nodes if not node.vm_capable]
@locking.ssynchronized(_config_lock, shared=1)
def GetAllNodesInfo(self):
@type exceptions: list
@param exceptions: if passed, list of nodes that should be ignored
@rtype: tuple
- @return: tuple of (current, desired and possible)
+    @return: tuple of (current, desired, possible) master candidate counts
"""
- mc_now = mc_max = 0
+ mc_now = mc_should = mc_max = 0
for node in self._config_data.nodes.values():
if exceptions and node.name in exceptions:
continue
- if not (node.offline or node.drained):
+ if not (node.offline or node.drained) and node.master_capable:
mc_max += 1
if node.master_candidate:
mc_now += 1
- mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
- return (mc_now, mc_max)
+ mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
+ return (mc_now, mc_should, mc_max)
@locking.ssynchronized(_config_lock, shared=1)
def GetMasterCandidateStats(self, exceptions=None):
return self._UnlockedGetMasterCandidateStats(exceptions)
@locking.ssynchronized(_config_lock)
- def MaintainCandidatePool(self):
+ def MaintainCandidatePool(self, exceptions):
"""Try to grow the candidate pool to the desired size.
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
@rtype: list
@return: list with the adjusted nodes (L{objects.Node} instances)
"""
- mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+ mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
mod_list = []
if mc_now < mc_max:
node_list = self._config_data.nodes.keys()
if mc_now >= mc_max:
break
node = self._config_data.nodes[name]
- if node.master_candidate or node.offline or node.drained:
+ if (node.master_candidate or node.offline or node.drained or
+ node.name in exceptions or not node.master_capable):
continue
mod_list.append(node)
node.master_candidate = True
return mod_list
+ def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid):
+ """Add a given node to the specified group.
+
+ """
+ if nodegroup_uuid not in self._config_data.nodegroups:
+ # This can happen if a node group gets deleted between its lookup and
+ # when we're adding the first node to it, since we don't keep a lock in
+ # the meantime. It's ok though, as we'll fail cleanly if the node group
+ # is not found anymore.
+ raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
+ if node_name not in self._config_data.nodegroups[nodegroup_uuid].members:
+ self._config_data.nodegroups[nodegroup_uuid].members.append(node_name)
+
+ def _UnlockedRemoveNodeFromGroup(self, node):
+ """Remove a given node from its group.
+
+ """
+ nodegroup = node.group
+ if nodegroup not in self._config_data.nodegroups:
+ logging.warning("Warning: node '%s' has unknown node group '%s'"
+ " (while being removed from it)", node.name, nodegroup)
+ nodegroup_obj = self._config_data.nodegroups[nodegroup]
+ if node.name not in nodegroup_obj.members:
+ logging.warning("Warning: node '%s' not a member of its node group '%s'"
+ " (while being removed from it)", node.name, nodegroup)
+ else:
+ nodegroup_obj.members.remove(node.name)
+
def _BumpSerialNo(self):
"""Bump up the serial number of the config.
"""
self._config_data.serial_no += 1
+ self._config_data.mtime = time.time()
+
+ def _AllUUIDObjects(self):
+ """Returns all objects with uuid attributes.
- def _OpenConfig(self):
+ """
+ return (self._config_data.instances.values() +
+ self._config_data.nodes.values() +
+ self._config_data.nodegroups.values() +
+ [self._config_data.cluster])
+
-  def _OpenConfig(self):
+  def _OpenConfig(self, accept_foreign):
"""Read the config data from disk.
"""
- f = open(self._cfg_file, 'r')
+ raw_data = utils.ReadFile(self._cfg_file)
+
try:
- try:
- data = objects.ConfigData.FromDict(serializer.Load(f.read()))
- except Exception, err:
- raise errors.ConfigurationError(err)
- finally:
- f.close()
+ data = objects.ConfigData.FromDict(serializer.Load(raw_data))
+ except Exception, err:
+ raise errors.ConfigurationError(err)
# Make sure the configuration has the right version
_ValidateConfig(data)
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
+ if data.cluster.master_node != self._my_hostname and not accept_foreign:
+ msg = ("The configuration denotes node %s as master, while my"
+ " hostname is %s; opening a foreign configuration is only"
+ " possible in accept_foreign mode" %
+ (data.cluster.master_node, self._my_hostname))
+ raise errors.ConfigurationError(msg)
+
# Upgrade configuration if needed
data.UpgradeConfig()
# ssconf update
self._last_cluster_serial = -1
- def _DistributeConfig(self):
+ # And finally run our (custom) config upgrade sequence
+ self._UpgradeConfig()
+
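+    # remember the identity of the on-disk file, so that concurrent
+    # modifications can be detected on the next write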
+ self._cfg_id = utils.GetFileID(path=self._cfg_file)
+
+ def _UpgradeConfig(self):
+ """Run upgrade steps that cannot be done purely in the objects.
+
+    This is needed because some data elements (e.g. UUIDs) must be
+    unique across the whole configuration, which the per-object
+    upgrade routines cannot guarantee on their own.
+
+ @warning: this function will call L{_WriteConfig()}, but also
+ L{DropECReservations} so it needs to be called only from a
+ "safe" place (the constructor). If one wanted to call it with
+ the lock held, a DropECReservationUnlocked would need to be
+ created first, to avoid causing deadlock.
+
+ """
+ modified = False
+ for item in self._AllUUIDObjects():
+ if item.uuid is None:
+ item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
+ modified = True
+ if not self._config_data.nodegroups:
+ default_nodegroup_uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
+ default_nodegroup = objects.NodeGroup(
+ uuid=default_nodegroup_uuid,
+ name="default",
+ members=[],
+ )
+ self._config_data.nodegroups[default_nodegroup_uuid] = default_nodegroup
+ modified = True
+ for node in self._config_data.nodes.values():
+ if not node.group:
+ node.group = self.LookupNodeGroup(None)
+ modified = True
+ # This is technically *not* an upgrade, but needs to be done both when
+ # nodegroups are being added, and upon normally loading the config,
+ # because the members list of a node group is discarded upon
+ # serializing/deserializing the object.
+ self._UnlockedAddNodeToGroup(node.name, node.group)
+ if modified:
+ self._WriteConfig()
+ # This is ok even if it acquires the internal lock, as _UpgradeConfig is
+ # only called at config init time, without the lock held
+ self.DropECReservations(_UPGRADE_CONFIG_JID)
+
+ def _DistributeConfig(self, feedback_fn):
"""Distribute the configuration to the other nodes.
Currently, this only copies the configuration file. In the future,
"""
if self._offline:
return True
+
bad = False
node_list = []
result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
address_list=addr_list)
- for node in node_list:
- if not result[node]:
- logging.error("copy of file %s to node %s failed",
- self._cfg_file, node)
+ for to_node, to_result in result.items():
+ msg = to_result.fail_msg
+ if msg:
+ msg = ("Copy of file %s to node %s failed: %s" %
+ (self._cfg_file, to_node, msg))
+ logging.error(msg)
+
+ if feedback_fn:
+ feedback_fn(msg)
+
bad = True
+
return not bad
- def _WriteConfig(self, destination=None):
+ def _WriteConfig(self, destination=None, feedback_fn=None):
"""Write the configuration data to persistent storage.
"""
+ assert feedback_fn is None or callable(feedback_fn)
+
+ # Warn on config errors, but don't abort the save - the
+ # configuration has already been modified, and we can't revert;
+ # the best we can do is to warn the user and save as is, leaving
+ # recovery to the user
config_errors = self._UnlockedVerifyConfig()
if config_errors:
- raise errors.ConfigurationError("Configuration data is not"
- " consistent: %s" %
- (", ".join(config_errors)))
+ errmsg = ("Configuration data is not consistent: %s" %
+ (utils.CommaJoin(config_errors)))
+ logging.critical(errmsg)
+ if feedback_fn:
+ feedback_fn(errmsg)
+
if destination is None:
destination = self._cfg_file
self._BumpSerialNo()
txt = serializer.Dump(self._config_data.ToDict())
- dir_name, file_name = os.path.split(destination)
- fd, name = tempfile.mkstemp('.newconfig', file_name, dir_name)
- f = os.fdopen(fd, 'w')
+
+ getents = self._getents()
try:
- f.write(txt)
- os.fsync(f.fileno())
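+      # SafeWriteFile compares the recorded file ID and raises LockError
+      # if the file changed on disk since it was last read or written
+      # (handled below)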
+ fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
+ close=False, gid=getents.confd_gid, mode=0640)
+ except errors.LockError:
+ raise errors.ConfigurationError("The configuration file has been"
+ " modified since the last write, cannot"
+ " update")
+ try:
+ self._cfg_id = utils.GetFileID(fd=fd)
finally:
- f.close()
- # we don't need to do os.close(fd) as f.close() did it
- os.rename(name, destination)
+ os.close(fd)
+
self.write_count += 1
# and redistribute the config file to master candidates
- self._DistributeConfig()
+ self._DistributeConfig(feedback_fn)
# Write ssconf files on all nodes (including locally)
if self._last_cluster_serial < self._config_data.cluster.serial_no:
if not self._offline:
- rpc.RpcRunner.call_write_ssconf_files(self._UnlockedGetNodeList(),
- self._UnlockedGetSsconfValues())
+ result = rpc.RpcRunner.call_write_ssconf_files(
+ self._UnlockedGetOnlineNodeList(),
+ self._UnlockedGetSsconfValues())
+
+ for nname, nresu in result.items():
+ msg = nresu.fail_msg
+ if msg:
+ errmsg = ("Error while uploading ssconf files to"
+ " node %s: %s" % (nname, msg))
+ logging.warning(errmsg)
+
+ if feedback_fn:
+ feedback_fn(errmsg)
+
self._last_cluster_serial = self._config_data.cluster.serial_no
def _UnlockedGetSsconfValues(self):
instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
node_names = utils.NiceSort(self._UnlockedGetNodeList())
node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
+ node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
+ for ninfo in node_info]
+ node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
+ for ninfo in node_info]
instance_data = fn(instance_names)
off_data = fn(node.name for node in node_info if node.offline)
on_data = fn(node.name for node in node_info if not node.offline)
mc_data = fn(node.name for node in node_info if node.master_candidate)
+ mc_ips_data = fn(node.primary_ip for node in node_info
+ if node.master_candidate)
node_data = fn(node_names)
+ node_pri_ips_data = fn(node_pri_ips)
+ node_snd_ips_data = fn(node_snd_ips)
cluster = self._config_data.cluster
cluster_tags = fn(cluster.GetTags())
+
+ hypervisor_list = fn(cluster.enabled_hypervisors)
+
+ uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
+
+ nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
+ self._config_data.nodegroups.values()]
+ nodegroups_data = fn(utils.NiceSort(nodegroups))
+
return {
constants.SS_CLUSTER_NAME: cluster.cluster_name,
constants.SS_CLUSTER_TAGS: cluster_tags,
constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
constants.SS_MASTER_CANDIDATES: mc_data,
+ constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
constants.SS_MASTER_IP: cluster.master_ip,
constants.SS_MASTER_NETDEV: cluster.master_netdev,
constants.SS_MASTER_NODE: cluster.master_node,
constants.SS_NODE_LIST: node_data,
+ constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
+ constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
constants.SS_OFFLINE_NODES: off_data,
constants.SS_ONLINE_NODES: on_data,
+ constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
constants.SS_INSTANCE_LIST: instance_data,
constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
+ constants.SS_HYPERVISOR_LIST: hypervisor_list,
+ constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
+ constants.SS_UID_POOL: uid_pool,
+ constants.SS_NODEGROUPS: nodegroups_data,
}
@locking.ssynchronized(_config_lock, shared=1)
+ def GetSsconfValues(self):
+ """Wrapper using lock around _UnlockedGetSsconf().
+
+ """
+ return self._UnlockedGetSsconfValues()
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GetVGName(self):
"""Return the volume group name.
self._WriteConfig()
@locking.ssynchronized(_config_lock, shared=1)
- def GetDefBridge(self):
- """Return the default bridge.
+ def GetDRBDHelper(self):
+ """Return DRBD usermode helper.
"""
- return self._config_data.cluster.default_bridge
+ return self._config_data.cluster.drbd_usermode_helper
+
+ @locking.ssynchronized(_config_lock)
+ def SetDRBDHelper(self, drbd_helper):
+ """Set DRBD usermode helper.
+
+ """
+ self._config_data.cluster.drbd_usermode_helper = drbd_helper
+ self._config_data.cluster.serial_no += 1
+ self._WriteConfig()
@locking.ssynchronized(_config_lock, shared=1)
def GetMACPrefix(self):
"""
return self._config_data.cluster
+ @locking.ssynchronized(_config_lock, shared=1)
+ def HasAnyDiskOfType(self, dev_type):
+ """Check if in there is at disk of the given type in the configuration.
+
+ """
+ return self._config_data.HasAnyDiskOfType(dev_type)
+
@locking.ssynchronized(_config_lock)
- def Update(self, target):
+ def Update(self, target, feedback_fn):
"""Notify function to be called after updates.
This function must be called when an object (as returned by
@param target: an instance of either L{objects.Cluster},
L{objects.Node} or L{objects.Instance} which is existing in
the cluster
+ @param feedback_fn: Callable feedback function
"""
if self._config_data is None:
raise errors.ConfigurationError("Configuration updated since object"
" has been read or unknown object")
target.serial_no += 1
+ target.mtime = now = time.time()
if update_serial:
# for node updates, we need to increase the cluster serial too
self._config_data.cluster.serial_no += 1
+ self._config_data.cluster.mtime = now
if isinstance(target, objects.Instance):
self._UnlockedReleaseDRBDMinors(target.name)
- for nic in target.nics:
- self._temporary_macs.discard(nic.mac)
- self._WriteConfig()
+ self._WriteConfig(feedback_fn=feedback_fn)
+
+ @locking.ssynchronized(_config_lock)
+ def DropECReservations(self, ec_id):
+ """Drop per-execution-context reservations
+
+ """
+ for rm in self._all_rms:
+ rm.DropECReservations(ec_id)