def _ValidateConfig(data):
+ """Verifies that a configuration objects looks valid.
+
+ This only verifies the version of the configuration.
+
+ @raise errors.ConfigurationError: if the version differs from what
+ we expect
+
+ """
if data.version != constants.CONFIG_VERSION:
raise errors.ConfigurationError("Cluster configuration version"
" mismatch, got %s instead of %s" %
self._cfg_file = cfg_file
self._temporary_ids = set()
self._temporary_drbds = {}
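+ # MAC addresses handed out but not yet committed to the configuration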
+ self._temporary_macs = set()
# Note: in order to prevent errors when resolving our name in
# _DistributeConfig, we compute it here once and reuse it; it's
# better to raise an error before starting to modify the config
# file than after it was modified
self._my_hostname = utils.HostInfo().name
+ self._last_cluster_serial = -1
self._OpenConfig()
# this method needs to be static, so that we can call it on the class
byte2 = random.randrange(0, 256)
byte3 = random.randrange(0, 256)
mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
- if mac not in all_macs:
+ if mac not in all_macs and mac not in self._temporary_macs:
break
retries -= 1
else:
raise errors.ConfigurationError("Can't generate unique MAC")
+ self._temporary_macs.add(mac)
return mac
@locking.ssynchronized(_config_lock, shared=1)
"""
all_macs = self._AllMACs()
- return mac in all_macs
+ return mac in all_macs or mac in self._temporary_macs
@locking.ssynchronized(_config_lock, shared=1)
def GenerateDRBDSecret(self):
raise errors.ConfigurationError("Can't generate unique DRBD secret")
return secret
- def _ComputeAllLVs(self):
+ def _AllLVs(self):
"""Compute the list of all LVs.
"""
lvnames.update(lv_list)
return lvnames
+ def _AllIDs(self, include_temporary):
+ """Compute the list of all UUIDs and names we have.
+
+ @type include_temporary: boolean
+ @param include_temporary: whether to include the _temporary_ids set
+ @rtype: set
+ @return: a set of IDs
+
+ """
+ existing = set()
+ if include_temporary:
+ existing.update(self._temporary_ids)
+ existing.update(self._AllLVs())
+ existing.update(self._config_data.instances.keys())
+ existing.update(self._config_data.nodes.keys())
+ return existing
+
@locking.ssynchronized(_config_lock, shared=1)
def GenerateUniqueID(self, exceptions=None):
"""Generate an unique disk name.
This checks the current node, instances and disk names for
duplicates.
- Args:
- - exceptions: a list with some other names which should be checked
- for uniqueness (used for example when you want to get
- more than one id at one time without adding each one in
- turn to the config file
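+
+ An illustrative use (hypothetical variable): names computed earlier in
+ the same operation, but not yet saved to the configuration, can be
+ passed as exceptions so they are not handed out again::
+
+   new_id = self.GenerateUniqueID(exceptions=already_reserved_names)
+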
+ @param exceptions: a list with some other names which should be checked
+ for uniqueness (used for example when you want to get
+ more than one id at one time without adding each one in
+ turn to the config file)
- Returns: the unique id as a string
+ @rtype: string
+ @return: the unique id
"""
- existing = set()
- existing.update(self._temporary_ids)
- existing.update(self._ComputeAllLVs())
- existing.update(self._config_data.instances.keys())
- existing.update(self._config_data.nodes.keys())
+ existing = self._AllIDs(include_temporary=True)
if exceptions is not None:
existing.update(exceptions)
retries = 64
self._temporary_ids.add(unique_id)
return unique_id
+ def _CleanupTemporaryIDs(self):
+ """Cleanups the _temporary_ids structure.
+
+ """
+ existing = self._AllIDs(include_temporary=False)
+ self._temporary_ids = self._temporary_ids - existing
+
def _AllMACs(self):
"""Return all MACs present in the config.
+ @rtype: list
+ @return: the list of all MACs
+
"""
result = []
for instance in self._config_data.instances.values():
def _AllDRBDSecrets(self):
"""Return all DRBD secrets present in the config.
+ @rtype: list
+ @return: the list of all DRBD secrets
+
"""
def helper(disk, result):
"""Recursively gather secrets from this disk."""
return result
- @locking.ssynchronized(_config_lock, shared=1)
- def VerifyConfig(self):
+ def _CheckDiskIDs(self, disk, l_ids, p_ids):
+ """Compute duplicate disk IDs
+
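+ The l_ids and p_ids arguments are extended in place with the ids seen
+ so far, so the same lists should be passed when walking all the disks
+ of the configuration; an illustrative sketch::
+
+   seen_lids, seen_pids, errors = [], [], []
+   for disk in instance.disks:
+     errors.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+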
+ @type disk: L{objects.Disk}
+ @param disk: the disk at which to start searching
+ @type l_ids: list
+ @param l_ids: list of current logical ids
+ @type p_ids: list
+ @param p_ids: list of current physical ids
+ @rtype: list
+ @return: a list of error messages
+
+ """
+ result = []
+ if disk.logical_id is not None:
+ if disk.logical_id in l_ids:
+ result.append("duplicate logical id %s" % str(disk.logical_id))
+ else:
+ l_ids.append(disk.logical_id)
+ if disk.physical_id is not None:
+ if disk.physical_id in p_ids:
+ result.append("duplicate physical id %s" % str(disk.physical_id))
+ else:
+ p_ids.append(disk.physical_id)
+
+ if disk.children:
+ for child in disk.children:
+ result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
+ return result
+
+ def _UnlockedVerifyConfig(self):
"""Verify function.
+ @rtype: list
+ @return: a list of error messages; a non-empty list signifies
+ configuration errors
+
"""
result = []
seen_macs = []
ports = {}
data = self._config_data
+ seen_lids = []
+ seen_pids = []
+
+ # global cluster checks
+ if not data.cluster.enabled_hypervisors:
+ result.append("enabled hypervisors list doesn't have any entries")
+ invalid_hvs = set(data.cluster.enabled_hypervisors) - constants.HYPER_TYPES
+ if invalid_hvs:
+ result.append("enabled hypervisors contains invalid entries: %s" %
+ invalid_hvs)
+
+ if data.cluster.master_node not in data.nodes:
+ result.append("cluster has invalid primary node '%s'" %
+ data.cluster.master_node)
+
+ # per-instance checks
for instance_name in data.instances:
instance = data.instances[instance_name]
if instance.primary_node not in data.nodes:
ports[net_port] = []
ports[net_port].append((instance.name, "network port"))
+ # instance disk verify
+ for idx, disk in enumerate(instance.disks):
+ result.extend(["instance '%s' disk %d error: %s" %
+ (instance.name, idx, msg) for msg in disk.Verify()])
+ result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+
# cluster-wide pool of free ports
for free_port in data.cluster.tcpudp_port_pool:
if free_port not in ports:
result.append("Highest used port mismatch, saved %s, computed %s" %
(data.cluster.highest_used_port, keys[-1]))
- cp_size = data.cluster.candidate_pool_size
- num_c = 0
+ if not data.nodes[data.cluster.master_node].master_candidate:
+ result.append("Master node is not a master candidate")
+
+ # master candidate checks
+ mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+ if mc_now < mc_max:
+ result.append("Not enough master candidates: actual %d, target %d" %
+ (mc_now, mc_max))
+
+ # node checks
for node in data.nodes.values():
- if node.master_candidate:
- num_c += 1
- if cp_size > num_c:
- result.append("Not enough master candidates: actual %d, desired %d" %
- (num_c, cp_size))
+ if [node.master_candidate, node.drained, node.offline].count(True) > 1:
+ result.append("Node %s state is invalid: master_candidate=%s,"
+ " drain=%s, offline=%s" %
+ (node.name, node.master_candidate, node.drain,
+ node.offline))
+
+ # drbd minors check
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ for node, minor, instance_a, instance_b in duplicates:
+ result.append("DRBD minor %d on node %s is assigned twice to instances"
+ " %s and %s" % (minor, node, instance_a, instance_b))
return result
+ @locking.ssynchronized(_config_lock, shared=1)
+ def VerifyConfig(self):
+ """Verify function.
+
+ This is just a wrapper over L{_UnlockedVerifyConfig}.
+
+ @rtype: list
+ @return: a list of error messages; a non-empty list signifies
+ configuration errors
+
+ """
+ return self._UnlockedVerifyConfig()
+
def _UnlockedSetDiskID(self, disk, node_name):
"""Convert the unique ID to the ID needed on the target nodes.
self._WriteConfig()
return port
- def _ComputeDRBDMap(self, instance):
+ def _UnlockedComputeDRBDMap(self):
"""Compute the used DRBD minor/nodes.
- Return: dictionary of node_name: dict of minor: instance_name. The
- returned dict will have all the nodes in it (even if with an empty
- list).
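+
+ An illustrative return value (hypothetical names), with every node
+ present even when none of its minors are in use::
+
+   ({"node1.example.com": {0: "instance1.example.com"},
+     "node2.example.com": {}},
+    [])
+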
+ @rtype: (dict, list)
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty minor dict), and a list of duplicates; if the duplicates
+ list is not empty, the configuration is corrupted and the caller
+ should raise an exception
"""
def _AppendUsedPorts(instance_name, disk, used):
+ duplicates = []
if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
- nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
- for node, port in ((nodeA, minorA), (nodeB, minorB)):
- assert node in used, "Instance node not found in node list"
+ node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
+ for node, port in ((node_a, minor_a), (node_b, minor_b)):
+ assert node in used, ("Node '%s' of instance '%s' not found"
+ " in node list" % (node, instance_name))
if port in used[node]:
- raise errors.ProgrammerError("DRBD minor already used:"
- " %s/%s, %s/%s" %
- (node, port, instance_name,
- used[node][port]))
-
- used[node][port] = instance_name
+ duplicates.append((node, port, instance_name, used[node][port]))
+ else:
+ used[node][port] = instance_name
if disk.children:
for child in disk.children:
- _AppendUsedPorts(instance_name, child, used)
+ duplicates.extend(_AppendUsedPorts(instance_name, child, used))
+ return duplicates
+ duplicates = []
my_dict = dict((node, {}) for node in self._config_data.nodes)
- for (node, minor), instance in self._temporary_drbds.iteritems():
- my_dict[node][minor] = instance
for instance in self._config_data.instances.itervalues():
for disk in instance.disks:
- _AppendUsedPorts(instance.name, disk, my_dict)
- return my_dict
+ duplicates.extend(_AppendUsedPorts(instance.name, disk, my_dict))
+ for (node, minor), instance in self._temporary_drbds.iteritems():
+ if minor in my_dict[node] and my_dict[node][minor] != instance:
+ duplicates.append((node, minor, instance, my_dict[node][minor]))
+ else:
+ my_dict[node][minor] = instance
+ return my_dict, duplicates
+
+ @locking.ssynchronized(_config_lock)
+ def ComputeDRBDMap(self):
+ """Compute the used DRBD minor/nodes.
+
+ This is just a wrapper over L{_UnlockedComputeDRBDMap}.
+
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty minor dict).
+
+ """
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ if duplicates:
+ raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
+ str(duplicates))
+ return d_map
@locking.ssynchronized(_config_lock)
def AllocateDRBDMinor(self, nodes, instance):
multiple minors. The result is the list of minors, in the same
order as the passed nodes.
+ @type instance: string
+ @param instance: the instance for which we allocate minors
+
"""
- d_map = self._ComputeDRBDMap(instance)
+ assert isinstance(instance, basestring), \
+ "Invalid argument '%s' passed to AllocateDRBDMinor" % instance
+
+ d_map, duplicates = self._UnlockedComputeDRBDMap()
+ if duplicates:
+ raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
+ str(duplicates))
result = []
for nname in nodes:
ndata = d_map[nname]
minor = keys[-1] + 1
else:
minor = ffree
- result.append(minor)
+ # double-check minor against current instances
+ assert minor not in d_map[nname], \
+ ("Attempt to reuse allocated DRBD minor %d on node %s,"
+ " already allocated to instance %s" %
+ (minor, nname, d_map[nname][minor]))
ndata[minor] = instance
- assert (nname, minor) not in self._temporary_drbds, \
- "Attempt to reuse reserved DRBD minor"
- self._temporary_drbds[(nname, minor)] = instance
+ # double-check minor against reservation
+ r_key = (nname, minor)
+ assert r_key not in self._temporary_drbds, \
+ ("Attempt to reuse reserved DRBD minor %d on node %s,"
+ " reserved for instance %s" %
+ (minor, nname, self._temporary_drbds[r_key]))
+ self._temporary_drbds[r_key] = instance
+ result.append(minor)
logging.debug("Request to allocate drbd minors, input: %s, returning %s",
nodes, result)
return result
- @locking.ssynchronized(_config_lock)
- def ReleaseDRBDMinors(self, instance):
+ def _UnlockedReleaseDRBDMinors(self, instance):
"""Release temporary drbd minors allocated for a given instance.
- This should be called on both the error paths and on the success
- paths (after the instance has been added or updated).
-
@type instance: string
@param instance: the instance for which temporary minors should be
released
"""
+ assert isinstance(instance, basestring), \
+ "Invalid argument passed to ReleaseDRBDMinors"
for key, name in self._temporary_drbds.items():
if name == instance:
del self._temporary_drbds[key]
+ @locking.ssynchronized(_config_lock)
+ def ReleaseDRBDMinors(self, instance):
+ """Release temporary drbd minors allocated for a given instance.
+
+ This should be called on the error paths, on the success paths
+ it's automatically called by the ConfigWriter add and update
+ functions.
+
+ This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
+
+ @type instance: string
+ @param instance: the instance for which temporary minors should be
+ released
+
+ """
+ self._UnlockedReleaseDRBDMinors(instance)
+
@locking.ssynchronized(_config_lock, shared=1)
def GetConfigVersion(self):
"""Get the configuration version.
def GetHostKey(self):
"""Return the rsa hostkey from the config.
- Args: None
+ @rtype: string
+ @return: the rsa hostkey
- Returns: rsa hostkey
"""
return self._config_data.cluster.rsahostkeypub
This should be used after creating a new instance.
- Args:
- instance: the instance object
+ @type instance: L{objects.Instance}
+ @param instance: the instance object
+
"""
if not isinstance(instance, objects.Instance):
raise errors.ProgrammerError("Invalid type passed to AddInstance")
all_lvs = instance.MapLVsByNode()
logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
+ all_macs = self._AllMACs()
+ for nic in instance.nics:
+ if nic.mac in all_macs:
+ raise errors.ConfigurationError("Cannot add instance %s:"
+ " MAC address '%s' already in use." % (instance.name, nic.mac))
+
instance.serial_no = 1
self._config_data.instances[instance.name] = instance
+ self._config_data.cluster.serial_no += 1
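+ # the instance is committed to the configuration now, so drop its
+ # temporary DRBD minor and MAC reservations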
+ self._UnlockedReleaseDRBDMinors(instance.name)
+ for nic in instance.nics:
+ self._temporary_macs.discard(nic.mac)
self._WriteConfig()
def _SetInstanceStatus(self, instance_name, status):
"""Set the instance's status to a given value.
"""
- if status not in ("up", "down"):
- raise errors.ProgrammerError("Invalid status '%s' passed to"
- " ConfigWriter._SetInstanceStatus()" %
- status)
+ assert isinstance(status, bool), \
+ "Invalid status '%s' passed to SetInstanceStatus" % (status,)
if instance_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" %
instance_name)
instance = self._config_data.instances[instance_name]
- if instance.status != status:
- instance.status = status
+ if instance.admin_up != status:
+ instance.admin_up = status
instance.serial_no += 1
self._WriteConfig()
"""Mark the instance status to up in the config.
"""
- self._SetInstanceStatus(instance_name, "up")
+ self._SetInstanceStatus(instance_name, True)
@locking.ssynchronized(_config_lock)
def RemoveInstance(self, instance_name):
if instance_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
del self._config_data.instances[instance_name]
+ self._config_data.cluster.serial_no += 1
self._WriteConfig()
@locking.ssynchronized(_config_lock)
"""Mark the status of an instance to down in the configuration.
"""
- self._SetInstanceStatus(instance_name, "down")
+ self._SetInstanceStatus(instance_name, False)
def _UnlockedGetInstanceList(self):
"""Get the list of instances.
def GetInstanceList(self):
"""Get the list of instances.
- Returns:
- array of instances, ex. ['instance2.example.com','instance1.example.com']
- these contains all the instances, also the ones in Admin_down state
+ @return: list of instance names, e.g.
+ ['instance2.example.com', 'instance1.example.com']
"""
return self._UnlockedGetInstanceList()
self._config_data.instances.keys())
def _UnlockedGetInstanceInfo(self, instance_name):
- """Returns informations about an instance.
+ """Returns information about an instance.
This function is for internal use, when the config lock is already held.
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceInfo(self, instance_name):
- """Returns informations about an instance.
+ """Returns information about an instance.
- It takes the information from the configuration file. Other informations of
- an instance are taken from the live systems.
+ It takes the information from the configuration file. Other information
+ about an instance is taken from the live systems.
- Args:
- instance: name of the instance, ex instance1.example.com
+ @param instance_name: name of the instance, e.g.
+ I{instance1.example.com}
- Returns:
- the instance object
+ @rtype: L{objects.Instance}
+ @return: the instance object
"""
return self._UnlockedGetInstanceInfo(instance_name)
"""Get the configuration of all instances.
@rtype: dict
- @returns: dict of (instance, instance_info), where instance_info is what
+ @return: dict of (instance, instance_info), where instance_info is what
would GetInstanceInfo return for the node
"""
def AddNode(self, node):
"""Add a node to the configuration.
- Args:
- node: an object.Node instance
+ @type node: L{objects.Node}
+ @param node: a Node instance
"""
logging.info("Adding node %s to configuration" % node.name)
def _UnlockedGetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
- This function is for internal use, when the config lock is already held.
+ This function is for internal use, when the config lock is already
+ held.
- Args: node: nodename (tuple) of the node
+ @param node_name: the node name, e.g. I{node1.example.com}
- Returns: the node object
+ @rtype: L{objects.Node}
+ @return: the node object
"""
if node_name not in self._config_data.nodes:
def GetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
- Args: node: nodename (tuple) of the node
+ This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
- Returns: the node object
+ @param node_name: the node name, e.g. I{node1.example.com}
+
+ @rtype: L{objects.Node}
+ @return: the node object
"""
return self._UnlockedGetNodeInfo(node_name)
def _UnlockedGetNodeList(self):
"""Return the list of nodes which are in the configuration.
- This function is for internal use, when the config lock is already held.
+ This function is for internal use, when the config lock is already
+ held.
+
+ @rtype: list
"""
return self._config_data.nodes.keys()
return self._UnlockedGetNodeList()
@locking.ssynchronized(_config_lock, shared=1)
+ def GetOnlineNodeList(self):
+ """Return the list of nodes which are online.
+
+ """
+ all_nodes = [self._UnlockedGetNodeInfo(node)
+ for node in self._UnlockedGetNodeList()]
+ return [node.name for node in all_nodes if not node.offline]
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GetAllNodesInfo(self):
"""Get the configuration of all nodes.
@rtype: dict
- @returns: dict of (node, node_info), where node_info is what
+ @return: dict of (node, node_info), where node_info is what
would GetNodeInfo return for the node
"""
for node in self._UnlockedGetNodeList()])
return my_dict
+ def _UnlockedGetMasterCandidateStats(self, exceptions=None):
+ """Get the number of current and maximum desired and possible candidates.
+
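+ For illustration: with five nodes of which one is offline and one is
+ drained, and a candidate_pool_size of 10, mc_max is min(3, 10) = 3.
+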
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
+ @rtype: tuple
+ @return: tuple of (current, max)
+
+ """
+ mc_now = mc_max = 0
+ for node in self._config_data.nodes.values():
+ if exceptions and node.name in exceptions:
+ continue
+ if not (node.offline or node.drained):
+ mc_max += 1
+ if node.master_candidate:
+ mc_now += 1
+ mc_max = min(mc_max, self._config_data.cluster.candidate_pool_size)
+ return (mc_now, mc_max)
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterCandidateStats(self, exceptions=None):
+ """Get the number of current and maximum possible candidates.
+
+ This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
+
+ @type exceptions: list
+ @param exceptions: if passed, list of nodes that should be ignored
+ @rtype: tuple
+ @return: tuple of (current, max)
+
+ """
+ return self._UnlockedGetMasterCandidateStats(exceptions)
+
+ @locking.ssynchronized(_config_lock)
+ def MaintainCandidatePool(self):
+ """Try to grow the candidate pool to the desired size.
+
+ @rtype: list
+ @return: list with the adjusted nodes (L{objects.Node} instances)
+
+ """
+ mc_now, mc_max = self._UnlockedGetMasterCandidateStats()
+ mod_list = []
+ if mc_now < mc_max:
+ node_list = self._config_data.nodes.keys()
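+ # walk the nodes in random order, promoting eligible ones until the
+ # pool is full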
+ random.shuffle(node_list)
+ for name in node_list:
+ if mc_now >= mc_max:
+ break
+ node = self._config_data.nodes[name]
+ if node.master_candidate or node.offline or node.drained:
+ continue
+ mod_list.append(node)
+ node.master_candidate = True
+ node.serial_no += 1
+ mc_now += 1
+ if mc_now != mc_max:
+ # this should not happen
+ logging.warning("Warning: MaintainCandidatePool didn't manage to"
+ " fill the candidate pool (%d/%d)", mc_now, mc_max)
+ if mod_list:
+ self._config_data.cluster.serial_no += 1
+ self._WriteConfig()
+
+ return mod_list
+
def _BumpSerialNo(self):
"""Bump up the serial number of the config.
def _OpenConfig(self):
"""Read the config data from disk.
- In case we already have configuration data and the config file has
- the same mtime as when we read it, we skip the parsing of the
- file, since de-serialisation could be slow.
-
"""
f = open(self._cfg_file, 'r')
try:
not hasattr(data.cluster, 'rsahostkeypub')):
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
+
+ # Upgrade configuration if needed
+ data.UpgradeConfig()
+
self._config_data = data
- # init the last serial as -1 so that the next write will cause
+ # reset the last serial to -1 so that the next write will cause
# ssconf update
self._last_cluster_serial = -1
"""Write the configuration data to persistent storage.
"""
+ # first, clean up the _temporary_ids set: any ID that is now used by
+ # other objects should be discarded, to prevent unbounded growth of
+ # that structure
+ self._CleanupTemporaryIDs()
+ config_errors = self._UnlockedVerifyConfig()
+ if config_errors:
+ raise errors.ConfigurationError("Configuration data is not"
+ " consistent: %s" %
+ (", ".join(config_errors)))
if destination is None:
destination = self._cfg_file
self._BumpSerialNo()
associated value
"""
- node_list = utils.NiceSort(self._UnlockedGetNodeList())
- mc_list = [self._UnlockedGetNodeInfo(name) for name in node_list]
- mc_list = [node.name for node in mc_list if node.master_candidate]
- node_list = "\n".join(node_list)
- mc_list = "\n".join(mc_list)
+ fn = "\n".join
+ instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
+ node_names = utils.NiceSort(self._UnlockedGetNodeList())
+ node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
+
+ instance_data = fn(instance_names)
+ off_data = fn(node.name for node in node_info if node.offline)
+ on_data = fn(node.name for node in node_info if not node.offline)
+ mc_data = fn(node.name for node in node_info if node.master_candidate)
+ node_data = fn(node_names)
cluster = self._config_data.cluster
+ cluster_tags = fn(cluster.GetTags())
return {
constants.SS_CLUSTER_NAME: cluster.cluster_name,
+ constants.SS_CLUSTER_TAGS: cluster_tags,
constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
- constants.SS_MASTER_CANDIDATES: mc_list,
+ constants.SS_MASTER_CANDIDATES: mc_data,
constants.SS_MASTER_IP: cluster.master_ip,
constants.SS_MASTER_NETDEV: cluster.master_netdev,
constants.SS_MASTER_NODE: cluster.master_node,
- constants.SS_NODE_LIST: node_list,
+ constants.SS_NODE_LIST: node_data,
+ constants.SS_OFFLINE_NODES: off_data,
+ constants.SS_ONLINE_NODES: on_data,
+ constants.SS_INSTANCE_LIST: instance_data,
+ constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
}
- @locking.ssynchronized(_config_lock)
- def InitConfig(self, version, cluster_config, master_node_config):
- """Create the initial cluster configuration.
-
- It will contain the current node, which will also be the master
- node, and no instances.
-
- @type version: int
- @param version: Configuration version
- @type cluster_config: objects.Cluster
- @param cluster_config: Cluster configuration
- @type master_node_config: objects.Node
- @param master_node_config: Master node configuration
-
- """
- nodes = {
- master_node_config.name: master_node_config,
- }
-
- self._config_data = objects.ConfigData(version=version,
- cluster=cluster_config,
- nodes=nodes,
- instances={},
- serial_no=1)
- self._WriteConfig()
-
@locking.ssynchronized(_config_lock, shared=1)
def GetVGName(self):
"""Return the volume group name.
@locking.ssynchronized(_config_lock, shared=1)
def GetClusterInfo(self):
- """Returns informations about the cluster
+ """Returns information about the cluster
- Returns:
- the cluster object
+ @rtype: L{objects.Cluster}
+ @return: the cluster object
"""
return self._config_data.cluster
that all modified objects will be saved, but the target argument
is the one the caller wants to ensure that it's saved.
+ @param target: an instance of either L{objects.Cluster},
+ L{objects.Node} or L{objects.Instance} which is existing in
+ the cluster
+
"""
if self._config_data is None:
raise errors.ProgrammerError("Configuration file not read,"
# for node updates, we need to increase the cluster serial too
self._config_data.cluster.serial_no += 1
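+ # for instance updates, drop any temporary DRBD minor and MAC
+ # reservations, since the saved object now carries them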
+ if isinstance(target, objects.Instance):
+ self._UnlockedReleaseDRBDMinors(target.name)
+ for nic in target.nics:
+ self._temporary_macs.discard(nic.mac)
+
self._WriteConfig()