#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
" iallocator.")
-class LUPostInitCluster(LogicalUnit):
+class LUClusterPostInit(LogicalUnit):
"""Logical unit for running hooks after cluster initialization.
"""
return True
-class LUDestroyCluster(LogicalUnit):
+class LUClusterDestroy(LogicalUnit):
"""Logical unit for destroying the cluster.
"""
def _VerifyCertificate(filename):
- """Verifies a certificate for LUVerifyCluster.
+ """Verifies a certificate for LUClusterVerify.
@type filename: string
@param filename: Path to PEM file
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
utils.ReadFile(filename))
except Exception, err: # pylint: disable-msg=W0703
- return (LUVerifyCluster.ETYPE_ERROR,
+ return (LUClusterVerify.ETYPE_ERROR,
"Failed to load X509 certificate %s: %s" % (filename, err))
(errcode, msg) = \
if errcode is None:
return (None, fnamemsg)
elif errcode == utils.CERT_WARNING:
- return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
+ return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
elif errcode == utils.CERT_ERROR:
- return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)
+ return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
-class LUVerifyCluster(LogicalUnit):
+class LUClusterVerify(LogicalUnit):
"""Verifies the cluster status.
"""
EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
+ EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
ENODEDRBD = (TNODE, "ENODEDRBD")
ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
node = ninfo.name
# We just have to verify the paths on master and/or master candidates
# as the oob helper is invoked on the master
- if ((ninfo.master_candidate or ninfo.master) and
+ if ((ninfo.master_candidate or ninfo.master_capable) and
constants.NV_OOB_PATHS in nresult):
for path_result in nresult[constants.NV_OOB_PATHS]:
self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
"""Verify integrity of cluster, performing various test on nodes.
"""
+ # This method has too many local variables. pylint: disable-msg=R0914
self.bad = False
_ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
verbose = self.op.verbose
cluster = self.cfg.GetClusterInfo()
nodelist = utils.NiceSort(self.cfg.GetNodeList())
nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
+ nodeinfo_byname = dict(zip(nodelist, nodeinfo))
instancelist = utils.NiceSort(self.cfg.GetInstanceList())
instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
for iname in instancelist)
+ groupinfo = self.cfg.GetAllNodeGroupsInfo()
i_non_redundant = [] # Non redundant instances
i_non_a_balanced = [] # Non auto-balanced instances
n_offline = 0 # Count of offline nodes
# FIXME: does not support file-backed instances
if not inst_config.secondary_nodes:
i_non_redundant.append(instance)
+
_ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
instance, "instance has multiple secondary nodes: %s",
utils.CommaJoin(inst_config.secondary_nodes),
code=self.ETYPE_WARNING)
+ if inst_config.disk_template in constants.DTS_NET_MIRROR:
+ pnode = inst_config.primary_node
+ instance_nodes = utils.NiceSort(inst_config.all_nodes)
+ instance_groups = {}
+
+ for node in instance_nodes:
+ instance_groups.setdefault(nodeinfo_byname[node].group,
+ []).append(node)
+
+ pretty_list = [
+ "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
+        # Sort so that the group containing the primary node comes first.
+ for group, nodes in sorted(instance_groups.items(),
+ key=lambda (_, nodes): pnode in nodes,
+ reverse=True)]
+
+ self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
+ instance, "instance has primary and secondary nodes in"
+ " different groups: %s", utils.CommaJoin(pretty_list),
+ code=self.ETYPE_WARNING)
+
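
The sort key in the new pretty_list code is easy to misread: it orders the
(group, nodes) pairs on the boolean "pnode in nodes" with reverse=True, so the
group holding the primary node is always listed first. A standalone rehearsal
with made-up names, using a plain lambda instead of the Python 2
tuple-unpacking form:

```python
# Stand-in data; the real code works with group UUIDs and config objects.
pnode = "node1"
instance_groups = {
  "uuid-a": ["node2", "node3"],  # secondaries only
  "uuid-b": ["node1"],           # holds the primary node
}
group_names = {"uuid-a": "other", "uuid-b": "default"}

# Booleans sort False < True, so reverse=True puts the primary's group first.
pretty_list = [
  "%s (group %s)" % (", ".join(nodes), group_names[group])
  for group, nodes in sorted(instance_groups.items(),
                             key=lambda item: pnode in item[1],
                             reverse=True)]

assert pretty_list == ["node1 (group default)",
                       "node2, node3 (group other)"]
```
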
if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
i_non_a_balanced.append(instance)
return lu_result
-class LUVerifyDisks(NoHooksLU):
+class LUClusterVerifyDisks(NoHooksLU):
"""Verifies the cluster disks status.
"""
return result
-class LURepairDiskSizes(NoHooksLU):
+class LUClusterRepairDiskSizes(NoHooksLU):
"""Verifies the cluster disks sizes.
"""
return changed
-class LURenameCluster(LogicalUnit):
+class LUClusterRename(LogicalUnit):
"""Rename the cluster.
"""
return clustername
-class LUSetClusterParams(LogicalUnit):
+class LUClusterSetParams(LogicalUnit):
"""Change the parameters of the cluster.
"""
_UploadHelper(lu, vm_nodes, fname)
-class LURedistributeConfig(NoHooksLU):
+class LUClusterRedistConf(NoHooksLU):
"""Force the redistribution of cluster configuration.
This is a very simple LU.
node_to_primary = None
node_to_secondary = None
+ if query.NQ_OOB in self.requested_data:
+ oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
+ for name, node in all_info.iteritems())
+ else:
+ oob_support = None
+
if query.NQ_GROUP in self.requested_data:
groups = lu.cfg.GetAllNodeGroupsInfo()
else:
return query.NodeQueryData([all_info[name] for name in nodenames],
live_data, lu.cfg.GetMasterNode(),
- node_to_primary, node_to_secondary, groups)
+ node_to_primary, node_to_secondary, groups,
+ oob_support, lu.cfg.GetClusterInfo())
class LUQueryNodes(NoHooksLU):
return result.payload
-class LUQueryClusterInfo(NoHooksLU):
+class LUClusterQuery(NoHooksLU):
"""Query cluster configuration.
"""
"beparams": cluster.beparams,
"osparams": cluster.osparams,
"nicparams": cluster.nicparams,
+ "ndparams": cluster.ndparams,
"candidate_pool_size": cluster.candidate_pool_size,
"master_netdev": cluster.master_netdev,
"volume_group_name": cluster.volume_group_name,
return result
-class LUQueryConfigValues(NoHooksLU):
+class LUClusterConfigQuery(NoHooksLU):
"""Return configuration values.
"""
return values
-class LUActivateInstanceDisks(NoHooksLU):
+class LUInstanceActivateDisks(NoHooksLU):
"""Bring up an instance's disks.
"""
raise errors.OpExecError("Disk consistency error")
-class LUDeactivateInstanceDisks(NoHooksLU):
+class LUInstanceDeactivateDisks(NoHooksLU):
"""Shutdown an instance's disks.
"""
or we cannot check the node
"""
- if req_sizes is not None:
- for vg, req_size in req_sizes.iteritems():
- _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
+ for vg, req_size in req_sizes.items():
+ _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
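
The dropped "req_sizes is not None" guard works because callers now express
"no VG requirements" as an empty dict (see the DT_DISKLESS and DT_FILE entries
of req_size_dict further down). A minimal sketch of the resulting uniform call
pattern; check_free_disk_per_vg is a made-up stand-in for
_CheckNodesFreeDiskOnVG:

```python
def check_free_disk_per_vg(req_sizes):
  # With "no requirements" spelled {} instead of None, the loop needs no
  # special case; zero iterations simply means nothing to check.
  checked = []
  for vg, req_size in req_sizes.items():
    checked.append((vg, req_size))
  return checked

assert check_free_disk_per_vg({}) == []                      # diskless/file
assert check_free_disk_per_vg({"xenvg": 2048}) == [("xenvg", 2048)]
```
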
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
-class LUQueryInstances(NoHooksLU):
+class LUInstanceQuery(NoHooksLU):
"""Logical unit for querying instances.
"""
return self.iq.OldStyleQuery(self)
-class LUFailoverInstance(LogicalUnit):
+class LUInstanceFailover(LogicalUnit):
"""Failover an instance.
"""
(instance.name, target_node, msg))
-class LUMigrateInstance(LogicalUnit):
+class LUInstanceMigrate(LogicalUnit):
"""Migrate an instance.
This is migration without shutting down, compared to the failover,
return env, nl, nl_post
-class LUMoveInstance(LogicalUnit):
+class LUInstanceMove(LogicalUnit):
"""Move an instance by data-copying.
"""
"""
node = instance.primary_node
- for idx, device in enumerate(instance.disks):
- lu.LogInfo("* Wiping disk %d", idx)
- logging.info("Wiping disk %d for instance %s", idx, instance.name)
-
- # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
- # MAX_WIPE_CHUNK at max
- wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
- constants.MIN_WIPE_CHUNK_PERCENT)
-
- offset = 0
- size = device.size
- last_output = 0
- start_time = time.time()
-
- while offset < size:
- wipe_size = min(wipe_chunk_size, size - offset)
- result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
- result.Raise("Could not wipe disk %d at offset %d for size %d" %
- (idx, offset, wipe_size))
- now = time.time()
- offset += wipe_size
- if now - last_output >= 60:
- eta = _CalcEta(now - start_time, offset, size)
- lu.LogInfo(" - done: %.1f%% ETA: %s" %
- (offset / float(size) * 100, utils.FormatSeconds(eta)))
- last_output = now
+ logging.info("Pause sync of instance %s disks", instance.name)
+ result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
+
+ for idx, success in enumerate(result.payload):
+ if not success:
+ logging.warn("pause-sync of instance %s for disks %d failed",
+ instance.name, idx)
+
+ try:
+ for idx, device in enumerate(instance.disks):
+ lu.LogInfo("* Wiping disk %d", idx)
+ logging.info("Wiping disk %d for instance %s", idx, instance.name)
+
+ # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
+ # MAX_WIPE_CHUNK at max
+ wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
+ constants.MIN_WIPE_CHUNK_PERCENT)
+
+ offset = 0
+ size = device.size
+ last_output = 0
+ start_time = time.time()
+
+ while offset < size:
+ wipe_size = min(wipe_chunk_size, size - offset)
+ result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
+ result.Raise("Could not wipe disk %d at offset %d for size %d" %
+ (idx, offset, wipe_size))
+ now = time.time()
+ offset += wipe_size
+ if now - last_output >= 60:
+ eta = _CalcEta(now - start_time, offset, size)
+ lu.LogInfo(" - done: %.1f%% ETA: %s" %
+ (offset / float(size) * 100, utils.FormatSeconds(eta)))
+ last_output = now
+ finally:
+ logging.info("Resume sync of instance %s disks", instance.name)
+
+ result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
+
+ for idx, success in enumerate(result.payload):
+ if not success:
+ lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
+ " look at the status and troubleshoot the issue.", idx)
+ logging.warn("resume-sync of instance %s for disks %d failed",
+ instance.name, idx)
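
The chunk-size formula bounds both the number of wipe RPCs and the size of
each request. A worked example, assuming MAX_WIPE_CHUNK = 1024 MiB and
MIN_WIPE_CHUNK_PERCENT = 10 (the values in constants.py are authoritative):

```python
MAX_WIPE_CHUNK = 1024        # MiB; assumed value
MIN_WIPE_CHUNK_PERCENT = 10  # assumed value

def wipe_call_count(size):
  # The chunk is 10% of the disk, capped at 1 GiB: small disks finish in
  # about ten calls, and large disks never issue oversized requests.
  chunk = min(MAX_WIPE_CHUNK, size / 100.0 * MIN_WIPE_CHUNK_PERCENT)
  offset = calls = 0
  while offset < size:
    offset += min(chunk, size - offset)
    calls += 1
  return calls

assert wipe_call_count(500) == 10      # 500 MiB disk: ten 50 MiB chunks
assert wipe_call_count(102400) == 100  # 100 GiB disk: capped 1 GiB chunks
```
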
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
# Required free disk space as a function of disk and swap space
req_size_dict = {
- constants.DT_DISKLESS: None,
+ constants.DT_DISKLESS: {},
constants.DT_PLAIN: _compute(disks, 0),
# 128 MB are added for drbd metadata for each disk
constants.DT_DRBD8: _compute(disks, 128),
- constants.DT_FILE: None,
+ constants.DT_FILE: {},
}
if disk_template not in req_size_dict:
osname, node)
-class LUCreateInstance(LogicalUnit):
+class LUInstanceCreate(LogicalUnit):
"""Create an instance.
"""
return list(iobj.all_nodes)
-class LUConnectConsole(NoHooksLU):
+class LUInstanceConsole(NoHooksLU):
"""Connect to an instance's console.
This is somewhat special in that it returns the command line that
# instance and then saving the defaults in the instance itself.
hvparams = cluster.FillHV(instance)
beparams = cluster.FillBE(instance)
- console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
+ console = hyper.GetInstanceConsole(instance, hvparams, beparams)
+
+ assert console.instance == instance.name
+ assert console.Validate()
- # build ssh cmdline
- return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
+ return console.ToDict()
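
Returning a serialized console description instead of a pre-built SSH command
line moves the "how to connect" decision to the client. A hypothetical
consumer of the returned dict; the field names and console kinds below are
illustrative only, not the authoritative objects.InstanceConsole schema:

```python
def build_connect_argv(console):
  # Dispatch on the console kind; only two invented kinds are shown.
  if console["kind"] == "ssh":
    return ["ssh", "-t", "root@%s" % console["host"]] + console["command"]
  elif console["kind"] == "vnc":
    return ["vncviewer", "%s::%s" % (console["host"], console["port"])]
  raise NotImplementedError("unhandled console kind %r" % console["kind"])

argv = build_connect_argv({"kind": "ssh", "host": "node1.example.com",
                           "command": ["xm", "console", "inst1"]})
assert argv[:2] == ["ssh", "-t"]
```
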
class LUReplaceDisks(LogicalUnit):
return result
-class LUGrowDisk(LogicalUnit):
+class LUInstanceGrowDisk(LogicalUnit):
"""Grow a disk of an instance.
"""
# TODO: check the free disk space for file, when that feature
# will be supported
_CheckNodesFreeDiskPerVG(self, nodenames,
- {self.disk.physical_id[0]: self.op.amount})
+ self.disk.ComputeGrowth(self.op.amount))
def Exec(self, feedback_fn):
"""Execute disk grow.
}
-class LUQueryExports(NoHooksLU):
+class LUBackupQuery(NoHooksLU):
"""Query the exports list
"""
return result
-class LUPrepareExport(NoHooksLU):
+class LUBackupPrepare(NoHooksLU):
"""Prepares an instance for an export and returns useful information.
"""
return None
-class LUExportInstance(LogicalUnit):
+class LUBackupExport(LogicalUnit):
"""Export an instance to an image in the cluster.
"""
nodelist.remove(self.dst_node.name)
# on one-node clusters nodelist will be empty after the removal
- # if we proceed the backup would be removed because OpQueryExports
+ # if we proceed the backup would be removed because OpBackupQuery
# substitutes an empty list with the full cluster node list.
iname = self.instance.name
if nodelist:
return fin_resu, dresults
-class LURemoveExport(NoHooksLU):
+class LUBackupRemove(NoHooksLU):
"""Remove exports related to the named instance.
"""
" Domain Name.")
-class LUAddGroup(LogicalUnit):
+class LUGroupAdd(LogicalUnit):
"""Logical unit for creating node groups.
"""
del self.remove_locks[locking.LEVEL_NODEGROUP]
-class LUQueryGroups(NoHooksLU):
- """Logical unit for querying node groups.
+class LUGroupAssignNodes(NoHooksLU):
+ """Logical unit for assigning nodes to groups.
"""
- # pylint: disable-msg=W0142
REQ_BGL = False
- _FIELDS_DYNAMIC = utils.FieldSet()
- _SIMPLE_FIELDS = ["name", "uuid", "alloc_policy",
- "ctime", "mtime", "serial_no"]
- _FIELDS_STATIC = utils.FieldSet("node_cnt", "node_list", "pinst_cnt",
- "pinst_list", *_SIMPLE_FIELDS)
-
- def CheckArguments(self):
- _CheckOutputFields(static=self._FIELDS_STATIC,
- dynamic=self._FIELDS_DYNAMIC,
- selected=self.op.output_fields)
def ExpandNames(self):
- self.needed_locks = {}
+ # These raise errors.OpPrereqError on their own:
+ self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
+ self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+
+ # We want to lock all the affected nodes and groups. We have readily
+ # available the list of nodes, and the *destination* group. To gather the
+ # list of "source" groups, we need to fetch node information.
+ self.node_data = self.cfg.GetAllNodesInfo()
+ affected_groups = set(self.node_data[node].group for node in self.op.nodes)
+ affected_groups.add(self.group_uuid)
+
+ self.needed_locks = {
+ locking.LEVEL_NODEGROUP: list(affected_groups),
+ locking.LEVEL_NODE: self.op.nodes,
+ }
+
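
The comment in ExpandNames explains why the configuration must be read this
early: the lock set is the union of every source group plus the destination
group. Rehearsed with plain dicts and made-up UUIDs:

```python
node_group = {"node1": "uuid-a", "node2": "uuid-b"}  # node -> current group
target_group, nodes = "uuid-c", ["node1", "node2"]

affected = set(node_group[node] for node in nodes)   # source groups
affected.add(target_group)                           # destination group
assert affected == set(["uuid-a", "uuid-b", "uuid-c"])
```
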
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ """
+ self.group = self.cfg.GetNodeGroup(self.group_uuid)
+ instance_data = self.cfg.GetAllInstancesInfo()
+
+ if self.group is None:
+ raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
+ (self.op.group_name, self.group_uuid))
+
+ (new_splits, previous_splits) = \
+ self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
+ for node in self.op.nodes],
+ self.node_data, instance_data)
+
+ if new_splits:
+ fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
+
+ if not self.op.force:
+ raise errors.OpExecError("The following instances get split by this"
+ " change and --force was not given: %s" %
+ fmt_new_splits)
+ else:
+ self.LogWarning("This operation will split the following instances: %s",
+ fmt_new_splits)
+
+ if previous_splits:
+ self.LogWarning("In addition, these already-split instances continue"
+ " to be spit across groups: %s",
+ utils.CommaJoin(utils.NiceSort(previous_splits)))
def Exec(self, feedback_fn):
- """Computes the list of groups and their attributes.
+ """Assign nodes to a new group.
+
+ """
+ for node in self.op.nodes:
+ self.node_data[node].group = self.group_uuid
+
+ self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
+
+ @staticmethod
+ def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
+ """Check for split instances after a node assignment.
+
+ This method considers a series of node assignments as an atomic operation,
+ and returns information about split instances after applying the set of
+ changes.
+
+ In particular, it returns information about newly split instances, and
+    instances that were already split and remain so after the change.
+
+ Only instances whose disk template is listed in constants.DTS_NET_MIRROR are
+ considered.
+
+ @type changes: list of (node_name, new_group_uuid) pairs.
+ @param changes: list of node assignments to consider.
+ @param node_data: a dict with data for all nodes
+ @param instance_data: a dict with all instances to consider
+ @rtype: a two-tuple
+    @return: a list of instances that were previously okay and become split as
+      a consequence of this change, and a list of instances that were
+      previously split and remain so after the change.
"""
- all_groups = self.cfg.GetAllNodeGroupsInfo()
- name_to_uuid = dict((g.name, g.uuid) for g in all_groups.values())
+ changed_nodes = dict((node, group) for node, group in changes
+ if node_data[node].group != group)
- if not self.op.names:
- sorted_names = utils.NiceSort(name_to_uuid.keys())
- my_groups = [name_to_uuid[n] for n in sorted_names]
+ all_split_instances = set()
+ previously_split_instances = set()
+
+ def InstanceNodes(instance):
+ return [instance.primary_node] + list(instance.secondary_nodes)
+
+ for inst in instance_data.values():
+ if inst.disk_template not in constants.DTS_NET_MIRROR:
+ continue
+
+ instance_nodes = InstanceNodes(inst)
+
+ if len(set(node_data[node].group for node in instance_nodes)) > 1:
+ previously_split_instances.add(inst.name)
+
+ if len(set(changed_nodes.get(node, node_data[node].group)
+ for node in instance_nodes)) > 1:
+ all_split_instances.add(inst.name)
+
+ return (list(all_split_instances - previously_split_instances),
+ list(previously_split_instances & all_split_instances))
+
+
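
CheckAssignmentForSplitInstances is a pure function of its three arguments, so
its contract can be rehearsed in isolation. A self-contained sketch of the
same before/after logic with stand-in objects (the real arguments are Ganeti
config objects, and every instance here is assumed to use a template from
DTS_NET_MIRROR):

```python
class FakeNode(object):
  def __init__(self, group):
    self.group = group

class FakeInstance(object):
  def __init__(self, name, pnode, snodes):
    self.name = name
    self.primary_node = pnode
    self.secondary_nodes = snodes

node_data = {"n1": FakeNode("g1"), "n2": FakeNode("g1"),
             "n3": FakeNode("g2")}
instances = [FakeInstance("ok", "n1", ["n2"]),        # both nodes in g1
             FakeInstance("presplit", "n1", ["n3"])]  # spans g1 and g2

# Proposed change: move n2 into g2. As above, no-op assignments are dropped.
changes = [("n2", "g2")]
changed = dict((n, g) for n, g in changes if node_data[n].group != g)

new_splits, still_split = [], []
for inst in instances:
  nodes = [inst.primary_node] + list(inst.secondary_nodes)
  before = set(node_data[n].group for n in nodes)
  after = set(changed.get(n, node_data[n].group) for n in nodes)
  if len(after) > 1:
    (still_split if len(before) > 1 else new_splits).append(inst.name)

assert new_splits == ["ok"]          # newly split by the move
assert still_split == ["presplit"]   # split before, still split after
```
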
+class _GroupQuery(_QueryBase):
+
+ FIELDS = query.GROUP_FIELDS
+
+ def ExpandNames(self, lu):
+ lu.needed_locks = {}
+
+ self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
+ name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
+
+ if not self.names:
+ self.wanted = [name_to_uuid[name]
+ for name in utils.NiceSort(name_to_uuid.keys())]
else:
# Accept names to be either names or UUIDs.
- all_uuid = frozenset(all_groups.keys())
- my_groups = []
missing = []
+ self.wanted = []
+ all_uuid = frozenset(self._all_groups.keys())
- for name in self.op.names:
+ for name in self.names:
if name in all_uuid:
- my_groups.append(name)
+ self.wanted.append(name)
elif name in name_to_uuid:
- my_groups.append(name_to_uuid[name])
+ self.wanted.append(name_to_uuid[name])
else:
missing.append(name)
raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
errors.ECODE_NOENT)
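
The name-or-UUID acceptance deserves a tiny rehearsal; the identifiers are
made up and ValueError stands in for errors.OpPrereqError:

```python
all_groups = {"d365e0b8-eed8": "default", "1478a2a0-4fc4": "rack2"}
name_to_uuid = dict((name, uuid) for uuid, name in all_groups.items())

def resolve(names):
  wanted, missing = [], []
  for name in names:
    if name in all_groups:        # the caller passed a UUID
      wanted.append(name)
    elif name in name_to_uuid:    # the caller passed a group name
      wanted.append(name_to_uuid[name])
    else:
      missing.append(name)
  if missing:
    raise ValueError("Some groups do not exist: %s" % missing)
  return wanted

assert resolve(["default", "1478a2a0-4fc4"]) == ["d365e0b8-eed8",
                                                 "1478a2a0-4fc4"]
```
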
- do_nodes = bool(frozenset(["node_cnt", "node_list"]).
- intersection(self.op.output_fields))
+ def DeclareLocks(self, lu, level):
+ pass
- do_instances = bool(frozenset(["pinst_cnt", "pinst_list"]).
- intersection(self.op.output_fields))
+ def _GetQueryData(self, lu):
+ """Computes the list of node groups and their attributes.
+
+ """
+ do_nodes = query.GQ_NODE in self.requested_data
+ do_instances = query.GQ_INST in self.requested_data
- # We need to map group->[nodes], and group->[instances]. The former is
- # directly attainable, but the latter we have to do through instance->node,
- # hence we need to process nodes even if we only need instance information.
+ group_to_nodes = None
+ group_to_instances = None
+
+    # For GQ_NODE, we need to map group->[nodes], and for GQ_INST,
+    # group->[instances]. The former is attainable with just
+    # GetAllNodesInfo(), but for the latter GetAllInstancesInfo() is not
+    # enough, as we also have to go through instance->node. Hence, we need to
+    # process nodes even if only instance information was requested.
if do_nodes or do_instances:
- all_nodes = self.cfg.GetAllNodesInfo()
- group_to_nodes = dict((all_groups[name].uuid, []) for name in my_groups)
+ all_nodes = lu.cfg.GetAllNodesInfo()
+ group_to_nodes = dict((uuid, []) for uuid in self.wanted)
node_to_group = {}
for node in all_nodes.values():
node_to_group[node.name] = node.group
if do_instances:
- all_instances = self.cfg.GetAllInstancesInfo()
- group_to_instances = dict((all_groups[name].uuid, [])
- for name in my_groups)
+ all_instances = lu.cfg.GetAllInstancesInfo()
+ group_to_instances = dict((uuid, []) for uuid in self.wanted)
+
for instance in all_instances.values():
node = instance.primary_node
if node in node_to_group:
group_to_instances[node_to_group[node]].append(instance.name)
- output = []
+ if not do_nodes:
+ # Do not pass on node information if it was not requested.
+ group_to_nodes = None
- for uuid in my_groups:
- group = all_groups[uuid]
- group_output = []
+ return query.GroupQueryData([self._all_groups[uuid]
+ for uuid in self.wanted],
+ group_to_nodes, group_to_instances)
- for field in self.op.output_fields:
- if field in self._SIMPLE_FIELDS:
- val = getattr(group, field)
- elif field == "node_list":
- val = utils.NiceSort(group_to_nodes[group.uuid])
- elif field == "node_cnt":
- val = len(group_to_nodes[group.uuid])
- elif field == "pinst_list":
- val = utils.NiceSort(group_to_instances[group.uuid])
- elif field == "pinst_cnt":
- val = len(group_to_instances[group.uuid])
- else:
- raise errors.ParameterError(field)
- group_output.append(val)
- output.append(group_output)
- return output
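
The GQ_NODE/GQ_INST comment is the crux of _GetQueryData: instances only
record node names, so their group membership has to be joined through the
node-to-group map. A toy version of both mappings:

```python
node_to_group = {"n1": "g1", "n2": "g1", "n3": "g2"}
instance_pnode = {"inst1": "n1", "inst2": "n3"}  # primary nodes only

group_to_nodes = {"g1": [], "g2": []}
for node, group in node_to_group.items():
  group_to_nodes[group].append(node)

# Instance membership needs the extra hop through node_to_group.
group_to_instances = {"g1": [], "g2": []}
for inst, pnode in instance_pnode.items():
  group_to_instances[node_to_group[pnode]].append(inst)

assert sorted(group_to_nodes["g1"]) == ["n1", "n2"]
assert group_to_instances == {"g1": ["inst1"], "g2": ["inst2"]}
```
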
+class LUGroupQuery(NoHooksLU):
+ """Logical unit for querying node groups.
+ """
+ REQ_BGL = False
-class LUSetGroupParams(LogicalUnit):
+ def CheckArguments(self):
+ self.gq = _GroupQuery(self.op.names, self.op.output_fields, False)
+
+ def ExpandNames(self):
+ self.gq.ExpandNames(self)
+
+ def Exec(self, feedback_fn):
+ return self.gq.OldStyleQuery(self)
+
+
+class LUGroupSetParams(LogicalUnit):
"""Modifies the parameters of a node group.
"""
(self.op.group_name, self.group_uuid))
if self.op.ndparams:
+ new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
- self.new_ndparams = self.group.SimpleFillND(self.op.ndparams)
+ self.new_ndparams = new_ndparams
def BuildHooksEnv(self):
"""Build hooks env.
-class LURemoveGroup(LogicalUnit):
+class LUGroupRemove(LogicalUnit):
HPATH = "group-remove"
HTYPE = constants.HTYPE_GROUP
REQ_BGL = False
self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
-class LURenameGroup(LogicalUnit):
+class LUGroupRename(LogicalUnit):
HPATH = "group-rename"
HTYPE = constants.HTYPE_GROUP
REQ_BGL = False
"enabled_hypervisors": list(cluster_info.enabled_hypervisors),
# we don't have job IDs
}
+ ninfo = cfg.GetAllNodesInfo()
iinfo = cfg.GetAllInstancesInfo().values()
i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
# node data
- node_list = cfg.GetNodeList()
+ node_list = [n.name for n in ninfo.values() if n.vm_capable]
if self.mode == constants.IALLOCATOR_MODE_ALLOC:
hypervisor_name = self.hypervisor
data["nodegroups"] = self._ComputeNodeGroupData(cfg)
- data["nodes"] = self._ComputeNodeData(cfg, node_data, node_iinfo, i_list)
+ config_ndata = self._ComputeBasicNodeData(ninfo)
+ data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
+ i_list, config_ndata)
+ assert len(data["nodes"]) == len(ninfo), \
+ "Incomplete node data computed"
data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
return ng
@staticmethod
- def _ComputeNodeData(cfg, node_data, node_iinfo, i_list):
+ def _ComputeBasicNodeData(node_cfg):
"""Compute global node data.
+ @rtype: dict
+    @return: a dict mapping each node name to a dict of its static,
+      config-derived attributes
+
"""
node_results = {}
- for nname, nresult in node_data.items():
- # first fill in static (config-based) values
- ninfo = cfg.GetNodeInfo(nname)
+ for ninfo in node_cfg.values():
+ # fill in static (config-based) values
pnr = {
"tags": list(ninfo.GetTags()),
"primary_ip": ninfo.primary_ip,
"vm_capable": ninfo.vm_capable,
}
+ node_results[ninfo.name] = pnr
+
+ return node_results
+
+ @staticmethod
+ def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
+ node_results):
+ """Compute global node data.
+
+ @param node_results: the basic node structures as filled from the config
+
+ """
+ # make a copy of the current dict
+ node_results = dict(node_results)
+ for nname, nresult in node_data.items():
+ assert nname in node_results, "Missing basic data for node %s" % nname
+ ninfo = node_cfg[nname]
+
if not (ninfo.offline or ninfo.drained):
nresult.Raise("Can't get data for node %s" % nname)
node_iinfo[nname].Raise("Can't get node instance info from node %s" %
"i_pri_memory": i_p_mem,
"i_pri_up_memory": i_p_up_mem,
}
- pnr.update(pnr_dyn)
+ pnr_dyn.update(node_results[nname])
- node_results[nname] = pnr
+ node_results[nname] = pnr_dyn
return node_results
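
Note the merge direction in the new code: pnr_dyn.update(node_results[nname])
lets the config-derived values win on any key collision, the opposite of the
old pnr.update(pnr_dyn). The two key sets do not overlap in practice, but the
precedence is easy to demonstrate (keys invented for the example):

```python
static = {"vm_capable": True, "shared": "from-config"}
dynamic = {"free_memory": 1024, "shared": "from-rpc"}

merged = dict(dynamic)  # start from the RPC-derived dict, like pnr_dyn
merged.update(static)   # config-derived values overwrite on collision
assert merged["shared"] == "from-config"
assert merged["free_memory"] == 1024
```
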
_QUERY_IMPL = {
constants.QR_INSTANCE: _InstanceQuery,
constants.QR_NODE: _NodeQuery,
+ constants.QR_GROUP: _GroupQuery,
}