fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
"nic.modes", "nic.links", "nic.bridges",
- "snodes"],
+ "snodes", "snodes.group", "snodes.group.uuid"],
(lambda value: ",".join(str(item)
for item in value),
False))
def ExpandNames(self, lu):
lu.needed_locks = {}
- lu.share_locks[locking.LEVEL_INSTANCE] = 1
- lu.share_locks[locking.LEVEL_NODE] = 1
+ lu.share_locks = _ShareAll()
if self.names:
self.wanted = _GetWantedInstances(lu, self.names)
query.IQ_LIVE in self.requested_data)
if self.do_locking:
lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
+ lu.needed_locks[locking.LEVEL_NODEGROUP] = []
lu.needed_locks[locking.LEVEL_NODE] = []
lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ self.do_grouplocks = (self.do_locking and
+ query.IQ_NODES in self.requested_data)
+
def DeclareLocks(self, lu, level):
- if level == locking.LEVEL_NODE and self.do_locking:
- lu._LockInstancesNodes() # pylint: disable-msg=W0212
+ if self.do_locking:
+ if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
+ assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
+
+ # Lock all groups used by the instances optimistically; the instance-to-group
+ # mapping is read from the configuration before the node locks are held, so
+ # ownership has to be re-verified once all locks have been acquired
+ # (see _CheckGroupLocks)
+ lu.needed_locks[locking.LEVEL_NODEGROUP] = \
+ set(group_uuid
+ for instance_name in
+ lu.glm.list_owned(locking.LEVEL_INSTANCE)
+ for group_uuid in
+ lu.cfg.GetInstanceNodeGroups(instance_name))
+ elif level == locking.LEVEL_NODE:
+ lu._LockInstancesNodes() # pylint: disable-msg=W0212
+
+ @staticmethod
+ def _CheckGroupLocks(lu):
+ owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+
+ # Check if node groups for locked instances are still correct
+ for instance_name in owned_instances:
+ inst_groups = lu.cfg.GetInstanceNodeGroups(instance_name)
+ if not owned_groups.issuperset(inst_groups):
+ raise errors.OpPrereqError("Instance %s's node groups changed since"
+ " locks were acquired, current groups are"
+ " '%s', owning groups '%s'; retry the"
+ " operation" %
+ (instance_name,
+ utils.CommaJoin(inst_groups),
+ utils.CommaJoin(owned_groups)),
+ errors.ECODE_STATE)
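# Illustrative sketch (not part of the patch above): the _CheckGroupLocks
# re-verification boils down to a superset test between the optimistically
# locked groups and the groups the configuration reports once all locks are
# held. The helper and data below are hypothetical stand-ins, not Ganeti APIs.
def _sketch_changed_instances(owned_groups, instance_groups):
  """Returns instances whose node groups are no longer covered by our locks."""
  owned = frozenset(owned_groups)
  return [name for (name, groups) in sorted(instance_groups.items())
          if not owned.issuperset(groups)]

# "inst2" gained a node in group "g3" after the group locks were taken, so the
# check flags it and the operation has to be retried with fresh locks.
assert _sketch_changed_instances(["g1", "g2"],
                                 {"inst1": ["g1"],
                                  "inst2": ["g1", "g3"]}) == ["inst2"]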
def _GetQueryData(self, lu):
"""Computes the list of instances and their attributes.
"""
+ if self.do_grouplocks:
+ self._CheckGroupLocks(lu)
+
cluster = lu.cfg.GetClusterInfo()
all_info = lu.cfg.GetAllInstancesInfo()
else:
consinfo = None
+ if query.IQ_NODES in self.requested_data:
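+ # Collect every node used by the selected instances (primary and secondary),
+ # look up their node objects and, from those, the node group objects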
+ node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
+ instance_list)))
+ nodes = dict((name, lu.cfg.GetNodeInfo(name)) for name in node_names)
+ groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
+ for uuid in set(map(operator.attrgetter("group"),
+ nodes.values())))
+ else:
+ nodes = None
+ groups = None
+
return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
disk_usage, offline_nodes, bad_nodes,
- live_data, wrongnode_inst, consinfo)
+ live_data, wrongnode_inst, consinfo,
+ nodes, groups)
class LUQuery(NoHooksLU):
(IQ_CONFIG,
IQ_LIVE,
IQ_DISKUSAGE,
- IQ_CONSOLE) = range(100, 104)
+ IQ_CONSOLE,
+ IQ_NODES) = range(100, 105)
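+ # IQ_NODES requests the node and node group objects behind an instance's
+ # nodes; it is used by the "pnode.group*" and "snodes.group*" fields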
(LQ_MODE,
LQ_OWNER,
"""
def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes,
- live_data, wrongnode_inst, console):
+ live_data, wrongnode_inst, console, nodes, groups):
"""Initializes this class.
@param instances: List of instance objects
@param wrongnode_inst: Set of instances running on wrong node(s)
@type console: dict; instance name as key
@param console: Per-instance console information
+ @type nodes: dict; node name as key
+ @param nodes: Node objects
+ @type groups: dict; group UUID as key
+ @param groups: Group objects
"""
assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \
self.live_data = live_data
self.wrongnode_inst = wrongnode_inst
self.console = console
+ self.nodes = nodes
+ self.groups = groups
# Used for individual rows
self.inst_hvparams = None
}
+def _GetInstNodeGroup(ctx, default, node_name):
+ """Gets group UUID of an instance node.
+
+ @type ctx: L{InstanceQueryData}
+ @param default: Default value
+ @type node_name: string
+ @param node_name: Node name
+
+ """
+ try:
+ node = ctx.nodes[node_name]
+ except KeyError:
+ return default
+ else:
+ return node.group
+
+
+def _GetInstNodeGroupName(ctx, default, node_name):
+ """Gets group name of an instance node.
+
+ @type ctx: L{InstanceQueryData}
+ @param default: Default value
+ @type node_name: string
+ @param node_name: Node name
+
+ """
+ try:
+ node = ctx.nodes[node_name]
+ except KeyError:
+ return default
+
+ try:
+ group = ctx.groups[node.group]
+ except KeyError:
+ return default
+
+ return group.name
+
+
def _BuildInstanceFields():
"""Builds list of fields for instance queries.
fields = [
(_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"),
IQ_CONFIG, QFF_HOSTNAME, _GetItemAttr("primary_node")),
+ (_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT,
+ "Primary node's group"),
+ IQ_NODES, 0,
+ lambda ctx, inst: _GetInstNodeGroupName(ctx, _FS_UNAVAIL,
+ inst.primary_node)),
+ (_MakeField("pnode.group.uuid", "PrimaryNodeGroupUUID", QFT_TEXT,
+ "Primary node's group UUID"),
+ IQ_NODES, 0,
+ lambda ctx, inst: _GetInstNodeGroup(ctx, _FS_UNAVAIL, inst.primary_node)),
# TODO: Allow filtering by secondary node as hostname
(_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
"Secondary nodes; usually this will just be one node"),
IQ_CONFIG, 0, lambda ctx, inst: list(inst.secondary_nodes)),
+ (_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
+ "Node groups of secondary nodes"),
+ IQ_NODES, 0,
+ lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
+ inst.secondary_nodes)),
+ (_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
+ "Node group UUIDs of secondary nodes"),
+ IQ_NODES, 0,
+ lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
+ inst.secondary_nodes)),
(_MakeField("admin_state", "Autostart", QFT_BOOL,
"Desired state of instance (if set, the instance should be"
" up)"),
]
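# Illustrative sketch (not part of the patch above): how the new "snodes.group"
# field turns secondary node names into group names through the two lookup
# tables carried by InstanceQueryData. The _Fake* classes and the helper are
# stand-ins mirroring _GetInstNodeGroupName, not Ganeti's objects module.
class _FakeNode(object):
  def __init__(self, group):
    self.group = group  # UUID of the group this node belongs to


class _FakeGroup(object):
  def __init__(self, name):
    self.name = name


class _FakeCtx(object):
  def __init__(self, nodes, groups):
    self.nodes = nodes    # node name -> node object
    self.groups = groups  # group UUID -> group object


def _sketch_group_name(ctx, default, node_name):
  # Same chain as _GetInstNodeGroupName: node name -> node -> group -> name
  node = ctx.nodes.get(node_name)
  if node is None:
    return default
  group = ctx.groups.get(node.group)
  return default if group is None else group.name


ctx = _FakeCtx(nodes={"node2": _FakeNode("uuid-a"),
                      "node3": _FakeNode("uuid-b")},
               groups={"uuid-a": _FakeGroup("rack1"),
                       "uuid-b": _FakeGroup("rack2")})

# A node missing from the locked data (e.g. after a configuration change) maps
# to the default; the secondary-node fields pass None as the default, while
# "pnode.group" uses _FS_UNAVAIL for its single value.
assert [_sketch_group_name(ctx, None, name)
        for name in ["node2", "node3", "node-gone"]] == ["rack1", "rack2", None]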
iqd = query.InstanceQueryData(instances, cluster, None, [], [], {},
- set(), {})
+ set(), {}, None, None)
self.assertEqual(q.Query(iqd),
[[(constants.RS_NORMAL, "inst1"),
(constants.RS_NORMAL, 128),
q = self._Create(selected)
self.assertEqual(q.RequestedData(),
set([query.IQ_CONFIG, query.IQ_LIVE, query.IQ_DISKUSAGE,
- query.IQ_CONSOLE]))
+ query.IQ_CONSOLE, query.IQ_NODES]))
cluster = objects.Cluster(cluster_name="testcluster",
hvparams=constants.HVC_DEFAULTS,
iqd = query.InstanceQueryData(instances, cluster, disk_usage,
offline_nodes, bad_nodes, live_data,
- wrongnode_inst, consinfo)
+ wrongnode_inst, consinfo, {}, {})
result = q.Query(iqd)
self.assertEqual(len(result), len(instances))
self.assert_(compat.all(len(row) == len(selected)