instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
instance_list = [all_info[name] for name in instance_names]
- nodes = frozenset([inst.primary_node for inst in instance_list])
+ nodes = frozenset(itertools.chain(*(inst.all_nodes
+ for inst in instance_list)))
hv_list = list(set([inst.hypervisor for inst in instance_list]))
bad_nodes = []
offline_nodes = []
+ wrongnode_inst = set()
# Gather data as requested
if query.IQ_LIVE in self.requested_data:
if result.fail_msg:
bad_nodes.append(name)
elif result.payload:
- live_data.update(result.payload)
+ for inst in result.payload:
+ if all_info[inst].primary_node == name:
+ live_data.update(result.payload)
+ else:
+ wrongnode_inst.add(inst)
# else no instance is alive
else:
live_data = {}
return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
disk_usage, offline_nodes, bad_nodes,
- live_data)
+ live_data, wrongnode_inst)
class LUQuery(NoHooksLU):
"""
def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes,
- live_data):
+ live_data, wrongnode_inst):
"""Initializes this class.
@param instances: List of instance objects
@param bad_nodes: List of faulty nodes
@type live_data: dict; instance name as key
@param live_data: Per-instance live data
+ @type wrongnode_inst: set
+ @param wrongnode_inst: Set of instances running on wrong node(s)
"""
assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \
self.offline_nodes = offline_nodes
self.bad_nodes = bad_nodes
self.live_data = live_data
+ self.wrongnode_inst = wrongnode_inst
# Used for individual rows
self.inst_hvparams = None
return "ERROR_nodedown"
if bool(ctx.live_data.get(inst.name)):
- if inst.admin_up:
+ if inst.name in ctx.wrongnode_inst:
+ return "ERROR_wrongnode"
+ elif inst.admin_up:
return "running"
else:
return "ERROR_up"
"running", "stopped", "(node down)"
status
- combined form of admin\_state and oper\_stat; this can be one of:
- ERROR\_nodedown if the node of the instance is down, ERROR\_down if
- the instance should run but is down, ERROR\_up if the instance
- should be stopped but is actually running, ADMIN\_down if the
- instance has been stopped (and is stopped) and running if the
- instance is set to be running (and is running)
+ combined form of ``admin_state`` and ``oper_stat``; this can be one of:
+ ``ERROR_nodedown`` if the node of the instance is down, ``ERROR_down`` if
+ the instance should run but is down, ``ERROR_up`` if the instance should be
+ stopped but is actually running, ``ERROR_wrongnode`` if the instance is
+ running but not on the primary, ``ADMIN_down`` if the instance has been
+ stopped (and is stopped) and ``running`` if the instance is set to be
+ running (and is running)
oper\_ram
the actual memory usage of the instance as seen by the hypervisor
nics=[objects.NIC(ip="192.0.2.99", nicparams={})]),
]
- iqd = query.InstanceQueryData(instances, cluster, None, [], [], {})
+ iqd = query.InstanceQueryData(instances, cluster, None, [], [], {}, set())
self.assertEqual(q.Query(iqd),
[[(constants.RS_NORMAL, "inst1"),
(constants.RS_NORMAL, 128),
"memory": 768,
},
}
+ wrongnode_inst = set(["inst2"])
iqd = query.InstanceQueryData(instances, cluster, disk_usage,
- offline_nodes, bad_nodes, live_data)
+ offline_nodes, bad_nodes, live_data,
+ wrongnode_inst)
result = q.Query(iqd)
self.assertEqual(len(result), len(instances))
self.assert_(compat.all(len(row) == len(selected)
elif inst.primary_node in bad_nodes:
exp_status = "ERROR_nodedown"
elif inst.name in live_data:
- if inst.admin_up:
+ if inst.name in wrongnode_inst:
+ exp_status = "ERROR_wrongnode"
+ elif inst.admin_up:
exp_status = "running"
else:
exp_status = "ERROR_up"