Revision e431074f
b/lib/cmdlib.py

 3873  3873      instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
 3874  3874
 3875  3875      instance_list = [all_info[name] for name in instance_names]
 3876        -   nodes = frozenset([inst.primary_node for inst in instance_list])
       3876  +   nodes = frozenset(itertools.chain(*(inst.all_nodes
       3877  +                                       for inst in instance_list)))
 3877  3878      hv_list = list(set([inst.hypervisor for inst in instance_list]))
 3878  3879      bad_nodes = []
 3879  3880      offline_nodes = []
       3881  +   wrongnode_inst = set()
 3880  3882
 3881  3883      # Gather data as requested
 3882  3884      if query.IQ_LIVE in self.requested_data:
 ...   ...
 3891  3893        if result.fail_msg:
 3892  3894          bad_nodes.append(name)
 3893  3895        elif result.payload:
 3894        -       live_data.update(result.payload)
       3896  +       for inst in result.payload:
       3897  +         if all_info[inst].primary_node == name:
       3898  +           live_data.update(result.payload)
       3899  +         else:
       3900  +           wrongnode_inst.add(inst)
 3895  3901        # else no instance is alive
 3896  3902      else:
 3897  3903        live_data = {}
 ...   ...
 3907  3913
 3908  3914      return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
 3909  3915                                     disk_usage, offline_nodes, bad_nodes,
 3910        -                                  live_data)
       3916  +                                  live_data, wrongnode_inst)
 3911  3917
 3912  3918
 3913  3919  class LUQuery(NoHooksLU):
b/lib/query.py

 664   664
 665   665    """
 666   666    def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes,
 667         -              live_data):
       667   +              live_data, wrongnode_inst):
 668   668      """Initializes this class.
 669   669
 670   670      @param instances: List of instance objects
 ...   ...
 677   677      @param bad_nodes: List of faulty nodes
 678   678      @type live_data: dict; instance name as key
 679   679      @param live_data: Per-instance live data
       680   +  @type wrongnode_inst: set
       681   +  @param wrongnode_inst: Set of instances running on wrong node(s)
 680   682
 681   683      """
 682   684      assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \
 ...   ...
 690   692      self.offline_nodes = offline_nodes
 691   693      self.bad_nodes = bad_nodes
 692   694      self.live_data = live_data
       695   +  self.wrongnode_inst = wrongnode_inst
 693   696
 694   697      # Used for individual rows
 695   698      self.inst_hvparams = None
 ...   ...
 774   777        return "ERROR_nodedown"
 775   778
 776   779      if bool(ctx.live_data.get(inst.name)):
 777         -    if inst.admin_up:
       780   +    if inst.name in ctx.wrongnode_inst:
       781   +      return "ERROR_wrongnode"
       782   +    elif inst.admin_up:
 778   783        return "running"
 779   784      else:
 780   785        return "ERROR_up"
b/man/gnt-instance.rst

 672   672    "running", "stopped", "(node down)"
 673   673
 674   674  status
 675         -  combined form of admin\_state and oper\_stat; this can be one of:
 676         -  ERROR\_nodedown if the node of the instance is down, ERROR\_down if
 677         -  the instance should run but is down, ERROR\_up if the instance
 678         -  should be stopped but is actually running, ADMIN\_down if the
 679         -  instance has been stopped (and is stopped) and running if the
 680         -  instance is set to be running (and is running)
       675   +  combined form of ``admin_state`` and ``oper_stat``; this can be one of:
       676   +  ``ERROR_nodedown`` if the node of the instance is down, ``ERROR_down`` if
       677   +  the instance should run but is down, ``ERROR_up`` if the instance should be
       678   +  stopped but is actually running, ``ERROR_wrongnode`` if the instance is
       679   +  running but not on the primary, ``ADMIN_down`` if the instance has been
       680   +  stopped (and is stopped) and ``running`` if the instance is set to be
       681   +  running (and is running)
 681   682
 682   683  oper\_ram
 683   684    the actual memory usage of the instance as seen by the hypervisor
b/test/ganeti.query_unittest.py

 539   539        nics=[objects.NIC(ip="192.0.2.99", nicparams={})]),
 540   540      ]
 541   541
 542         -  iqd = query.InstanceQueryData(instances, cluster, None, [], [], {})
       542   +  iqd = query.InstanceQueryData(instances, cluster, None, [], [], {}, set())
 543   543      self.assertEqual(q.Query(iqd),
 544   544        [[(constants.RS_NORMAL, "inst1"),
 545   545          (constants.RS_NORMAL, 128),
 ...   ...
 694   694        "memory": 768,
 695   695        },
 696   696      }
       697   +  wrongnode_inst = set("inst2")
 697   698
 698   699      iqd = query.InstanceQueryData(instances, cluster, disk_usage,
 699         -                                offline_nodes, bad_nodes, live_data)
       700   +                                offline_nodes, bad_nodes, live_data,
       701   +                                wrongnode_inst)
 700   702      result = q.Query(iqd)
 701   703      self.assertEqual(len(result), len(instances))
 702   704      self.assert_(compat.all(len(row) == len(selected)
 ...   ...
 718   720      elif inst.primary_node in bad_nodes:
 719   721        exp_status = "ERROR_nodedown"
 720   722      elif inst.name in live_data:
 721         -    if inst.admin_up:
       723   +    if inst.name in wrongnode_inst:
       724   +      exp_status = "ERROR_wrongnode"
       725   +    elif inst.admin_up:
 722   726      exp_status = "running"
 723   727      else:
 724   728        exp_status = "ERROR_up"

(review note: at new line 697, ``set("inst2")`` builds the set of characters
``{'i', 'n', 's', 't', '2'}``, not ``{"inst2"}`` — presumably ``set(["inst2"])``
was intended; as written, no instance name can match, so the
``ERROR_wrongnode`` branch is never actually exercised by this test — verify
against a later revision.)
Also available in: Unified diff