Revision da4a52a3 lib/cmdlib/instance_query.py
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -47,7 +47,7 @@
     lu.share_locks = ShareAll()

     if self.names:
-      self.wanted = GetWantedInstances(lu, self.names)
+      (_, self.wanted) = GetWantedInstances(lu, self.names)
     else:
       self.wanted = locking.ALL_SET

@@ -73,7 +73,9 @@
         lu.needed_locks[locking.LEVEL_NODEGROUP] = \
           set(group_uuid
               for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
+              for group_uuid in
+                lu.cfg.GetInstanceNodeGroups(
+                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
       elif level == locking.LEVEL_NODE:
         lu._LockInstancesNodes() # pylint: disable=W0212

@@ -81,16 +83,19 @@
         lu.needed_locks[locking.LEVEL_NETWORK] = \
           frozenset(net_uuid
                     for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
+                    for net_uuid in
+                      lu.cfg.GetInstanceNetworks(
+                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

   @staticmethod
   def _CheckGroupLocks(lu):
-    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

     # Check if node groups for locked instances are still correct
-    for instance_name in owned_instances:
-      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
+    for instance_name in owned_instance_names:
+      instance = lu.cfg.GetInstanceInfoByName(instance_name)
+      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

   def _GetQueryData(self, lu):
     """Computes the list of instances and their attributes.
@@ -100,17 +105,19 @@
       self._CheckGroupLocks(lu)

     cluster = lu.cfg.GetClusterInfo()
-    all_info = lu.cfg.GetAllInstancesInfo()
+    insts_by_name = dict((inst.name, inst) for
+                         inst in lu.cfg.GetAllInstancesInfo().values())

-    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
+    instance_names = self._GetNames(lu, insts_by_name.keys(),
+                                    locking.LEVEL_INSTANCE)

-    instance_list = [all_info[node] for node in instance_names]
+    instance_list = [insts_by_name[node] for node in instance_names]
     node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                              for inst in instance_list)))
     hv_list = list(set([inst.hypervisor for inst in instance_list]))
     bad_node_uuids = []
     offline_node_uuids = []
-    wrongnode_inst = set()
+    wrongnode_inst_uuids = set()

     # Gather data as requested
     if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
@@ -126,24 +133,26 @@
         if result.fail_msg:
           bad_node_uuids.append(node_uuid)
         elif result.payload:
-          for inst in result.payload:
-            if inst in all_info:
-              if all_info[inst].primary_node == node_uuid:
-                live_data.update(result.payload)
+          for inst_name in result.payload:
+            if inst_name in insts_by_name:
+              instance = insts_by_name[inst_name]
+              if instance.primary_node == node_uuid:
+                for iname in result.payload:
+                  live_data[insts_by_name[iname].uuid] = result.payload[iname]
               else:
-                wrongnode_inst.add(inst)
+                wrongnode_inst_uuids.add(instance.uuid)
             else:
               # orphan instance; we don't list it here as we don't
               # handle this case yet in the output of instance listing
               logging.warning("Orphan instance '%s' found on node %s",
-                              inst, lu.cfg.GetNodeName(node_uuid))
+                              inst_name, lu.cfg.GetNodeName(node_uuid))
         # else no instance is alive
     else:
       live_data = {}

     if query.IQ_DISKUSAGE in self.requested_data:
       gmi = ganeti.masterd.instance
-      disk_usage = dict((inst.name,
+      disk_usage = dict((inst.uuid,
                          gmi.ComputeDiskSize(inst.disk_template,
                                              [{constants.IDISK_SIZE: disk.size}
                                               for disk in inst.disks]))
@@ -154,14 +163,13 @@
     if query.IQ_CONSOLE in self.requested_data:
       consinfo = {}
       for inst in instance_list:
-        if inst.name in live_data:
+        if inst.uuid in live_data:
           # Instance is running
-          consinfo[inst.name] = \
+          consinfo[inst.uuid] = \
             GetInstanceConsole(cluster, inst,
                                lu.cfg.GetNodeInfo(inst.primary_node))
         else:
-          consinfo[inst.name] = None
-      assert set(consinfo.keys()) == set(instance_names)
+          consinfo[inst.uuid] = None
     else:
       consinfo = None

@@ -175,7 +183,7 @@
       groups = None

     if query.IQ_NETWORKS in self.requested_data:
-      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
+      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                     for i in instance_list))
       networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
     else:
@@ -183,8 +191,9 @@

     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_node_uuids,
-                                   bad_node_uuids, live_data, wrongnode_inst,
-                                   consinfo, nodes, groups, networks)
+                                   bad_node_uuids, live_data,
+                                   wrongnode_inst_uuids, consinfo, nodes,
+                                   groups, networks)


 class LUInstanceQuery(NoHooksLU):
@@ -224,7 +233,7 @@

     if self.op.instances or not self.op.use_locking:
       # Expand instance names right here
-      self.wanted_names = GetWantedInstances(self, self.op.instances)
+      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
     else:
       # Will use acquired locks
       self.wanted_names = None
@@ -244,16 +253,17 @@

   def DeclareLocks(self, level):
     if self.op.use_locking:
-      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
+                               self.owned_locks(locking.LEVEL_INSTANCE)))
       if level == locking.LEVEL_NODEGROUP:

         # Lock all groups used by instances optimistically; this requires going
         # via the node before it's locked, requiring verification later on
         self.needed_locks[locking.LEVEL_NODEGROUP] = \
           frozenset(group_uuid
-                    for instance_name in owned_instances
+                    for instance_uuid in owned_instances.keys()
                     for group_uuid in
-                      self.cfg.GetInstanceNodeGroups(instance_name))
+                      self.cfg.GetInstanceNodeGroups(instance_uuid))

       elif level == locking.LEVEL_NODE:
         self._LockInstancesNodes()
@@ -261,9 +271,9 @@
       elif level == locking.LEVEL_NETWORK:
         self.needed_locks[locking.LEVEL_NETWORK] = \
           frozenset(net_uuid
-                    for instance_name in owned_instances
+                    for instance_uuid in owned_instances.keys()
                     for net_uuid in
-                      self.cfg.GetInstanceNetworks(instance_name))
+                      self.cfg.GetInstanceNetworks(instance_uuid))

   def CheckPrereq(self):
     """Check prerequisites.
@@ -280,7 +290,7 @@
       assert self.op.use_locking, "Locking was not used"
       self.wanted_names = owned_instances

-    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
+    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

     if self.op.use_locking:
       CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
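
Taken together, the hunks above convert this query path from instance names to instance UUIDs: RPC payloads and locks stay keyed by name, while configuration lookups and the derived dictionaries (live_data, disk_usage, consinfo) are now keyed by UUID, with an insts_by_name map bridging the two. A minimal illustrative sketch of that bridging pattern follows; it is plain Python with made-up sample data, not code from this revision:

import collections

# Hypothetical stand-in for Ganeti's instance configuration objects.
Instance = collections.namedtuple("Instance", ["name", "uuid", "primary_node"])

all_instances = [
  Instance("inst1.example.com", "uuid-1", "node-A"),
  Instance("inst2.example.com", "uuid-2", "node-B"),
]

# Name -> object map, built once from the configuration (cf. insts_by_name).
insts_by_name = dict((inst.name, inst) for inst in all_instances)

# RPC payloads remain keyed by instance name...
payload = {"inst1.example.com": {"state": "running"}}

# ...but the derived result dictionary is now keyed by instance UUID.
live_data = dict((insts_by_name[name].uuid, data)
                 for name, data in payload.items()
                 if name in insts_by_name)

assert live_data == {"uuid-1": {"state": "running"}}

The changed call sites also suggest, though the diff alone does not show the definitions, that GetWantedInstances now returns a pair whose second element is the list of expanded instance names, and that the name-keyed GetMultiInstanceInfoByName replaces GetMultiInstanceInfo where the inputs are names.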
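The lock-handling hunks apply the same principle from the other direction: instance locks are still identified by name, so each UUID-based configuration call first resolves the locked name to an instance object. A small sketch of that resolution step, using a hypothetical config stub in place of lu.cfg (GetInstanceInfoByName and GetInstanceNodeGroups are the calls used in the diff; the stub and sample data are illustrative only):

import collections

Inst = collections.namedtuple("Inst", ["name", "uuid"])

class ConfigStub(object):
  """Illustrative stand-in for the slice of lu.cfg used above."""

  def __init__(self, insts_by_name, groups_by_uuid):
    self._insts_by_name = insts_by_name    # instance name -> instance object
    self._groups_by_uuid = groups_by_uuid  # instance uuid -> set of group uuids

  def GetInstanceInfoByName(self, name):
    return self._insts_by_name[name]

  def GetInstanceNodeGroups(self, instance_uuid):
    return self._groups_by_uuid[instance_uuid]

# Owned locks are names; group lookups go name -> object -> uuid -> groups,
# mirroring the LEVEL_NODEGROUP hunk above.
def groups_for_locked_instances(cfg, owned_instance_names):
  return set(group_uuid
             for name in owned_instance_names
             for group_uuid in
               cfg.GetInstanceNodeGroups(cfg.GetInstanceInfoByName(name).uuid))

cfg = ConfigStub({"inst1": Inst("inst1", "uuid-1")}, {"uuid-1": set(["grp-1"])})
assert groups_for_locked_instances(cfg, ["inst1"]) == set(["grp-1"])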