lib/cmdlib/instance_query.py @ 0d3f52da
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base

import ganeti.masterd.instance


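# Query backend for instances: defines the field set and the locking and
# data-gathering logic that LUInstanceQuery below delegates to.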
class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

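  # Locks are only declared when live data was requested (do_locking above);
  # node group and network locks are derived from the instance locks that are
  # already owned at the time each level is declared.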
  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in
                lu.cfg.GetInstanceNodeGroups(
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in
                      lu.cfg.GetInstanceNetworks(
                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

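  # Because the group locks above are taken optimistically, they have to be
  # re-checked against the current configuration before the data is computed.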
  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[node] for node in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
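      # Classify each node's answer: offline nodes end up in both the offline
      # and bad lists, other failures only in the bad list, and instances
      # reported by a node other than their configured primary are flagged as
      # running on the wrong node.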
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                for iname in result.payload:
                  live_data[insts_by_name[iname].uuid] = result.payload[iname]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))
        # else no instance is alive
    else:
      live_data = {}

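    # The following blocks are only computed when the corresponding data was
    # requested; otherwise the query layer receives None for that slot.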
    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # Instance is running
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)


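# Thin logical-unit wrapper: argument checking, name expansion, lock
# declaration and execution are all delegated to the InstanceQuery object
# created in CheckArguments.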
class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


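# Detailed per-instance data query: returns the full configuration plus,
# unless only static data was requested, runtime state and block device
# status for the requested instances.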
class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                      self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                      self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

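  # The helpers below do the per-disk work: _ComputeBlockdevStatus issues the
  # blockdev_find RPC for one device on one node, while _ComputeDiskStatus
  # annotates the disk parameters and then recurses through child devices via
  # _ComputeDiskStatusInner.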
  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
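      # dev.logical_id is indexed here as (node_a, node_b, port, minor_a,
      # minor_b, secret); the secondary is whichever end is not the
      # instance's primary node.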
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

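      # Determine the run state: None when only static data was requested or
      # the primary node is offline; otherwise derived from what the
      # hypervisor reports ("up" / "user down") or, failing that, from the
      # instance's admin state.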
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            remote_state = "user down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result