# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
22 """Logical units for querying instances."""
28 from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance
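

# InstanceQuery implements the instance field queries; LUInstanceQuery below
# instantiates it in CheckArguments and delegates ExpandNames, DeclareLocks
# and Exec (via OldStyleQuery) to it.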
class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      self.wanted = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)
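
  # DeclareLocks is invoked once per locking level; node-group and network
  # locks are derived from the instance locks already held. The nodes are not
  # locked yet at that point, so the group locks taken here are verified again
  # later via _CheckGroupLocks.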
  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.name,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                            instance_list)))
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo,
                                   nodes, groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                      self.cfg.GetInstanceNodeGroups(instance_name))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in owned_instances
                    for net_uuid in
                      self.cfg.GetInstanceNetworks(instance_name))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                               owned_networks)
    else:
      assert not (owned_instances or owned_groups or
                  owned_nodes or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)
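
  # _ComputeDiskStatus annotates the device's disk parameters and then
  # delegates to _ComputeDiskStatusInner, which also recurses into any child
  # devices.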
  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, snode, anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode, dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []
349 "iv_name": dev.iv_name,
350 "dev_type": dev.dev_type,
351 "logical_id": dev.logical_id,
352 "physical_id": dev.physical_id,
353 "pstatus": dev_pstatus,
354 "sstatus": dev_sstatus,
355 "children": dev_children,

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    group2name_fn = lambda uuid: groups[uuid].name
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
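
      # Compute the run state: None when only static data was requested or the
      # primary node is offline, "up" when the hypervisor reports the instance,
      # otherwise "down" or the configured admin state.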
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_name].group
                            for snode_name in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": instance.secondary_nodes,
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,