# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
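  """Query implementation for instances: declares the locks that are needed
  and collects the per-instance data returned to the query framework.

  """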
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      self.wanted = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
      assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

      # Lock all groups used by instances optimistically; this requires going
      # via the node before it's locked, requiring verification later on
      lu.needed_locks[locking.LEVEL_NODEGROUP] = \
        set(group_uuid
            for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
            for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
    elif level == locking.LEVEL_NODE:
      lu._LockInstancesNodes() # pylint: disable=W0212

    elif level == locking.LEVEL_NETWORK:
      lu.needed_locks[locking.LEVEL_NETWORK] = \
        set(net_uuid
            for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
            for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))

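  # The node group locks above are only acquired optimistically; the helper
  # below re-checks them once the instance locks are actually held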
  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list,
                                                 cluster.hvparams)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

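    # Disk usage is computed from the configured disk sizes alone; no node
    # needs to be contacted for it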
    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.name,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

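    # Console information can only be built for instances reported as running
    # above (i.e. present in live_data)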
    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                            instance_list)))
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo,
                                   nodes, groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
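      # Queries are read-only, so all locks can be acquired in shared mode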
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                      self.cfg.GetInstanceNodeGroups(instance_name))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in owned_instances
                    for net_uuid in
                      self.cfg.GetInstanceNetworks(instance_name))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                               owned_networks)
    else:
      assert not (owned_instances or owned_groups or
                  owned_nodes or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)
    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)
    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, snode, anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode, dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    group2name_fn = lambda uuid: groups[uuid].name
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state
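
      # Compute the status of every disk, recursing into the children of
      # composite devices (e.g. DRBD disks and their backing volumes)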
      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_name].group
                            for snode_name in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": instance.secondary_nodes,
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result