Use hvparams in GetAllInstancesInfo
[ganeti-local] lib/cmdlib/instance_query.py
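The query paths in this file now hand the cluster's hypervisor parameters to the node RPCs, so the node-side GetAllInstancesInfo can work with them directly. Condensed from the code below (all names exactly as they appear there):

    node_data = lu.rpc.call_all_instances_info(nodes, hv_list,
                                               cluster.hvparams)
    remote_info = self.rpc.call_instance_info(
        instance.primary_node, instance.name, instance.hypervisor,
        cluster.hvparams[instance.hypervisor])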
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      self.wanted = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
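    # Hypervisors actually in use by the selected instances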
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
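      # Query every involved node for its instances; the cluster-wide
      # hypervisor parameters are passed along with the RPC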
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list,
                                                 cluster.hvparams)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
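                # only data reported by the instance's primary node counts
                # as live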
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
              # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.name,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                            instance_list)))
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo,
                                   nodes, groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_name))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in owned_instances
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_name))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                               None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_nodes or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, snode, anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode, dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    group2name_fn = lambda uuid: groups[uuid].name
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
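        # Ask the primary node about the instance, passing the parameters of
        # the instance's hypervisor along with the RPC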
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
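        # a non-empty answer with a "state" entry means the instance is
        # running on its primary node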
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_name].group
                            for snode_name in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": instance.secondary_nodes,
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result