lib/cmdlib/instance_query.py @ 44ffd981


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in
                lu.cfg.GetInstanceNodeGroups(
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in
                      lu.cfg.GetInstanceNetworks(
                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[node] for node in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                for iname in result.payload:
                  live_data[insts_by_name[iname].uuid] = result.payload[iname]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))
              # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # Instance is running
          node = lu.cfg.GetNodeInfo(inst.primary_node)
          group = lu.cfg.GetNodeGroup(node.group)
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst, node, group)
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)
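    # Payloads whose query.IQ_* flag was not requested above are handed to
    # InstanceQueryData as None (live_data stays an empty dict).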


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)
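    # OldStyleQuery() is not defined in this file; it is inherited from the
    # QueryBase parent class (ganeti.cmdlib.base) and is assumed to run the
    # query built by InstanceQuery._GetQueryData() above, returning the
    # requested output_fields in the old-style list format.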


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

    
327
  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
328
    """Compute block device status.
329

330
    """
331
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
332

    
333
    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
334
                                        anno_dev)
335

    
336
  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
337
                              dev):
338
    """Compute block device status.
339

340
    @attention: The device has to be annotated already.
341

342
    """
343
    drbd_info = None
344
    if dev.dev_type in constants.DTS_DRBD:
345
      # we change the snode then (otherwise we use the one passed in)
346
      if dev.logical_id[0] == instance.primary_node:
347
        snode_uuid = dev.logical_id[1]
348
      else:
349
        snode_uuid = dev.logical_id[0]
350
      drbd_info = {
351
        "primary_node": node_uuid2name_fn(instance.primary_node),
352
        "primary_minor": dev.logical_id[3],
353
        "secondary_node": node_uuid2name_fn(snode_uuid),
354
        "secondary_minor": dev.logical_id[4],
355
        "port": dev.logical_id[2],
356
        "secret": dev.logical_id[5],
357
      }
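      # Index layout used above for the DRBD logical_id tuple: [0] and [1] are
      # the two node UUIDs, [2] the port, [3] and [4] the primary/secondary
      # minors, and [5] the shared DRBD secret.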

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            remote_state = "user down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
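

# Illustrative usage only (not part of the original module): the logical units
# above are normally reached through opcodes submitted to the master daemon
# rather than instantiated directly.  Assuming the usual opcode and client
# helpers defined elsewhere in Ganeti (OpInstanceQueryData, cli.SubmitOpCode),
# a client-side sketch could look like this:
#
#   from ganeti import cli, opcodes
#
#   op = opcodes.OpInstanceQueryData(instances=["inst1.example.com"],
#                                    static=False, use_locking=True)
#   data = cli.SubmitOpCode(op)
#   # "data" maps instance names to the dictionaries built in
#   # LUInstanceQueryData.Exec() above.
#
# The names in this sketch are assumptions based on other parts of Ganeti,
# not definitions made in this file.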