Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance_query.py @ 0c3d9c7c

History | View | Annotate | Download (16.4 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units for querying instances."""
23

    
24
import itertools
25
import logging
26
import operator
27

    
28
from ganeti import compat
29
from ganeti import constants
30
from ganeti import locking
31
from ganeti import qlang
32
from ganeti import query
33
from ganeti.cmdlib.base import QueryBase, NoHooksLU
34
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
35
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
36
from ganeti.cmdlib.instance_operation import GetInstanceConsole
37
from ganeti.cmdlib.instance_utils import NICListToTuple
38

    
39
import ganeti.masterd.instance
40

    
41

    
42
class InstanceQuery(QueryBase):
  """Query runner for instances.

  Builds on L{QueryBase} using the field definitions from
  L{query.INSTANCE_FIELDS}.

  """
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    """Compute the locks needed on behalf of the owning LU.

    Instance/node/group/network locks are only acquired when live data
    (L{query.IQ_LIVE}) was requested together with locking; otherwise the
    query runs lock-less against the configuration only.

    """
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    # Node group locks are only needed if node data was requested as well
    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    """Declare the locks for one locking level on behalf of the owning LU.

    """
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in
                lu.cfg.GetInstanceNodeGroups(
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in
                      lu.cfg.GetInstanceNetworks(
                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

  @staticmethod
  def _CheckGroupLocks(lu):
    """Verify the optimistically acquired node group locks are still valid.

    The groups were computed (and locked) before the instances' nodes were
    locked, so an instance may have changed groups in between; this check
    raises in that case.

    """
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[name] for name in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set(inst.hypervisor for inst in instance_list))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

    # Gather data as requested
    if self.requested_data & frozenset([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                # Record live data for this instance only; copying the whole
                # payload here would also pick up orphan instances (raising
                # KeyError on the name lookup) and instances running on the
                # wrong node, defeating the wrongnode detection below
                live_data[instance.uuid] = result.payload[inst_name]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))
              # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # Instance is running
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    # "cluster" was fetched above; no need to query the configuration again
    return query.InstanceQueryData(instance_list, cluster,
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)
197

    
198

    
199
class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  All actual work is delegated to an L{InstanceQuery} helper created in
  L{CheckArguments}.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    name_filter = qlang.MakeSimpleFilter("name", self.op.names)
    self.iq = InstanceQuery(name_filter, self.op.output_fields,
                            self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    # Results are returned in the old-style (list-based) query format
    return self.iq.OldStyleQuery(self)
218

    
219

    
220
class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  Returns a per-instance dictionary with configuration and (unless static
  data was requested) live runtime information gathered via RPC.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the needed locks.

    Locks are taken (shared) only when C{use_locking} is requested or
    implied by a request for non-static data.

    """
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Declare the locks for one locking level.

    Group and network locks are computed optimistically from the owned
    instance locks; L{CheckPrereq} verifies them later.

    """
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      # Names come from the locks acquired in ExpandNames/DeclareLocks
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      # Verify the optimistically taken group locks are still correct
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      # Lock-less mode: nothing should be held at any level
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device.

    Returns C{None} for static queries, a missing node, an offline node or
    an empty RPC payload; otherwise a tuple of status fields from the
    C{blockdev_find} RPC result.

    """
    # Static queries or an unknown node mean no live status can be obtained
    if self.op.static or not node_uuid:
      return None

    self.cfg.SetDiskID(dev, node_uuid)
    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    Annotates the device with the instance's disk parameters before
    delegating to L{_ComputeDiskStatusInner}.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      # logical_id layout for DRBD as used here: [0]/[1] node UUIDs,
      # [2] port, [3]/[4] primary/secondary minors, [5] shared secret
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      # Recurse into child devices, keeping the secondary node resolved above
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data.

    Builds a dictionary keyed by instance name with configuration data,
    resolved node/group names, NIC and disk details, and the run state
    (live via RPC unless static data was requested or the primary node is
    offline).

    """
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        # No live state available; warn when that's due to an offline node
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        # "up" if the hypervisor reports a state; otherwise "down" when the
        # instance should be running, else its configured admin state
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      # Helpers mapping UUIDs to names via the dicts fetched above
      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result