lib/cmdlib/instance_query.py @ 70b634e6

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in
                lu.cfg.GetInstanceNodeGroups(
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in
                      lu.cfg.GetInstanceNetworks(
                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[node] for node in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                for iname in result.payload:
                  live_data[insts_by_name[iname].uuid] = result.payload[iname]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))
              # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # Instance is running
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)


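# Illustrative usage sketch (not from the original source; the `lu` object and
# the field names are assumptions for the example only): a logical unit drives
# InstanceQuery much like LUInstanceQuery below does.
#
#   iq = InstanceQuery(qlang.MakeSimpleFilter("name", ["inst1.example.com"]),
#                      ["name", "status"], False)
#   iq.ExpandNames(lu)            # declares the locks computed above
#   rows = iq.OldStyleQuery(lu)   # QueryBase entry point; ends up calling
#                                 # _GetQueryData() for the actual data

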
class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


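# Orientation note (an assumption based on the attributes read above, not a
# definition from this file): the opcode behind LUInstanceQuery carries
# `names`, `output_fields` and `use_locking` (OpInstanceQuery in
# ganeti.opcodes), roughly
#
#   op = opcodes.OpInstanceQuery(output_fields=["name", "status"],
#                                names=[], use_locking=False)
#
# LUInstanceQueryData below instead reads op.instances, op.static and
# op.use_locking, and returns one detailed dict per instance rather than rows
# of selected fields.

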
class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

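  # For DRBD disks, _ComputeDiskStatusInner below reads dev.logical_id as
  #   (node_a_uuid, node_b_uuid, port, primary_minor, secondary_minor, secret)
  # when building drbd_info; whichever of the two node UUIDs is not the
  # instance's primary node is reported as the secondary.
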
  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
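

# Illustrative sketch of consuming LUInstanceQueryData.Exec() output (the
# instance name and the printed fields are examples, not taken from this file):
#
#   info = result["inst1.example.com"]
#   print(info["run_state"], info["pnode"], len(info["disks"]))
#
# "run_state" is "up", "down", the configured admin state, or None for static
# queries; each element of "disks" is the nested dict built by
# _ComputeDiskStatusInner above.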