lib/cmdlib/instance_query.py @ revision 1c3231aa


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


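# InstanceQuery implements the generic query machinery (QueryBase) for the
# instance fields defined in query.INSTANCE_FIELDS.  Depending on the
# requested data it may acquire instance/node/group/network locks and gather
# live data via RPC (IQ_LIVE), console information (IQ_CONSOLE), disk usage
# (IQ_DISKUSAGE), node objects (IQ_NODES) and network objects (IQ_NETWORKS).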
class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      self.wanted = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[node] for node in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == node_uuid:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, lu.cfg.GetNodeName(node_uuid))
              # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.name,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data, wrongnode_inst,
                                   consinfo, nodes, groups, networks)


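# LUInstanceQuery wraps InstanceQuery in a logical unit (the "old style" list
# query).  An illustrative sketch of a client-side caller, assuming the
# matching OpInstanceQuery opcode and ganeti.cli are available at this
# revision (the field list is just an example):
#
#   from ganeti import cli, opcodes
#
#   op = opcodes.OpInstanceQuery(output_fields=["name", "os"], names=[],
#                                use_locking=False)
#   rows = cli.SubmitOpCode(op)
#
# The opcode parameters mirror the self.op attributes read in
# CheckArguments() below.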
class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


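# LUInstanceQueryData returns a detailed, nested dict per instance (roughly
# the data behind "gnt-instance info"), including disk status and, unless
# static data was requested, the live run state reported by the primary node.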
class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_name))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in owned_instances
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_name))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    self.cfg.SetDiskID(dev, node_uuid)

    result = self.rpc.call_blockdev_find(node_uuid, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
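      # (for DRBD, logical_id is the tuple (node_A, node_B, port, minor_A,
      # minor_B, secret), hence the index-based accesses below)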
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
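

# Illustrative usage sketch (not part of the original module): assuming the
# matching OpInstanceQueryData opcode and ganeti.cli are available, and using
# a hypothetical instance name, the per-instance dict built in
# LUInstanceQueryData.Exec() could be retrieved like this:
#
#   from ganeti import cli, opcodes
#
#   op = opcodes.OpInstanceQueryData(instances=["inst1.example.com"],
#                                    static=False, use_locking=True)
#   info = cli.SubmitOpCode(op)
#   print info["inst1.example.com"]["run_state"]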