#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
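    """Compute the list of lock names and the locking level.

    Locks are only acquired if live data (query.IQ_LIVE) was requested
    and the caller enabled locking; otherwise the query is answered
    purely from the master's in-memory configuration.

    """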
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      self.wanted = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
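    """Declare the locks computed in ExpandNames, level by level.

    Node group locks are acquired optimistically from the owned
    instance locks and verified later by _CheckGroupLocks; network
    locks are derived the same way, node locks via _LockInstancesNodes.

    """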
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))

  @staticmethod
  def _CheckGroupLocks(lu):
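    """Verify that the owned node group locks still cover all instances.

    """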
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()
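    # bad_nodes: nodes whose RPC call failed; offline_nodes: nodes marked
    # offline in the configuration (always a subset of bad_nodes);
    # wrongnode_inst: instances found running on a node other than their
    # configured primary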

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                # merge only this instance's data; updating with the whole
                # node payload would also mark wrong-node instances as live
                live_data[inst] = result.payload[inst]
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance on the node is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.name,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                            instance_list)))
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo,
                                   nodes, groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
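    """Build the InstanceQuery helper object from the opcode parameters.

    """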
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
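    """Run the instance query.

    The result is returned in the "old-style" list-of-rows format
    produced by QueryBase.OldStyleQuery.

    """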
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
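    """Compute the lock names needed, forcing locking when required.

    Purely static queries are answered from the configuration alone
    and do not acquire any locks.

    """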
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
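      # The node lock list is recomputed in DeclareLocks from the owned
      # instance locks; LOCKS_REPLACE makes that computed set replace the
      # empty list above rather than extend it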

  def DeclareLocks(self, level):
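    """Declare node group, node and network locks.

    All of them are derived from the instance locks already owned.

    """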
    if self.op.use_locking:
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_name))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in owned_instances
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_name))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                               None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_nodes or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node, instance, dev):
    """Returns the status of a block device.

    Returns None for static queries, when no node is given or the node
    is offline, and when the node cannot find the device; otherwise a
    tuple of (dev_path, major, minor, sync_percent, estimated_time,
    is_degraded, ldisk_status).

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, snode, anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode, dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data."""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    group2name_fn = lambda uuid: groups[uuid].name
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state
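      # remote_state is "up" if the hypervisor reported the instance,
      # "down" if it should be running but was not reported, the bare
      # admin state for administratively stopped instances, or None for
      # static queries and offline primary nodes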

      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_name].group
                            for snode_name in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": instance.secondary_nodes,
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
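
# Both logical units above are normally reached through the opcode
# dispatcher rather than instantiated directly; roughly (a sketch, with
# opcode names as defined in ganeti.opcodes of the same era):
#   OpInstanceQuery     -> LUInstanceQuery.Exec (old-style list of rows)
#   OpInstanceQueryData -> LUInstanceQueryData.Exec (dict keyed by name)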