lib/cmdlib/instance_query.py @ 4e7f986e


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
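      # The node lock list is filled in later by _LockInstancesNodes() in
      # DeclareLocks; LOCKS_REPLACE makes that step replace the empty list
      # above rather than extend it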
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by the instances optimistically; this requires
        # going via the nodes before they are locked, so the result has to be
        # verified later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device.

    @return: None if no status could be gathered (static query, no node
      given, node offline, or device not found), otherwise the tuple
      (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
      ldisk_status)

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    if dev.dev_type in constants.DTS_DRBD:
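      # for DRBD devices the logical_id describes both ends of the pair:
      # (nodeA_uuid, nodeB_uuid, port, minorA, minorB, secret)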
      # derive the secondary node from the logical_id (otherwise use the
      # one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data."""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i)
                                   for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
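          # The hypervisor knows the instance but reports it as shut down;
          # treat this as a shutdown from inside the guest ("user down").
          # For KVM this is only trusted when user shutdown detection is
          # enabled via the HV_KVM_USER_SHUTDOWN hvparam.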
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]) \
                and (instance.hypervisor != constants.HT_KVM
                       or instance.hvparams[constants.HV_KVM_USER_SHUTDOWN]):
            remote_state = "user down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance)
      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
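
# Example of consuming the mapping returned by LUInstanceQueryData.Exec()
# (illustrative sketch only; the opcode/job machinery that normally drives
# this LU is assumed and not shown here):
#
#   info = lu.Exec(feedback_fn)          # {instance_name: data, ...}
#   for name, data in info.items():
#     print("%s: %s on %s" % (name, data["run_state"], data["pnode"]))
#     for disk in data["disks"]:
#       print("  %s (%s, size %s)" % (disk["iv_name"], disk["dev_type"],
#                                     disk["size"]))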