#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
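
    # The node group locks were acquired optimistically in DeclareLocks;
    # verify that they still match the instances' current nodes and groups.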
    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None
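
    # Flatten the interesting fields of the status payload into a tuple:
    # device path, major/minor numbers, sync progress, estimated time,
    # degradation flag and ldisk status.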
    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    if dev.dev_type in constants.DTS_DRBD:
      # derive the secondary node from the logical_id (otherwise keep the
      # one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
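      # The DRBD logical_id tuple carries both node UUIDs, the port, both
      # minors and the shared secret; expose them under descriptive keys.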
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()
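
    # Gather configuration data for all nodes and node groups used by the
    # wanted instances up front, so the loop below only does dict lookups.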
    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
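        # Derive the user-visible run state from the hypervisor answer:
        # when a state is reported, "user down" if the hypervisor says the
        # instance is shut down and user shutdown detection applies
        # (non-KVM, or KVM with user shutdown enabled), "up" otherwise;
        # without state information, report "down" when the instance is
        # administratively up, else the admin state itself.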
        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]) \
                and (instance.hypervisor != constants.HT_KVM
                       or instance.hvparams[constants.HV_KVM_USER_SHUTDOWN]):
            remote_state = "user down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result