lib/cmdlib/instance_query.py @ 2a02d6fe

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units for querying instances."""

import itertools

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base



class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    raise NotImplementedError

  def DeclareLocks(self, lu, level):
    raise NotImplementedError


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    raise NotImplementedError

  def ExpandNames(self):
    raise NotImplementedError

  def DeclareLocks(self, level):
    raise NotImplementedError

  def Exec(self, feedback_fn):
    raise NotImplementedError


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
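      # The node lock list is left empty here on purpose: with
      # constants.LOCKS_REPLACE, _LockInstancesNodes() (called from
      # DeclareLocks below) replaces it with the nodes of the locked
      # instances.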

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
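    # For DRBD disks the logical_id is expected to be the tuple
    # (node_a_uuid, node_b_uuid, port, minor_a, minor_b, secret);
    # the index accesses below rely on that layout.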
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))
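
    # Node and node group objects are fetched in bulk above so the
    # per-instance loop below only needs dictionary lookups.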

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            remote_state = "user down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state
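
      # At this point remote_state is "up", "user down", "down", the
      # instance's admin state (when it is not expected to be running),
      # or None (static query or offline primary node).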

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
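
# Note: the mapping returned by Exec() is keyed by instance name and holds
# one dictionary of static plus (optionally) runtime data per instance; it
# is the low-level data behind per-instance detail queries (e.g. what a
# client command such as "gnt-instance info" would render), with run_state
# left as None for purely static queries.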