Revision 8aa8f6b1

b/Makefile.am
@@ -318,6 +318,7 @@
 	lib/cmdlib/instance_storage.py \
 	lib/cmdlib/instance_migration.py \
 	lib/cmdlib/instance_operation.py \
+	lib/cmdlib/instance_query.py \
 	lib/cmdlib/instance_utils.py \
 	lib/cmdlib/backup.py \
 	lib/cmdlib/query.py \
b/lib/cmdlib/__init__.py
@@ -72,8 +72,6 @@
   LUInstanceRename, \
   LUInstanceRemove, \
   LUInstanceMove, \
-  LUInstanceQuery, \
-  LUInstanceQueryData, \
   LUInstanceMultiAlloc, \
   LUInstanceSetParams, \
   LUInstanceChangeGroup
@@ -92,6 +90,9 @@
   LUInstanceReinstall, \
   LUInstanceReboot, \
   LUInstanceConsole
+from ganeti.cmdlib.instance_query import \
+  LUInstanceQuery, \
+  LUInstanceQueryData
 from ganeti.cmdlib.backup import \
   LUBackupQuery, \
   LUBackupPrepare, \
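
Note: since __init__.py keeps re-exporting the two logical units from the new module, code that imports them from the ganeti.cmdlib package should be unaffected by the move. A minimal caller-side sketch (not part of this revision):

    # Caller-side import is unchanged by the move; the package still
    # re-exports the query LUs, now sourced from ganeti.cmdlib.instance_query.
    from ganeti.cmdlib import LUInstanceQuery, LUInstanceQueryData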
b/lib/cmdlib/instance.py
@@ -23,9 +23,7 @@
 
 import OpenSSL
 import copy
-import itertools
 import logging
-import operator
 import os
 
 from ganeti import compat
@@ -40,29 +38,24 @@
 from ganeti import objects
 from ganeti import opcodes
 from ganeti import pathutils
-from ganeti import qlang
 from ganeti import rpc
 from ganeti import utils
-from ganeti import query
 
-from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
-  ResultWithJobs
+from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
 
 from ganeti.cmdlib.common import INSTANCE_DOWN, \
   INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
   _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
   _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
   _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
-  _GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
-  _GetUpdatedParams, _ExpandInstanceName, _ComputeIPolicySpecViolation, \
-  _CheckInstanceState, _ExpandNodeName
+  _AnnotateDiskParams, _GetUpdatedParams, _ExpandInstanceName, \
+  _ComputeIPolicySpecViolation, _CheckInstanceState, _ExpandNodeName
 from ganeti.cmdlib.instance_storage import _CreateDisks, \
   _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
   _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
   _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
   _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
   _AssembleInstanceDisks
-from ganeti.cmdlib.instance_operation import _GetInstanceConsole
 from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
   _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
   _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
@@ -1809,400 +1802,6 @@
                                  (instance.name, target_node, msg))
 
 
-class _InstanceQuery(_QueryBase):
-  FIELDS = query.INSTANCE_FIELDS
-
-  def ExpandNames(self, lu):
-    lu.needed_locks = {}
-    lu.share_locks = _ShareAll()
-
-    if self.names:
-      self.wanted = _GetWantedInstances(lu, self.names)
-    else:
-      self.wanted = locking.ALL_SET
-
-    self.do_locking = (self.use_locking and
-                       query.IQ_LIVE in self.requested_data)
-    if self.do_locking:
-      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
-      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
-      lu.needed_locks[locking.LEVEL_NODE] = []
-      lu.needed_locks[locking.LEVEL_NETWORK] = []
-      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
-    self.do_grouplocks = (self.do_locking and
-                          query.IQ_NODES in self.requested_data)
-
-  def DeclareLocks(self, lu, level):
-    if self.do_locking:
-      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
-        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
-          set(group_uuid
-              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
-      elif level == locking.LEVEL_NODE:
-        lu._LockInstancesNodes() # pylint: disable=W0212
-
-      elif level == locking.LEVEL_NETWORK:
-        lu.needed_locks[locking.LEVEL_NETWORK] = \
-          frozenset(net_uuid
-                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
-
-  @staticmethod
-  def _CheckGroupLocks(lu):
-    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
-
-    # Check if node groups for locked instances are still correct
-    for instance_name in owned_instances:
-      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
-
-  def _GetQueryData(self, lu):
-    """Computes the list of instances and their attributes.
-
-    """
-    if self.do_grouplocks:
-      self._CheckGroupLocks(lu)
-
-    cluster = lu.cfg.GetClusterInfo()
-    all_info = lu.cfg.GetAllInstancesInfo()
-
-    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
-
-    instance_list = [all_info[name] for name in instance_names]
-    nodes = frozenset(itertools.chain(*(inst.all_nodes
-                                        for inst in instance_list)))
-    hv_list = list(set([inst.hypervisor for inst in instance_list]))
-    bad_nodes = []
-    offline_nodes = []
-    wrongnode_inst = set()
-
-    # Gather data as requested
-    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
-      live_data = {}
-      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
-      for name in nodes:
-        result = node_data[name]
-        if result.offline:
-          # offline nodes will be in both lists
-          assert result.fail_msg
-          offline_nodes.append(name)
-        if result.fail_msg:
-          bad_nodes.append(name)
-        elif result.payload:
-          for inst in result.payload:
-            if inst in all_info:
-              if all_info[inst].primary_node == name:
-                live_data.update(result.payload)
-              else:
-                wrongnode_inst.add(inst)
-            else:
-              # orphan instance; we don't list it here as we don't
-              # handle this case yet in the output of instance listing
-              logging.warning("Orphan instance '%s' found on node %s",
-                              inst, name)
-              # else no instance is alive
-    else:
-      live_data = {}
-
-    if query.IQ_DISKUSAGE in self.requested_data:
-      gmi = ganeti.masterd.instance
-      disk_usage = dict((inst.name,
-                         gmi.ComputeDiskSize(inst.disk_template,
-                                             [{constants.IDISK_SIZE: disk.size}
-                                              for disk in inst.disks]))
-                        for inst in instance_list)
-    else:
-      disk_usage = None
-
-    if query.IQ_CONSOLE in self.requested_data:
-      consinfo = {}
-      for inst in instance_list:
-        if inst.name in live_data:
-          # Instance is running
-          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
-        else:
-          consinfo[inst.name] = None
-      assert set(consinfo.keys()) == set(instance_names)
-    else:
-      consinfo = None
-
-    if query.IQ_NODES in self.requested_data:
-      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
-                                            instance_list)))
-      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
-      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
-                    for uuid in set(map(operator.attrgetter("group"),
-                                        nodes.values())))
-    else:
-      nodes = None
-      groups = None
-
-    if query.IQ_NETWORKS in self.requested_data:
-      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
-                                    for i in instance_list))
-      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
-    else:
-      networks = None
-
-    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
-                                   disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo,
-                                   nodes, groups, networks)
-
-
-class LUInstanceQuery(NoHooksLU):
-  """Logical unit for querying instances.
-
-  """
-  # pylint: disable=W0142
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                             self.op.output_fields, self.op.use_locking)
-
-  def ExpandNames(self):
-    self.iq.ExpandNames(self)
-
-  def DeclareLocks(self, level):
-    self.iq.DeclareLocks(self, level)
-
-  def Exec(self, feedback_fn):
-    return self.iq.OldStyleQuery(self)
-
-
-class LUInstanceQueryData(NoHooksLU):
-  """Query runtime instance data.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {}
-
-    # Use locking if requested or when non-static information is wanted
-    if not (self.op.static or self.op.use_locking):
-      self.LogWarning("Non-static data requested, locks need to be acquired")
-      self.op.use_locking = True
-
-    if self.op.instances or not self.op.use_locking:
-      # Expand instance names right here
-      self.wanted_names = _GetWantedInstances(self, self.op.instances)
-    else:
-      # Will use acquired locks
-      self.wanted_names = None
-
-    if self.op.use_locking:
-      self.share_locks = _ShareAll()
-
-      if self.wanted_names is None:
-        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
-      else:
-        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
-
-      self.needed_locks[locking.LEVEL_NODEGROUP] = []
-      self.needed_locks[locking.LEVEL_NODE] = []
-      self.needed_locks[locking.LEVEL_NETWORK] = []
-      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
-  def DeclareLocks(self, level):
-    if self.op.use_locking:
-      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
-      if level == locking.LEVEL_NODEGROUP:
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        self.needed_locks[locking.LEVEL_NODEGROUP] = \
-          frozenset(group_uuid
-                    for instance_name in owned_instances
-                    for group_uuid in
-                    self.cfg.GetInstanceNodeGroups(instance_name))
-
-      elif level == locking.LEVEL_NODE:
-        self._LockInstancesNodes()
-
-      elif level == locking.LEVEL_NETWORK:
-        self.needed_locks[locking.LEVEL_NETWORK] = \
-          frozenset(net_uuid
-                    for instance_name in owned_instances
-                    for net_uuid in
-                    self.cfg.GetInstanceNetworks(instance_name))
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional instance list against the existing names.
-
-    """
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
-    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
-
-    if self.wanted_names is None:
-      assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = owned_instances
-
-    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
-
-    if self.op.use_locking:
-      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
-                                None)
-    else:
-      assert not (owned_instances or owned_groups or
-                  owned_nodes or owned_networks)
-
-    self.wanted_instances = instances.values()
-
-  def _ComputeBlockdevStatus(self, node, instance, dev):
-    """Returns the status of a block device
-
-    """
-    if self.op.static or not node:
-      return None
-
-    self.cfg.SetDiskID(dev, node)
-
-    result = self.rpc.call_blockdev_find(node, dev)
-    if result.offline:
-      return None
-
-    result.Raise("Can't compute disk status for %s" % instance.name)
-
-    status = result.payload
-    if status is None:
-      return None
-
-    return (status.dev_path, status.major, status.minor,
-            status.sync_percent, status.estimated_time,
-            status.is_degraded, status.ldisk_status)
-
-  def _ComputeDiskStatus(self, instance, snode, dev):
-    """Compute block device status.
-
-    """
-    (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
-
-    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
-
-  def _ComputeDiskStatusInner(self, instance, snode, dev):
-    """Compute block device status.
-
-    @attention: The device has to be annotated already.
-
-    """
-    if dev.dev_type in constants.LDS_DRBD:
-      # we change the snode then (otherwise we use the one passed in)
-      if dev.logical_id[0] == instance.primary_node:
-        snode = dev.logical_id[1]
-      else:
-        snode = dev.logical_id[0]
-
-    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
-                                              instance, dev)
-    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
-
-    if dev.children:
-      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
-                                        instance, snode),
-                         dev.children)
-    else:
-      dev_children = []
-
-    return {
-      "iv_name": dev.iv_name,
-      "dev_type": dev.dev_type,
-      "logical_id": dev.logical_id,
-      "physical_id": dev.physical_id,
-      "pstatus": dev_pstatus,
-      "sstatus": dev_sstatus,
-      "children": dev_children,
-      "mode": dev.mode,
-      "size": dev.size,
-      "name": dev.name,
-      "uuid": dev.uuid,
-      }
-
-  def Exec(self, feedback_fn):
-    """Gather and return data"""
-    result = {}
-
-    cluster = self.cfg.GetClusterInfo()
-
-    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
-    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
-
-    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
-                                                 for node in nodes.values()))
-
-    group2name_fn = lambda uuid: groups[uuid].name
-    for instance in self.wanted_instances:
-      pnode = nodes[instance.primary_node]
-
-      if self.op.static or pnode.offline:
-        remote_state = None
-        if pnode.offline:
-          self.LogWarning("Primary node %s is marked offline, returning static"
-                          " information only for instance %s" %
-                          (pnode.name, instance.name))
-      else:
-        remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                  instance.name,
-                                                  instance.hypervisor)
-        remote_info.Raise("Error checking node %s" % instance.primary_node)
-        remote_info = remote_info.payload
-        if remote_info and "state" in remote_info:
-          remote_state = "up"
-        else:
-          if instance.admin_state == constants.ADMINST_UP:
-            remote_state = "down"
-          else:
-            remote_state = instance.admin_state
-
-      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
-                  instance.disks)
-
-      snodes_group_uuids = [nodes[snode_name].group
-                            for snode_name in instance.secondary_nodes]
-
-      result[instance.name] = {
-        "name": instance.name,
-        "config_state": instance.admin_state,
-        "run_state": remote_state,
-        "pnode": instance.primary_node,
-        "pnode_group_uuid": pnode.group,
-        "pnode_group_name": group2name_fn(pnode.group),
-        "snodes": instance.secondary_nodes,
-        "snodes_group_uuids": snodes_group_uuids,
-        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
-        "os": instance.os,
-        # this happens to be the same format used for hooks
-        "nics": _NICListToTuple(self, instance.nics),
-        "disk_template": instance.disk_template,
-        "disks": disks,
-        "hypervisor": instance.hypervisor,
-        "network_port": instance.network_port,
-        "hv_instance": instance.hvparams,
-        "hv_actual": cluster.FillHV(instance, skip_globals=True),
-        "be_instance": instance.beparams,
-        "be_actual": cluster.FillBE(instance),
-        "os_instance": instance.osparams,
-        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
-        "serial_no": instance.serial_no,
-        "mtime": instance.mtime,
-        "ctime": instance.ctime,
-        "uuid": instance.uuid,
-        }
-
-    return result
-
-
 class LUInstanceMultiAlloc(NoHooksLU):
   """Allocates multiple instances at the same time.
 
b/lib/cmdlib/instance_query.py (new file)
@@ -0,0 +1,433 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Logical units for querying instances."""
+
+import itertools
+import logging
+import operator
+
+from ganeti import compat
+from ganeti import constants
+from ganeti import locking
+from ganeti import qlang
+from ganeti import query
+from ganeti.cmdlib.base import _QueryBase, NoHooksLU
+from ganeti.cmdlib.common import _ShareAll, _GetWantedInstances, \
+  _CheckInstanceNodeGroups, _CheckInstancesNodeGroups, _AnnotateDiskParams
+from ganeti.cmdlib.instance_operation import _GetInstanceConsole
+from ganeti.cmdlib.instance_utils import _NICListToTuple
+
+import ganeti.masterd.instance
+
+
+class _InstanceQuery(_QueryBase):
+  FIELDS = query.INSTANCE_FIELDS
+
+  def ExpandNames(self, lu):
+    lu.needed_locks = {}
+    lu.share_locks = _ShareAll()
+
+    if self.names:
+      self.wanted = _GetWantedInstances(lu, self.names)
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = (self.use_locking and
+                       query.IQ_LIVE in self.requested_data)
+    if self.do_locking:
+      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
+      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
+      lu.needed_locks[locking.LEVEL_NODE] = []
+      lu.needed_locks[locking.LEVEL_NETWORK] = []
+      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+    self.do_grouplocks = (self.do_locking and
+                          query.IQ_NODES in self.requested_data)
+
+  def DeclareLocks(self, lu, level):
+    if self.do_locking:
+      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
+        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
+
+        # Lock all groups used by instances optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
+          set(group_uuid
+              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
+      elif level == locking.LEVEL_NODE:
+        lu._LockInstancesNodes() # pylint: disable=W0212
+
+      elif level == locking.LEVEL_NETWORK:
+        lu.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
+
+  @staticmethod
+  def _CheckGroupLocks(lu):
+    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
+
+    # Check if node groups for locked instances are still correct
+    for instance_name in owned_instances:
+      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
+
+  def _GetQueryData(self, lu):
+    """Computes the list of instances and their attributes.
+
+    """
+    if self.do_grouplocks:
+      self._CheckGroupLocks(lu)
+
+    cluster = lu.cfg.GetClusterInfo()
+    all_info = lu.cfg.GetAllInstancesInfo()
+
+    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
+
+    instance_list = [all_info[name] for name in instance_names]
+    nodes = frozenset(itertools.chain(*(inst.all_nodes
+                                        for inst in instance_list)))
+    hv_list = list(set([inst.hypervisor for inst in instance_list]))
+    bad_nodes = []
+    offline_nodes = []
+    wrongnode_inst = set()
+
+    # Gather data as requested
+    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
+      live_data = {}
+      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
+      for name in nodes:
+        result = node_data[name]
+        if result.offline:
+          # offline nodes will be in both lists
+          assert result.fail_msg
+          offline_nodes.append(name)
+        if result.fail_msg:
+          bad_nodes.append(name)
+        elif result.payload:
+          for inst in result.payload:
+            if inst in all_info:
+              if all_info[inst].primary_node == name:
+                live_data.update(result.payload)
+              else:
+                wrongnode_inst.add(inst)
+            else:
+              # orphan instance; we don't list it here as we don't
+              # handle this case yet in the output of instance listing
+              logging.warning("Orphan instance '%s' found on node %s",
+                              inst, name)
+              # else no instance is alive
+    else:
+      live_data = {}
+
+    if query.IQ_DISKUSAGE in self.requested_data:
+      gmi = ganeti.masterd.instance
+      disk_usage = dict((inst.name,
+                         gmi.ComputeDiskSize(inst.disk_template,
+                                             [{constants.IDISK_SIZE: disk.size}
+                                              for disk in inst.disks]))
+                        for inst in instance_list)
+    else:
+      disk_usage = None
+
+    if query.IQ_CONSOLE in self.requested_data:
+      consinfo = {}
+      for inst in instance_list:
+        if inst.name in live_data:
+          # Instance is running
+          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
+        else:
+          consinfo[inst.name] = None
+      assert set(consinfo.keys()) == set(instance_names)
+    else:
+      consinfo = None
+
+    if query.IQ_NODES in self.requested_data:
+      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
+                                            instance_list)))
+      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
+      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
+                    for uuid in set(map(operator.attrgetter("group"),
+                                        nodes.values())))
+    else:
+      nodes = None
+      groups = None
+
+    if query.IQ_NETWORKS in self.requested_data:
+      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
+                                    for i in instance_list))
+      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
+    else:
+      networks = None
+
+    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
+                                   disk_usage, offline_nodes, bad_nodes,
+                                   live_data, wrongnode_inst, consinfo,
+                                   nodes, groups, networks)
+
+
+class LUInstanceQuery(NoHooksLU):
+  """Logical unit for querying instances.
+
+  """
+  # pylint: disable=W0142
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                             self.op.output_fields, self.op.use_locking)
+
+  def ExpandNames(self):
+    self.iq.ExpandNames(self)
+
+  def DeclareLocks(self, level):
+    self.iq.DeclareLocks(self, level)
+
+  def Exec(self, feedback_fn):
+    return self.iq.OldStyleQuery(self)
+
+
+class LUInstanceQueryData(NoHooksLU):
+  """Query runtime instance data.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+
+    # Use locking if requested or when non-static information is wanted
+    if not (self.op.static or self.op.use_locking):
+      self.LogWarning("Non-static data requested, locks need to be acquired")
+      self.op.use_locking = True
+
+    if self.op.instances or not self.op.use_locking:
+      # Expand instance names right here
+      self.wanted_names = _GetWantedInstances(self, self.op.instances)
+    else:
+      # Will use acquired locks
+      self.wanted_names = None
+
+    if self.op.use_locking:
+      self.share_locks = _ShareAll()
+
+      if self.wanted_names is None:
+        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+      else:
+        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
+
+      self.needed_locks[locking.LEVEL_NODEGROUP] = []
+      self.needed_locks[locking.LEVEL_NODE] = []
+      self.needed_locks[locking.LEVEL_NETWORK] = []
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+  def DeclareLocks(self, level):
+    if self.op.use_locking:
+      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+      if level == locking.LEVEL_NODEGROUP:
+
+        # Lock all groups used by instances optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        self.needed_locks[locking.LEVEL_NODEGROUP] = \
+          frozenset(group_uuid
+                    for instance_name in owned_instances
+                    for group_uuid in
+                    self.cfg.GetInstanceNodeGroups(instance_name))
+
+      elif level == locking.LEVEL_NODE:
+        self._LockInstancesNodes()
+
+      elif level == locking.LEVEL_NETWORK:
+        self.needed_locks[locking.LEVEL_NETWORK] = \
+          frozenset(net_uuid
+                    for instance_name in owned_instances
+                    for net_uuid in
+                    self.cfg.GetInstanceNetworks(instance_name))
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the optional instance list against the existing names.
+
+    """
+    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
+
+    if self.wanted_names is None:
+      assert self.op.use_locking, "Locking was not used"
+      self.wanted_names = owned_instances
+
+    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
+
+    if self.op.use_locking:
+      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
+                                None)
+    else:
+      assert not (owned_instances or owned_groups or
+                  owned_nodes or owned_networks)
+
+    self.wanted_instances = instances.values()
+
+  def _ComputeBlockdevStatus(self, node, instance, dev):
+    """Returns the status of a block device
+
+    """
+    if self.op.static or not node:
+      return None
+
+    self.cfg.SetDiskID(dev, node)
+
+    result = self.rpc.call_blockdev_find(node, dev)
+    if result.offline:
+      return None
+
+    result.Raise("Can't compute disk status for %s" % instance.name)
+
+    status = result.payload
+    if status is None:
+      return None
+
+    return (status.dev_path, status.major, status.minor,
+            status.sync_percent, status.estimated_time,
+            status.is_degraded, status.ldisk_status)
+
+  def _ComputeDiskStatus(self, instance, snode, dev):
+    """Compute block device status.
+
+    """
+    (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
+
+    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
+
+  def _ComputeDiskStatusInner(self, instance, snode, dev):
+    """Compute block device status.
+
+    @attention: The device has to be annotated already.
+
+    """
+    if dev.dev_type in constants.LDS_DRBD:
+      # we change the snode then (otherwise we use the one passed in)
+      if dev.logical_id[0] == instance.primary_node:
+        snode = dev.logical_id[1]
+      else:
+        snode = dev.logical_id[0]
+
+    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
+                                              instance, dev)
+    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
+
+    if dev.children:
+      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
+                                        instance, snode),
+                         dev.children)
+    else:
+      dev_children = []
+
+    return {
+      "iv_name": dev.iv_name,
+      "dev_type": dev.dev_type,
+      "logical_id": dev.logical_id,
+      "physical_id": dev.physical_id,
+      "pstatus": dev_pstatus,
+      "sstatus": dev_sstatus,
+      "children": dev_children,
+      "mode": dev.mode,
+      "size": dev.size,
+      "name": dev.name,
+      "uuid": dev.uuid,
+      }
+
+  def Exec(self, feedback_fn):
+    """Gather and return data"""
+    result = {}
+
+    cluster = self.cfg.GetClusterInfo()
+
+    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+
+    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
+                                                 for node in nodes.values()))
+
+    group2name_fn = lambda uuid: groups[uuid].name
+    for instance in self.wanted_instances:
+      pnode = nodes[instance.primary_node]
+
+      if self.op.static or pnode.offline:
+        remote_state = None
+        if pnode.offline:
+          self.LogWarning("Primary node %s is marked offline, returning static"
+                          " information only for instance %s" %
+                          (pnode.name, instance.name))
+      else:
+        remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                                  instance.name,
+                                                  instance.hypervisor)
+        remote_info.Raise("Error checking node %s" % instance.primary_node)
+        remote_info = remote_info.payload
+        if remote_info and "state" in remote_info:
+          remote_state = "up"
+        else:
+          if instance.admin_state == constants.ADMINST_UP:
+            remote_state = "down"
+          else:
+            remote_state = instance.admin_state
+
+      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
+                  instance.disks)
+
+      snodes_group_uuids = [nodes[snode_name].group
+                            for snode_name in instance.secondary_nodes]
+
+      result[instance.name] = {
+        "name": instance.name,
+        "config_state": instance.admin_state,
+        "run_state": remote_state,
+        "pnode": instance.primary_node,
+        "pnode_group_uuid": pnode.group,
+        "pnode_group_name": group2name_fn(pnode.group),
+        "snodes": instance.secondary_nodes,
+        "snodes_group_uuids": snodes_group_uuids,
+        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
+        "os": instance.os,
+        # this happens to be the same format used for hooks
+        "nics": _NICListToTuple(self, instance.nics),
+        "disk_template": instance.disk_template,
+        "disks": disks,
+        "hypervisor": instance.hypervisor,
+        "network_port": instance.network_port,
+        "hv_instance": instance.hvparams,
+        "hv_actual": cluster.FillHV(instance, skip_globals=True),
+        "be_instance": instance.beparams,
+        "be_actual": cluster.FillBE(instance),
+        "os_instance": instance.osparams,
+        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
+        "serial_no": instance.serial_no,
+        "mtime": instance.mtime,
+        "ctime": instance.ctime,
+        "uuid": instance.uuid,
+        }
+
+    return result
b/lib/cmdlib/query.py
@@ -28,7 +28,7 @@
 from ganeti.cmdlib.base import NoHooksLU
 from ganeti.cmdlib.cluster import _ClusterQuery
 from ganeti.cmdlib.group import _GroupQuery
-from ganeti.cmdlib.instance import _InstanceQuery
+from ganeti.cmdlib.instance_query import _InstanceQuery
 from ganeti.cmdlib.misc import _ExtStorageQuery
 from ganeti.cmdlib.network import _NetworkQuery
 from ganeti.cmdlib.node import _NodeQuery
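
For context, one quick way to verify that this is a pure code move is to check that the package-level and module-level names resolve to the same class; a minimal sketch (hypothetical check, not part of this revision):

    # Hypothetical sanity check: the relocated LU is the same object whether
    # imported from the ganeti.cmdlib package or from the new module.
    from ganeti.cmdlib import LUInstanceQuery as lu_pkg
    from ganeti.cmdlib.instance_query import LUInstanceQuery as lu_mod
    assert lu_pkg is lu_mod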
