Revision 8aa8f6b1 lib/cmdlib/instance.py

--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -23,9 +23,7 @@
 
 import OpenSSL
 import copy
-import itertools
 import logging
-import operator
 import os
 
 from ganeti import compat
@@ -40,29 +38,24 @@
 from ganeti import objects
 from ganeti import opcodes
 from ganeti import pathutils
-from ganeti import qlang
 from ganeti import rpc
 from ganeti import utils
-from ganeti import query
 
-from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
-  ResultWithJobs
+from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
 
 from ganeti.cmdlib.common import INSTANCE_DOWN, \
   INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
   _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
   _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
   _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
-  _GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
-  _GetUpdatedParams, _ExpandInstanceName, _ComputeIPolicySpecViolation, \
-  _CheckInstanceState, _ExpandNodeName
+  _AnnotateDiskParams, _GetUpdatedParams, _ExpandInstanceName, \
+  _ComputeIPolicySpecViolation, _CheckInstanceState, _ExpandNodeName
 from ganeti.cmdlib.instance_storage import _CreateDisks, \
   _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
   _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
   _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
   _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
   _AssembleInstanceDisks
-from ganeti.cmdlib.instance_operation import _GetInstanceConsole
 from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
   _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
   _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
@@ -1809,400 +1802,6 @@
                                  (instance.name, target_node, msg))
 
 
-class _InstanceQuery(_QueryBase):
-  FIELDS = query.INSTANCE_FIELDS
-
-  def ExpandNames(self, lu):
-    lu.needed_locks = {}
-    lu.share_locks = _ShareAll()
-
-    if self.names:
-      self.wanted = _GetWantedInstances(lu, self.names)
-    else:
-      self.wanted = locking.ALL_SET
-
-    self.do_locking = (self.use_locking and
-                       query.IQ_LIVE in self.requested_data)
-    if self.do_locking:
-      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
-      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
-      lu.needed_locks[locking.LEVEL_NODE] = []
-      lu.needed_locks[locking.LEVEL_NETWORK] = []
-      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
-    self.do_grouplocks = (self.do_locking and
-                          query.IQ_NODES in self.requested_data)
-
-  def DeclareLocks(self, lu, level):
-    if self.do_locking:
-      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
-        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
-          set(group_uuid
-              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
-      elif level == locking.LEVEL_NODE:
-        lu._LockInstancesNodes() # pylint: disable=W0212
-
-      elif level == locking.LEVEL_NETWORK:
-        lu.needed_locks[locking.LEVEL_NETWORK] = \
-          frozenset(net_uuid
-                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
-                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
-
-  @staticmethod
-  def _CheckGroupLocks(lu):
-    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
-
-    # Check if node groups for locked instances are still correct
-    for instance_name in owned_instances:
-      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
-
-  def _GetQueryData(self, lu):
-    """Computes the list of instances and their attributes.
-
-    """
-    if self.do_grouplocks:
-      self._CheckGroupLocks(lu)
-
-    cluster = lu.cfg.GetClusterInfo()
-    all_info = lu.cfg.GetAllInstancesInfo()
-
-    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
-
-    instance_list = [all_info[name] for name in instance_names]
-    nodes = frozenset(itertools.chain(*(inst.all_nodes
-                                        for inst in instance_list)))
-    hv_list = list(set([inst.hypervisor for inst in instance_list]))
-    bad_nodes = []
-    offline_nodes = []
-    wrongnode_inst = set()
-
-    # Gather data as requested
-    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
-      live_data = {}
-      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
-      for name in nodes:
-        result = node_data[name]
-        if result.offline:
-          # offline nodes will be in both lists
-          assert result.fail_msg
-          offline_nodes.append(name)
-        if result.fail_msg:
-          bad_nodes.append(name)
-        elif result.payload:
-          for inst in result.payload:
-            if inst in all_info:
-              if all_info[inst].primary_node == name:
-                live_data.update(result.payload)
-              else:
-                wrongnode_inst.add(inst)
-            else:
-              # orphan instance; we don't list it here as we don't
-              # handle this case yet in the output of instance listing
-              logging.warning("Orphan instance '%s' found on node %s",
-                              inst, name)
-              # else no instance is alive
-    else:
-      live_data = {}
-
-    if query.IQ_DISKUSAGE in self.requested_data:
-      gmi = ganeti.masterd.instance
-      disk_usage = dict((inst.name,
-                         gmi.ComputeDiskSize(inst.disk_template,
-                                             [{constants.IDISK_SIZE: disk.size}
-                                              for disk in inst.disks]))
-                        for inst in instance_list)
-    else:
-      disk_usage = None
-
-    if query.IQ_CONSOLE in self.requested_data:
-      consinfo = {}
-      for inst in instance_list:
-        if inst.name in live_data:
-          # Instance is running
-          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
-        else:
-          consinfo[inst.name] = None
-      assert set(consinfo.keys()) == set(instance_names)
-    else:
-      consinfo = None
-
-    if query.IQ_NODES in self.requested_data:
-      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
-                                            instance_list)))
-      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
-      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
-                    for uuid in set(map(operator.attrgetter("group"),
-                                        nodes.values())))
-    else:
-      nodes = None
-      groups = None
-
-    if query.IQ_NETWORKS in self.requested_data:
-      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
-                                    for i in instance_list))
-      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
-    else:
-      networks = None
-
-    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
-                                   disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo,
-                                   nodes, groups, networks)
-
-
-class LUInstanceQuery(NoHooksLU):
-  """Logical unit for querying instances.
-
-  """
-  # pylint: disable=W0142
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                             self.op.output_fields, self.op.use_locking)
-
-  def ExpandNames(self):
-    self.iq.ExpandNames(self)
-
-  def DeclareLocks(self, level):
-    self.iq.DeclareLocks(self, level)
-
-  def Exec(self, feedback_fn):
-    return self.iq.OldStyleQuery(self)
-
-
-class LUInstanceQueryData(NoHooksLU):
-  """Query runtime instance data.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {}
-
-    # Use locking if requested or when non-static information is wanted
-    if not (self.op.static or self.op.use_locking):
-      self.LogWarning("Non-static data requested, locks need to be acquired")
-      self.op.use_locking = True
-
-    if self.op.instances or not self.op.use_locking:
-      # Expand instance names right here
-      self.wanted_names = _GetWantedInstances(self, self.op.instances)
-    else:
-      # Will use acquired locks
-      self.wanted_names = None
-
-    if self.op.use_locking:
-      self.share_locks = _ShareAll()
-
-      if self.wanted_names is None:
-        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
-      else:
-        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
-
-      self.needed_locks[locking.LEVEL_NODEGROUP] = []
-      self.needed_locks[locking.LEVEL_NODE] = []
-      self.needed_locks[locking.LEVEL_NETWORK] = []
-      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
-  def DeclareLocks(self, level):
-    if self.op.use_locking:
-      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
-      if level == locking.LEVEL_NODEGROUP:
-
-        # Lock all groups used by instances optimistically; this requires going
-        # via the node before it's locked, requiring verification later on
-        self.needed_locks[locking.LEVEL_NODEGROUP] = \
-          frozenset(group_uuid
-                    for instance_name in owned_instances
-                    for group_uuid in
-                    self.cfg.GetInstanceNodeGroups(instance_name))
-
-      elif level == locking.LEVEL_NODE:
-        self._LockInstancesNodes()
-
-      elif level == locking.LEVEL_NETWORK:
-        self.needed_locks[locking.LEVEL_NETWORK] = \
-          frozenset(net_uuid
-                    for instance_name in owned_instances
-                    for net_uuid in
-                    self.cfg.GetInstanceNetworks(instance_name))
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional instance list against the existing names.
-
-    """
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
-    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
-
-    if self.wanted_names is None:
-      assert self.op.use_locking, "Locking was not used"
-      self.wanted_names = owned_instances
-
-    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
-
-    if self.op.use_locking:
-      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
-                                None)
-    else:
-      assert not (owned_instances or owned_groups or
-                  owned_nodes or owned_networks)
-
-    self.wanted_instances = instances.values()
-
-  def _ComputeBlockdevStatus(self, node, instance, dev):
-    """Returns the status of a block device
-
-    """
-    if self.op.static or not node:
-      return None
-
-    self.cfg.SetDiskID(dev, node)
-
-    result = self.rpc.call_blockdev_find(node, dev)
-    if result.offline:
-      return None
-
-    result.Raise("Can't compute disk status for %s" % instance.name)
-
-    status = result.payload
-    if status is None:
-      return None
-
-    return (status.dev_path, status.major, status.minor,
-            status.sync_percent, status.estimated_time,
-            status.is_degraded, status.ldisk_status)
-
-  def _ComputeDiskStatus(self, instance, snode, dev):
-    """Compute block device status.
-
-    """
-    (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
-
-    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
-
-  def _ComputeDiskStatusInner(self, instance, snode, dev):
-    """Compute block device status.
-
-    @attention: The device has to be annotated already.
-
-    """
-    if dev.dev_type in constants.LDS_DRBD:
-      # we change the snode then (otherwise we use the one passed in)
-      if dev.logical_id[0] == instance.primary_node:
-        snode = dev.logical_id[1]
-      else:
-        snode = dev.logical_id[0]
-
-    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
-                                              instance, dev)
-    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
-
-    if dev.children:
-      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
-                                        instance, snode),
-                         dev.children)
-    else:
-      dev_children = []
-
-    return {
-      "iv_name": dev.iv_name,
-      "dev_type": dev.dev_type,
-      "logical_id": dev.logical_id,
-      "physical_id": dev.physical_id,
-      "pstatus": dev_pstatus,
-      "sstatus": dev_sstatus,
-      "children": dev_children,
-      "mode": dev.mode,
-      "size": dev.size,
-      "name": dev.name,
-      "uuid": dev.uuid,
-      }
-
-  def Exec(self, feedback_fn):
-    """Gather and return data"""
-    result = {}
-
-    cluster = self.cfg.GetClusterInfo()
-
-    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
-    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
-
-    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
-                                                 for node in nodes.values()))
-
-    group2name_fn = lambda uuid: groups[uuid].name
-    for instance in self.wanted_instances:
-      pnode = nodes[instance.primary_node]
-
-      if self.op.static or pnode.offline:
-        remote_state = None
-        if pnode.offline:
-          self.LogWarning("Primary node %s is marked offline, returning static"
-                          " information only for instance %s" %
-                          (pnode.name, instance.name))
-      else:
-        remote_info = self.rpc.call_instance_info(instance.primary_node,
-                                                  instance.name,
-                                                  instance.hypervisor)
-        remote_info.Raise("Error checking node %s" % instance.primary_node)
-        remote_info = remote_info.payload
-        if remote_info and "state" in remote_info:
-          remote_state = "up"
-        else:
-          if instance.admin_state == constants.ADMINST_UP:
-            remote_state = "down"
-          else:
-            remote_state = instance.admin_state
-
-      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
-                  instance.disks)
-
-      snodes_group_uuids = [nodes[snode_name].group
-                            for snode_name in instance.secondary_nodes]
-
-      result[instance.name] = {
-        "name": instance.name,
-        "config_state": instance.admin_state,
-        "run_state": remote_state,
-        "pnode": instance.primary_node,
-        "pnode_group_uuid": pnode.group,
-        "pnode_group_name": group2name_fn(pnode.group),
-        "snodes": instance.secondary_nodes,
-        "snodes_group_uuids": snodes_group_uuids,
-        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
-        "os": instance.os,
-        # this happens to be the same format used for hooks
-        "nics": _NICListToTuple(self, instance.nics),
-        "disk_template": instance.disk_template,
-        "disks": disks,
-        "hypervisor": instance.hypervisor,
-        "network_port": instance.network_port,
-        "hv_instance": instance.hvparams,
-        "hv_actual": cluster.FillHV(instance, skip_globals=True),
-        "be_instance": instance.beparams,
-        "be_actual": cluster.FillBE(instance),
-        "os_instance": instance.osparams,
-        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
-        "serial_no": instance.serial_no,
-        "mtime": instance.mtime,
-        "ctime": instance.ctime,
-        "uuid": instance.uuid,
-        }
-
-    return result
-
-
 class LUInstanceMultiAlloc(NoHooksLU):
   """Allocates multiple instances at the same time.
 
