root / lib / cmdlib / node.py @ a295eb80
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with nodes."""

import logging
import operator

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import qlang
from ganeti import query
from ganeti import rpc
from ganeti import utils
from ganeti.masterd import iallocator

from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import CheckParamsNotGlobal, \
  MergeAndVerifyHvState, MergeAndVerifyDiskState, \
  IsExclusiveStorageEnabledNode, CheckNodePVs, \
  RedistributeAncillaryFiles, ExpandNodeName, ShareAll, SupportsOob, \
  CheckInstanceState, INSTANCE_DOWN, GetUpdatedParams, \
  AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
  GetWantedNodes, MapInstanceDisksToNodes, RunPostHook, \
  FindFaultyInstanceDisks


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    # Exclude added node
    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
    post_nodes = pre_nodes + [self.op.node_name, ]

    return (pre_nodes, post_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

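    # Make sure the new node's addresses do not collide with those of any
    # existing node; on a re-add, only the primary IP is allowed to change.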
    for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                              source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group, ndparams={})

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
                           "node", "cluster or group")

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)

    if self.op.disk_state:
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)

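    # Contact the new node (it is not yet part of the configuration, hence
    # the DNS-only RPC runner) and make sure it runs the same protocol
    # version as the master.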
    # TODO: If we need to have multiple DnsOnlyRunner we probably should make
    #       it a property on the base class.
    rpcrunner = rpc.DnsOnlyRunner()
    result = rpcrunner.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpPrereqError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.payload),
                                 errors.ECODE_ENVIRON)

289 31b836b8 Thomas Thrainer
    if vg_name is not None:
290 31b836b8 Thomas Thrainer
      vparams = {constants.NV_PVLIST: [vg_name]}
291 5eacbcae Thomas Thrainer
      excl_stor = IsExclusiveStorageEnabledNode(cfg, self.new_node)
292 31b836b8 Thomas Thrainer
      cname = self.cfg.GetClusterName()
293 5b0dfcef Helga Velroyen
      result = rpcrunner.call_node_verify_light(
294 5b0dfcef Helga Velroyen
          [node], vparams, cname, cfg.GetClusterInfo().hvparams)[node]
295 5eacbcae Thomas Thrainer
      (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
296 31b836b8 Thomas Thrainer
      if errmsgs:
297 31b836b8 Thomas Thrainer
        raise errors.OpPrereqError("Checks on node PVs failed: %s" %
298 31b836b8 Thomas Thrainer
                                   "; ".join(errmsgs), errors.ECODE_ENVIRON)
299 31b836b8 Thomas Thrainer
300 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
301 31b836b8 Thomas Thrainer
    """Adds the new node to the cluster.
302 31b836b8 Thomas Thrainer

303 31b836b8 Thomas Thrainer
    """
304 31b836b8 Thomas Thrainer
    new_node = self.new_node
305 31b836b8 Thomas Thrainer
    node = new_node.name
306 31b836b8 Thomas Thrainer
307 31b836b8 Thomas Thrainer
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
308 31b836b8 Thomas Thrainer
      "Not owning BGL"
309 31b836b8 Thomas Thrainer
310 31b836b8 Thomas Thrainer
    # We are adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    if self.op.hv_state:
      new_node.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      new_node.disk_state_static = self.new_disk_state

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

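    # Have the master verify that the new node is reachable over SSH and
    # that its hostname resolves, before committing it to the configuration.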
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: ([node], {}),
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName(),
                                       self.cfg.GetClusterInfo().hvparams)
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed")

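    # Finally, distribute ancillary files and register the node with the
    # cluster context (or re-register it when re-adding).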
    if self.op.readd:
      RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        result.Warn("Node failed to demote itself from master candidate status",
                    self.LogWarning)
    else:
      RedistributeAncillaryFiles(self, additional_nodes=[node],
                                 additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


class LUNodeSetParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]

  def CheckArguments(self):
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
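    # All parameters this opcode can modify; at least one of them has to be
    # given, and no more than one flag may be set to True at the same time.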
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
                self.op.disk_state]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate is False or
                         self.op.offline is True or
                         self.op.drained is True or
                         self.op.master_capable is False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def _InstanceFilter(self, instance):
    """Filter for getting affected instances.

    """
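    # Only instances that use an internally mirrored disk template (e.g.
    # DRBD) and have this node among their nodes are affected.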
    return (instance.disk_template in constants.DTS_INT_MIRROR and
            self.op.node_name in instance.all_nodes)

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,

        # Block allocations when all nodes are locked
        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
        }
    else:
      self.needed_locks = {
        locking.LEVEL_NODE: self.op.node_name,
        }

    # Since modifying a node can have severe effects on currently running
    # operations the resource lock is at least acquired in shared mode
    self.needed_locks[locking.LEVEL_NODE_RES] = \
      self.needed_locks[locking.LEVEL_NODE]

    # Get all locks except nodes in shared mode; they are not used for anything
    # but read-only access
    self.share_locks = ShareAll()
    self.share_locks[locking.LEVEL_NODE] = 0
    self.share_locks[locking.LEVEL_NODE_RES] = 0
    self.share_locks[locking.LEVEL_NODE_ALLOC] = 0

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if self.lock_instances:
      affected_instances = \
        self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)

      # Verify instance locks
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      wanted_instances = frozenset(affected_instances.keys())
      if wanted_instances - owned_instances:
        raise errors.OpPrereqError("Instances affected by changing node %s's"
                                   " secondary IP address have changed since"
                                   " locks were acquired, wanted '%s', have"
                                   " '%s'; retry the operation" %
                                   (self.op.node_name,
                                    utils.CommaJoin(wanted_instances),
                                    utils.CommaJoin(owned_instances)),
                                   errors.ECODE_STATE)
    else:
      affected_instances = None

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable is False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion (--auto-promote or RAPI"
                                   " auto_promote=True)", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) is False and getattr(node, attr) is False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered is True):
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
                                    " offline status can be reset") %
                                   self.op.node_name, errors.ECODE_STATE)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " as it does not support out-of-band"
                                  " handling") % self.op.node_name,
                                 errors.ECODE_STATE)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained is False or self.op.offline is False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable is False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    # When changing the secondary ip, verify if this is a single-homed to
    # multi-homed transition or vice versa, and apply the relevant
    # restrictions.
    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip != node.primary_ip:
        if self.op.force and node.name == master.name:
          self.LogWarning("Transitioning from single-homed to multi-homed"
                          " cluster; all nodes will require a secondary IP"
                          " address")
        else:
          raise errors.OpPrereqError("Changing the secondary ip on a"
                                     " single-homed cluster requires the"
                                     " --force option to be passed, and the"
                                     " target node to be the master",
                                     errors.ECODE_INVAL)
      elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
        if self.op.force and node.name == master.name:
          self.LogWarning("Transitioning from multi-homed to single-homed"
                          " cluster; secondary IP addresses will have to be"
                          " removed")
        else:
          raise errors.OpPrereqError("Cannot set the secondary IP to be the"
                                     " same as the primary IP on a multi-homed"
                                     " cluster, unless the --force option is"
                                     " passed, and the target node is the"
                                     " master", errors.ECODE_INVAL)

      assert not (frozenset(affected_instances) -
                  self.owned_locks(locking.LEVEL_INSTANCE))

      if node.offline:
        if affected_instances:
          msg = ("Cannot change secondary IP address: offline node has"
                 " instances (%s) configured to use it" %
                 utils.CommaJoin(affected_instances.keys()))
          raise errors.OpPrereqError(msg, errors.ECODE_STATE)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in affected_instances.values():
          CheckInstanceState(self, instance, INSTANCE_DOWN,
                             msg="cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
                           "node", "cluster or group")
      self.new_ndparams = new_ndparams

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.node.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.node.disk_state_static)

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

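    # List of (parameter, new value) pairs describing the changes made,
    # returned to the caller at the end.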
    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    if self.op.hv_state:
      node.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      node.disk_state_static = self.new_disk_state

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


def _GetNodeInstancesInner(cfg, fn):
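  # Return all instances in the configuration that satisfy fn(instance).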
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


class LUNodeEvacuate(NoHooksLU):
  """Evacuates instances off a list of nodes.

  """
  REQ_BGL = False

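  # Map the node evacuation mode of the opcode to the corresponding
  # iallocator evacuation mode.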
  _MODE2IALLOCATOR = {
829 31b836b8 Thomas Thrainer
    constants.NODE_EVAC_PRI: constants.IALLOCATOR_NEVAC_PRI,
830 31b836b8 Thomas Thrainer
    constants.NODE_EVAC_SEC: constants.IALLOCATOR_NEVAC_SEC,
831 31b836b8 Thomas Thrainer
    constants.NODE_EVAC_ALL: constants.IALLOCATOR_NEVAC_ALL,
832 31b836b8 Thomas Thrainer
    }
833 31b836b8 Thomas Thrainer
  assert frozenset(_MODE2IALLOCATOR.keys()) == constants.NODE_EVAC_MODES
834 31b836b8 Thomas Thrainer
  assert (frozenset(_MODE2IALLOCATOR.values()) ==
835 31b836b8 Thomas Thrainer
          constants.IALLOCATOR_NEVAC_MODES)
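  # Sketch of how the table above is used in Exec below: the opcode-level
  # evacuation mode is translated one-to-one into the iallocator request
  # mode, e.g.
  #   evac_mode = self._MODE2IALLOCATOR[constants.NODE_EVAC_PRI]
  # The two asserts keep the mapping total in both directions, so the lookup
  # cannot raise KeyError for a valid opcode mode.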
836 31b836b8 Thomas Thrainer
837 31b836b8 Thomas Thrainer
  def CheckArguments(self):
838 5eacbcae Thomas Thrainer
    CheckIAllocatorOrNode(self, "iallocator", "remote_node")
839 31b836b8 Thomas Thrainer
840 31b836b8 Thomas Thrainer
  def ExpandNames(self):
841 5eacbcae Thomas Thrainer
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
842 31b836b8 Thomas Thrainer
843 31b836b8 Thomas Thrainer
    if self.op.remote_node is not None:
844 5eacbcae Thomas Thrainer
      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
845 31b836b8 Thomas Thrainer
      assert self.op.remote_node
846 31b836b8 Thomas Thrainer
847 31b836b8 Thomas Thrainer
      if self.op.remote_node == self.op.node_name:
848 31b836b8 Thomas Thrainer
        raise errors.OpPrereqError("Can not use evacuated node as a new"
849 31b836b8 Thomas Thrainer
                                   " secondary node", errors.ECODE_INVAL)
850 31b836b8 Thomas Thrainer
851 31b836b8 Thomas Thrainer
      if self.op.mode != constants.NODE_EVAC_SEC:
852 31b836b8 Thomas Thrainer
        raise errors.OpPrereqError("Without the use of an iallocator only"
853 31b836b8 Thomas Thrainer
                                   " secondary instances can be evacuated",
854 31b836b8 Thomas Thrainer
                                   errors.ECODE_INVAL)
855 31b836b8 Thomas Thrainer
856 31b836b8 Thomas Thrainer
    # Declare locks
857 5eacbcae Thomas Thrainer
    self.share_locks = ShareAll()
858 31b836b8 Thomas Thrainer
    self.needed_locks = {
859 31b836b8 Thomas Thrainer
      locking.LEVEL_INSTANCE: [],
860 31b836b8 Thomas Thrainer
      locking.LEVEL_NODEGROUP: [],
861 31b836b8 Thomas Thrainer
      locking.LEVEL_NODE: [],
862 31b836b8 Thomas Thrainer
      }
863 31b836b8 Thomas Thrainer
864 31b836b8 Thomas Thrainer
    # Determine nodes (via group) optimistically, needs verification once locks
865 31b836b8 Thomas Thrainer
    # have been acquired
866 31b836b8 Thomas Thrainer
    self.lock_nodes = self._DetermineNodes()
867 31b836b8 Thomas Thrainer
868 31b836b8 Thomas Thrainer
  def _DetermineNodes(self):
869 31b836b8 Thomas Thrainer
    """Gets the list of nodes to operate on.
870 31b836b8 Thomas Thrainer

871 31b836b8 Thomas Thrainer
    """
872 31b836b8 Thomas Thrainer
    if self.op.remote_node is None:
873 31b836b8 Thomas Thrainer
      # Iallocator will choose any node(s) in the same group
874 31b836b8 Thomas Thrainer
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
875 31b836b8 Thomas Thrainer
    else:
876 31b836b8 Thomas Thrainer
      group_nodes = frozenset([self.op.remote_node])
877 31b836b8 Thomas Thrainer
878 31b836b8 Thomas Thrainer
    # Determine nodes to be locked
879 31b836b8 Thomas Thrainer
    return set([self.op.node_name]) | group_nodes
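  # Example with made-up node names: evacuating "node1" without a remote
  # node locks "node1" plus every member of node1's node group(s), because
  # the iallocator may pick any of them as targets; with remote_node="node9"
  # only {"node1", "node9"} need to be locked.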
880 31b836b8 Thomas Thrainer
881 31b836b8 Thomas Thrainer
  def _DetermineInstances(self):
882 31b836b8 Thomas Thrainer
    """Builds list of instances to operate on.
883 31b836b8 Thomas Thrainer

884 31b836b8 Thomas Thrainer
    """
885 31b836b8 Thomas Thrainer
    assert self.op.mode in constants.NODE_EVAC_MODES
886 31b836b8 Thomas Thrainer
887 31b836b8 Thomas Thrainer
    if self.op.mode == constants.NODE_EVAC_PRI:
888 31b836b8 Thomas Thrainer
      # Primary instances only
889 31b836b8 Thomas Thrainer
      inst_fn = _GetNodePrimaryInstances
890 31b836b8 Thomas Thrainer
      assert self.op.remote_node is None, \
891 31b836b8 Thomas Thrainer
        "Evacuating primary instances requires iallocator"
892 31b836b8 Thomas Thrainer
    elif self.op.mode == constants.NODE_EVAC_SEC:
893 31b836b8 Thomas Thrainer
      # Secondary instances only
894 31b836b8 Thomas Thrainer
      inst_fn = _GetNodeSecondaryInstances
895 31b836b8 Thomas Thrainer
    else:
896 31b836b8 Thomas Thrainer
      # All instances
897 31b836b8 Thomas Thrainer
      assert self.op.mode == constants.NODE_EVAC_ALL
898 31b836b8 Thomas Thrainer
      inst_fn = _GetNodeInstances
899 31b836b8 Thomas Thrainer
      # TODO: In 2.6, change the iallocator interface to take an evacuation mode
900 31b836b8 Thomas Thrainer
      # per instance
901 31b836b8 Thomas Thrainer
      raise errors.OpPrereqError("Due to an issue with the iallocator"
902 31b836b8 Thomas Thrainer
                                 " interface it is not possible to evacuate"
903 31b836b8 Thomas Thrainer
                                 " all instances at once; specify explicitly"
904 31b836b8 Thomas Thrainer
                                 " whether to evacuate primary or secondary"
905 31b836b8 Thomas Thrainer
                                 " instances",
906 31b836b8 Thomas Thrainer
                                 errors.ECODE_INVAL)
907 31b836b8 Thomas Thrainer
908 31b836b8 Thomas Thrainer
    return inst_fn(self.cfg, self.op.node_name)
909 31b836b8 Thomas Thrainer
910 31b836b8 Thomas Thrainer
  def DeclareLocks(self, level):
911 31b836b8 Thomas Thrainer
    if level == locking.LEVEL_INSTANCE:
912 31b836b8 Thomas Thrainer
      # Lock instances optimistically, needs verification once node and group
913 31b836b8 Thomas Thrainer
      # locks have been acquired
914 31b836b8 Thomas Thrainer
      self.needed_locks[locking.LEVEL_INSTANCE] = \
915 31b836b8 Thomas Thrainer
        set(i.name for i in self._DetermineInstances())
916 31b836b8 Thomas Thrainer
917 31b836b8 Thomas Thrainer
    elif level == locking.LEVEL_NODEGROUP:
918 31b836b8 Thomas Thrainer
      # Lock node groups for all potential target nodes optimistically, needs
919 31b836b8 Thomas Thrainer
      # verification once nodes have been acquired
920 31b836b8 Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
921 31b836b8 Thomas Thrainer
        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
922 31b836b8 Thomas Thrainer
923 31b836b8 Thomas Thrainer
    elif level == locking.LEVEL_NODE:
924 31b836b8 Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
925 31b836b8 Thomas Thrainer
926 31b836b8 Thomas Thrainer
  def CheckPrereq(self):
927 31b836b8 Thomas Thrainer
    # Verify locks
928 31b836b8 Thomas Thrainer
    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
929 31b836b8 Thomas Thrainer
    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
930 31b836b8 Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
931 31b836b8 Thomas Thrainer
932 31b836b8 Thomas Thrainer
    need_nodes = self._DetermineNodes()
933 31b836b8 Thomas Thrainer
934 31b836b8 Thomas Thrainer
    if not owned_nodes.issuperset(need_nodes):
935 31b836b8 Thomas Thrainer
      raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
936 31b836b8 Thomas Thrainer
                                 " locks were acquired, current nodes are"
937 31b836b8 Thomas Thrainer
                                 " are '%s', used to be '%s'; retry the"
938 31b836b8 Thomas Thrainer
                                 " operation" %
939 31b836b8 Thomas Thrainer
                                 (self.op.node_name,
940 31b836b8 Thomas Thrainer
                                  utils.CommaJoin(need_nodes),
941 31b836b8 Thomas Thrainer
                                  utils.CommaJoin(owned_nodes)),
942 31b836b8 Thomas Thrainer
                                 errors.ECODE_STATE)
943 31b836b8 Thomas Thrainer
944 31b836b8 Thomas Thrainer
    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
945 31b836b8 Thomas Thrainer
    if owned_groups != wanted_groups:
946 31b836b8 Thomas Thrainer
      raise errors.OpExecError("Node groups changed since locks were acquired,"
947 31b836b8 Thomas Thrainer
                               " current groups are '%s', used to be '%s';"
948 31b836b8 Thomas Thrainer
                               " retry the operation" %
949 31b836b8 Thomas Thrainer
                               (utils.CommaJoin(wanted_groups),
950 31b836b8 Thomas Thrainer
                                utils.CommaJoin(owned_groups)))
951 31b836b8 Thomas Thrainer
952 31b836b8 Thomas Thrainer
    # Determine affected instances
953 31b836b8 Thomas Thrainer
    self.instances = self._DetermineInstances()
954 31b836b8 Thomas Thrainer
    self.instance_names = [i.name for i in self.instances]
955 31b836b8 Thomas Thrainer
956 31b836b8 Thomas Thrainer
    if set(self.instance_names) != owned_instances:
957 31b836b8 Thomas Thrainer
      raise errors.OpExecError("Instances on node '%s' changed since locks"
958 31b836b8 Thomas Thrainer
                               " were acquired, current instances are '%s',"
959 31b836b8 Thomas Thrainer
                               " used to be '%s'; retry the operation" %
960 31b836b8 Thomas Thrainer
                               (self.op.node_name,
961 31b836b8 Thomas Thrainer
                                utils.CommaJoin(self.instance_names),
962 31b836b8 Thomas Thrainer
                                utils.CommaJoin(owned_instances)))
963 31b836b8 Thomas Thrainer
964 31b836b8 Thomas Thrainer
    if self.instance_names:
965 31b836b8 Thomas Thrainer
      self.LogInfo("Evacuating instances from node '%s': %s",
966 31b836b8 Thomas Thrainer
                   self.op.node_name,
967 31b836b8 Thomas Thrainer
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
968 31b836b8 Thomas Thrainer
    else:
969 31b836b8 Thomas Thrainer
      self.LogInfo("No instances to evacuate from node '%s'",
970 31b836b8 Thomas Thrainer
                   self.op.node_name)
971 31b836b8 Thomas Thrainer
972 31b836b8 Thomas Thrainer
    if self.op.remote_node is not None:
973 31b836b8 Thomas Thrainer
      for i in self.instances:
974 31b836b8 Thomas Thrainer
        if i.primary_node == self.op.remote_node:
975 31b836b8 Thomas Thrainer
          raise errors.OpPrereqError("Node %s is the primary node of"
976 31b836b8 Thomas Thrainer
                                     " instance %s, cannot use it as"
977 31b836b8 Thomas Thrainer
                                     " secondary" %
978 31b836b8 Thomas Thrainer
                                     (self.op.remote_node, i.name),
979 31b836b8 Thomas Thrainer
                                     errors.ECODE_INVAL)
980 31b836b8 Thomas Thrainer
981 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
982 31b836b8 Thomas Thrainer
    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
983 31b836b8 Thomas Thrainer
984 31b836b8 Thomas Thrainer
    if not self.instance_names:
985 31b836b8 Thomas Thrainer
      # No instances to evacuate
986 31b836b8 Thomas Thrainer
      jobs = []
987 31b836b8 Thomas Thrainer
988 31b836b8 Thomas Thrainer
    elif self.op.iallocator is not None:
989 31b836b8 Thomas Thrainer
      # TODO: Implement relocation to other group
990 31b836b8 Thomas Thrainer
      evac_mode = self._MODE2IALLOCATOR[self.op.mode]
991 31b836b8 Thomas Thrainer
      req = iallocator.IAReqNodeEvac(evac_mode=evac_mode,
992 31b836b8 Thomas Thrainer
                                     instances=list(self.instance_names))
993 31b836b8 Thomas Thrainer
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
994 31b836b8 Thomas Thrainer
995 31b836b8 Thomas Thrainer
      ial.Run(self.op.iallocator)
996 31b836b8 Thomas Thrainer
997 31b836b8 Thomas Thrainer
      if not ial.success:
998 31b836b8 Thomas Thrainer
        raise errors.OpPrereqError("Can't compute node evacuation using"
999 31b836b8 Thomas Thrainer
                                   " iallocator '%s': %s" %
1000 31b836b8 Thomas Thrainer
                                   (self.op.iallocator, ial.info),
1001 31b836b8 Thomas Thrainer
                                   errors.ECODE_NORES)
1002 31b836b8 Thomas Thrainer
1003 5eacbcae Thomas Thrainer
      jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
1004 31b836b8 Thomas Thrainer
1005 31b836b8 Thomas Thrainer
    elif self.op.remote_node is not None:
1006 31b836b8 Thomas Thrainer
      assert self.op.mode == constants.NODE_EVAC_SEC
1007 31b836b8 Thomas Thrainer
      jobs = [
1008 31b836b8 Thomas Thrainer
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
1009 31b836b8 Thomas Thrainer
                                        remote_node=self.op.remote_node,
1010 31b836b8 Thomas Thrainer
                                        disks=[],
1011 31b836b8 Thomas Thrainer
                                        mode=constants.REPLACE_DISK_CHG,
1012 31b836b8 Thomas Thrainer
                                        early_release=self.op.early_release)]
1013 31b836b8 Thomas Thrainer
        for instance_name in self.instance_names]
1014 31b836b8 Thomas Thrainer
1015 31b836b8 Thomas Thrainer
    else:
1016 31b836b8 Thomas Thrainer
      raise errors.ProgrammerError("No iallocator or remote node")
1017 31b836b8 Thomas Thrainer
1018 31b836b8 Thomas Thrainer
    return ResultWithJobs(jobs)
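# Result shape of LUNodeEvacuate.Exec (sketch): "jobs" is a list of job
# definitions, each of which is itself a list of opcodes.  With an iallocator
# the jobs come from LoadNodeEvacResult; with an explicit remote node every
# affected instance gets its own single-opcode job, roughly
#   [[OpInstanceReplaceDisks(instance_name="inst1", remote_node="node9",
#                            mode=constants.REPLACE_DISK_CHG, disks=[])], ...]
# (instance and node names above are made up).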
1019 31b836b8 Thomas Thrainer
1020 31b836b8 Thomas Thrainer
1021 31b836b8 Thomas Thrainer
class LUNodeMigrate(LogicalUnit):
1022 31b836b8 Thomas Thrainer
  """Migrate all instances from a node.
1023 31b836b8 Thomas Thrainer

1024 31b836b8 Thomas Thrainer
  """
1025 31b836b8 Thomas Thrainer
  HPATH = "node-migrate"
1026 31b836b8 Thomas Thrainer
  HTYPE = constants.HTYPE_NODE
1027 31b836b8 Thomas Thrainer
  REQ_BGL = False
1028 31b836b8 Thomas Thrainer
1029 31b836b8 Thomas Thrainer
  def CheckArguments(self):
1030 31b836b8 Thomas Thrainer
    pass
1031 31b836b8 Thomas Thrainer
1032 31b836b8 Thomas Thrainer
  def ExpandNames(self):
1033 5eacbcae Thomas Thrainer
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
1034 31b836b8 Thomas Thrainer
1035 5eacbcae Thomas Thrainer
    self.share_locks = ShareAll()
1036 31b836b8 Thomas Thrainer
    self.needed_locks = {
1037 31b836b8 Thomas Thrainer
      locking.LEVEL_NODE: [self.op.node_name],
1038 31b836b8 Thomas Thrainer
      }
1039 31b836b8 Thomas Thrainer
1040 31b836b8 Thomas Thrainer
  def BuildHooksEnv(self):
1041 31b836b8 Thomas Thrainer
    """Build hooks env.
1042 31b836b8 Thomas Thrainer

1043 31b836b8 Thomas Thrainer
    This runs only on the master node.
1044 31b836b8 Thomas Thrainer

1045 31b836b8 Thomas Thrainer
    """
1046 31b836b8 Thomas Thrainer
    return {
1047 31b836b8 Thomas Thrainer
      "NODE_NAME": self.op.node_name,
1048 31b836b8 Thomas Thrainer
      "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
1049 31b836b8 Thomas Thrainer
      }
1050 31b836b8 Thomas Thrainer
1051 31b836b8 Thomas Thrainer
  def BuildHooksNodes(self):
1052 31b836b8 Thomas Thrainer
    """Build hooks nodes.
1053 31b836b8 Thomas Thrainer

1054 31b836b8 Thomas Thrainer
    """
1055 31b836b8 Thomas Thrainer
    nl = [self.cfg.GetMasterNode()]
1056 31b836b8 Thomas Thrainer
    return (nl, nl)
1057 31b836b8 Thomas Thrainer
1058 31b836b8 Thomas Thrainer
  def CheckPrereq(self):
1059 31b836b8 Thomas Thrainer
    pass
1060 31b836b8 Thomas Thrainer
1061 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1062 31b836b8 Thomas Thrainer
    # Prepare jobs for migration instances
1063 31b836b8 Thomas Thrainer
    allow_runtime_changes = self.op.allow_runtime_changes
1064 31b836b8 Thomas Thrainer
    jobs = [
1065 31b836b8 Thomas Thrainer
      [opcodes.OpInstanceMigrate(instance_name=inst.name,
1066 31b836b8 Thomas Thrainer
                                 mode=self.op.mode,
1067 31b836b8 Thomas Thrainer
                                 live=self.op.live,
1068 31b836b8 Thomas Thrainer
                                 iallocator=self.op.iallocator,
1069 31b836b8 Thomas Thrainer
                                 target_node=self.op.target_node,
1070 31b836b8 Thomas Thrainer
                                 allow_runtime_changes=allow_runtime_changes,
1071 31b836b8 Thomas Thrainer
                                 ignore_ipolicy=self.op.ignore_ipolicy)]
1072 31b836b8 Thomas Thrainer
      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)]
1073 31b836b8 Thomas Thrainer
1074 31b836b8 Thomas Thrainer
    # TODO: Run iallocator in this opcode and pass correct placement options to
1075 31b836b8 Thomas Thrainer
    # OpInstanceMigrate. Since other jobs can modify the cluster between
1076 31b836b8 Thomas Thrainer
    # running the iallocator and the actual migration, a good consistency model
1077 31b836b8 Thomas Thrainer
    # will have to be found.
1078 31b836b8 Thomas Thrainer
1079 31b836b8 Thomas Thrainer
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
1080 31b836b8 Thomas Thrainer
            frozenset([self.op.node_name]))
1081 31b836b8 Thomas Thrainer
1082 31b836b8 Thomas Thrainer
    return ResultWithJobs(jobs)
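# Sketch of the result built above: one single-opcode job per primary
# instance of the node, e.g.
#   [[OpInstanceMigrate(instance_name="inst1", ...)],
#    [OpInstanceMigrate(instance_name="inst2", ...)]]
# with "inst1"/"inst2" standing in for the real instance names.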
1083 31b836b8 Thomas Thrainer
1084 31b836b8 Thomas Thrainer
1085 31b836b8 Thomas Thrainer
def _GetStorageTypeArgs(cfg, storage_type):
1086 31b836b8 Thomas Thrainer
  """Returns the arguments for a storage type.
1087 31b836b8 Thomas Thrainer

1088 31b836b8 Thomas Thrainer
  """
1089 31b836b8 Thomas Thrainer
  # Special case for file storage
1090 31b836b8 Thomas Thrainer
  if storage_type == constants.ST_FILE:
1091 31b836b8 Thomas Thrainer
    # storage.FileStorage wants a list of storage directories
1092 31b836b8 Thomas Thrainer
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1093 31b836b8 Thomas Thrainer
1094 31b836b8 Thomas Thrainer
  return []
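# Usage sketch (directories are illustrative): for constants.ST_FILE this
# returns a single-element argument list along the lines of
#   [["/srv/ganeti/file-storage", "/srv/ganeti/shared-file-storage"]]
# while every other storage type currently gets an empty argument list.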
1095 31b836b8 Thomas Thrainer
1096 31b836b8 Thomas Thrainer
1097 31b836b8 Thomas Thrainer
class LUNodeModifyStorage(NoHooksLU):
1098 31b836b8 Thomas Thrainer
  """Logical unit for modifying a storage volume on a node.
1099 31b836b8 Thomas Thrainer

1100 31b836b8 Thomas Thrainer
  """
1101 31b836b8 Thomas Thrainer
  REQ_BGL = False
1102 31b836b8 Thomas Thrainer
1103 31b836b8 Thomas Thrainer
  def CheckArguments(self):
1104 5eacbcae Thomas Thrainer
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
1105 31b836b8 Thomas Thrainer
1106 31b836b8 Thomas Thrainer
    storage_type = self.op.storage_type
1107 31b836b8 Thomas Thrainer
1108 31b836b8 Thomas Thrainer
    try:
1109 31b836b8 Thomas Thrainer
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
1110 31b836b8 Thomas Thrainer
    except KeyError:
1111 31b836b8 Thomas Thrainer
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
1112 31b836b8 Thomas Thrainer
                                 " modified" % storage_type,
1113 31b836b8 Thomas Thrainer
                                 errors.ECODE_INVAL)
1114 31b836b8 Thomas Thrainer
1115 31b836b8 Thomas Thrainer
    diff = set(self.op.changes.keys()) - modifiable
1116 31b836b8 Thomas Thrainer
    if diff:
1117 31b836b8 Thomas Thrainer
      raise errors.OpPrereqError("The following fields can not be modified for"
1118 31b836b8 Thomas Thrainer
                                 " storage units of type '%s': %r" %
1119 31b836b8 Thomas Thrainer
                                 (storage_type, list(diff)),
1120 31b836b8 Thomas Thrainer
                                 errors.ECODE_INVAL)
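  # Example (sketch, assuming the stock MODIFIABLE_STORAGE_FIELDS table, in
  # which only LVM physical volumes have modifiable fields): an opcode with
  #   storage_type=constants.ST_LVM_PV, changes={constants.SF_ALLOCATABLE: False}
  # passes this check, while any other change key would be rejected above.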
1121 31b836b8 Thomas Thrainer
1122 31b836b8 Thomas Thrainer
  def ExpandNames(self):
1123 31b836b8 Thomas Thrainer
    self.needed_locks = {
1124 31b836b8 Thomas Thrainer
      locking.LEVEL_NODE: self.op.node_name,
1125 31b836b8 Thomas Thrainer
      }
1126 31b836b8 Thomas Thrainer
1127 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1128 31b836b8 Thomas Thrainer
    """Computes the list of nodes and their attributes.
1129 31b836b8 Thomas Thrainer

1130 31b836b8 Thomas Thrainer
    """
1131 31b836b8 Thomas Thrainer
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
1132 31b836b8 Thomas Thrainer
    result = self.rpc.call_storage_modify(self.op.node_name,
1133 31b836b8 Thomas Thrainer
                                          self.op.storage_type, st_args,
1134 31b836b8 Thomas Thrainer
                                          self.op.name, self.op.changes)
1135 31b836b8 Thomas Thrainer
    result.Raise("Failed to modify storage unit '%s' on %s" %
1136 31b836b8 Thomas Thrainer
                 (self.op.name, self.op.node_name))
1137 31b836b8 Thomas Thrainer
1138 31b836b8 Thomas Thrainer
1139 5eacbcae Thomas Thrainer
class NodeQuery(QueryBase):
1140 31b836b8 Thomas Thrainer
  FIELDS = query.NODE_FIELDS
1141 31b836b8 Thomas Thrainer
1142 31b836b8 Thomas Thrainer
  def ExpandNames(self, lu):
1143 31b836b8 Thomas Thrainer
    lu.needed_locks = {}
1144 5eacbcae Thomas Thrainer
    lu.share_locks = ShareAll()
1145 31b836b8 Thomas Thrainer
1146 31b836b8 Thomas Thrainer
    if self.names:
1147 5eacbcae Thomas Thrainer
      self.wanted = GetWantedNodes(lu, self.names)
1148 31b836b8 Thomas Thrainer
    else:
1149 31b836b8 Thomas Thrainer
      self.wanted = locking.ALL_SET
1150 31b836b8 Thomas Thrainer
1151 31b836b8 Thomas Thrainer
    self.do_locking = (self.use_locking and
1152 31b836b8 Thomas Thrainer
                       query.NQ_LIVE in self.requested_data)
1153 31b836b8 Thomas Thrainer
1154 31b836b8 Thomas Thrainer
    if self.do_locking:
1155 31b836b8 Thomas Thrainer
      # If any non-static field is requested we need to lock the nodes
1156 31b836b8 Thomas Thrainer
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted
1157 31b836b8 Thomas Thrainer
      lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
1158 31b836b8 Thomas Thrainer
1159 31b836b8 Thomas Thrainer
  def DeclareLocks(self, lu, level):
1160 31b836b8 Thomas Thrainer
    pass
1161 31b836b8 Thomas Thrainer
1162 31b836b8 Thomas Thrainer
  def _GetQueryData(self, lu):
1163 31b836b8 Thomas Thrainer
    """Computes the list of nodes and their attributes.
1164 31b836b8 Thomas Thrainer

1165 31b836b8 Thomas Thrainer
    """
1166 31b836b8 Thomas Thrainer
    all_info = lu.cfg.GetAllNodesInfo()
1167 31b836b8 Thomas Thrainer
1168 31b836b8 Thomas Thrainer
    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
1169 31b836b8 Thomas Thrainer
1170 31b836b8 Thomas Thrainer
    # Gather data as requested
1171 31b836b8 Thomas Thrainer
    if query.NQ_LIVE in self.requested_data:
1172 31b836b8 Thomas Thrainer
      # filter out non-vm_capable nodes
1173 31b836b8 Thomas Thrainer
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
1174 31b836b8 Thomas Thrainer
1175 31b836b8 Thomas Thrainer
      es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
1176 4b92e992 Helga Velroyen
      # FIXME: This currently maps everything to lvm, this should be more
1177 4b92e992 Helga Velroyen
      # flexible
1178 06fb92cf Bernardo Dal Seno
      vg_req = rpc.BuildVgInfoQuery(lu.cfg)
1179 a295eb80 Helga Velroyen
      default_hypervisor = lu.cfg.GetHypervisorType()
1180 a295eb80 Helga Velroyen
      hvparams = lu.cfg.GetClusterInfo().hvparams[default_hypervisor]
1181 a295eb80 Helga Velroyen
      hvspecs = [(default_hypervisor, hvparams)]
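      # hvspecs is a list of (hypervisor name, hvparams dict) pairs, e.g.
      # [("xen-pvm", {...})] on a cluster whose default hypervisor is
      # xen-pvm (name used purely as an example); only the default
      # hypervisor is queried for live node data here.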
1182 06fb92cf Bernardo Dal Seno
      node_data = lu.rpc.call_node_info(toquery_nodes, vg_req,
1183 a295eb80 Helga Velroyen
                                        hvspecs, es_flags)
1184 31b836b8 Thomas Thrainer
      live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
1185 31b836b8 Thomas Thrainer
                       for (name, nresult) in node_data.items()
1186 31b836b8 Thomas Thrainer
                       if not nresult.fail_msg and nresult.payload)
1187 31b836b8 Thomas Thrainer
    else:
1188 31b836b8 Thomas Thrainer
      live_data = None
1189 31b836b8 Thomas Thrainer
1190 31b836b8 Thomas Thrainer
    if query.NQ_INST in self.requested_data:
1191 31b836b8 Thomas Thrainer
      node_to_primary = dict([(name, set()) for name in nodenames])
1192 31b836b8 Thomas Thrainer
      node_to_secondary = dict([(name, set()) for name in nodenames])
1193 31b836b8 Thomas Thrainer
1194 31b836b8 Thomas Thrainer
      inst_data = lu.cfg.GetAllInstancesInfo()
1195 31b836b8 Thomas Thrainer
1196 31b836b8 Thomas Thrainer
      for inst in inst_data.values():
1197 31b836b8 Thomas Thrainer
        if inst.primary_node in node_to_primary:
1198 31b836b8 Thomas Thrainer
          node_to_primary[inst.primary_node].add(inst.name)
1199 31b836b8 Thomas Thrainer
        for secnode in inst.secondary_nodes:
1200 31b836b8 Thomas Thrainer
          if secnode in node_to_secondary:
1201 31b836b8 Thomas Thrainer
            node_to_secondary[secnode].add(inst.name)
1202 31b836b8 Thomas Thrainer
    else:
1203 31b836b8 Thomas Thrainer
      node_to_primary = None
1204 31b836b8 Thomas Thrainer
      node_to_secondary = None
1205 31b836b8 Thomas Thrainer
1206 31b836b8 Thomas Thrainer
    if query.NQ_OOB in self.requested_data:
1207 5eacbcae Thomas Thrainer
      oob_support = dict((name, bool(SupportsOob(lu.cfg, node)))
1208 31b836b8 Thomas Thrainer
                         for name, node in all_info.iteritems())
1209 31b836b8 Thomas Thrainer
    else:
1210 31b836b8 Thomas Thrainer
      oob_support = None
1211 31b836b8 Thomas Thrainer
1212 31b836b8 Thomas Thrainer
    if query.NQ_GROUP in self.requested_data:
1213 31b836b8 Thomas Thrainer
      groups = lu.cfg.GetAllNodeGroupsInfo()
1214 31b836b8 Thomas Thrainer
    else:
1215 31b836b8 Thomas Thrainer
      groups = {}
1216 31b836b8 Thomas Thrainer
1217 31b836b8 Thomas Thrainer
    return query.NodeQueryData([all_info[name] for name in nodenames],
1218 31b836b8 Thomas Thrainer
                               live_data, lu.cfg.GetMasterNode(),
1219 31b836b8 Thomas Thrainer
                               node_to_primary, node_to_secondary, groups,
1220 31b836b8 Thomas Thrainer
                               oob_support, lu.cfg.GetClusterInfo())
1221 31b836b8 Thomas Thrainer
1222 31b836b8 Thomas Thrainer
1223 31b836b8 Thomas Thrainer
class LUNodeQuery(NoHooksLU):
1224 31b836b8 Thomas Thrainer
  """Logical unit for querying nodes.
1225 31b836b8 Thomas Thrainer

1226 31b836b8 Thomas Thrainer
  """
1227 31b836b8 Thomas Thrainer
  # pylint: disable=W0142
1228 31b836b8 Thomas Thrainer
  REQ_BGL = False
1229 31b836b8 Thomas Thrainer
1230 31b836b8 Thomas Thrainer
  def CheckArguments(self):
1231 5eacbcae Thomas Thrainer
    self.nq = NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
1232 31b836b8 Thomas Thrainer
                         self.op.output_fields, self.op.use_locking)
1233 31b836b8 Thomas Thrainer
1234 31b836b8 Thomas Thrainer
  def ExpandNames(self):
1235 31b836b8 Thomas Thrainer
    self.nq.ExpandNames(self)
1236 31b836b8 Thomas Thrainer
1237 31b836b8 Thomas Thrainer
  def DeclareLocks(self, level):
1238 31b836b8 Thomas Thrainer
    self.nq.DeclareLocks(self, level)
1239 31b836b8 Thomas Thrainer
1240 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1241 31b836b8 Thomas Thrainer
    return self.nq.OldStyleQuery(self)
1242 31b836b8 Thomas Thrainer
1243 31b836b8 Thomas Thrainer
1244 31b836b8 Thomas Thrainer
def _CheckOutputFields(static, dynamic, selected):
1245 31b836b8 Thomas Thrainer
  """Checks whether all selected fields are valid.
1246 31b836b8 Thomas Thrainer

1247 31b836b8 Thomas Thrainer
  @type static: L{utils.FieldSet}
1248 31b836b8 Thomas Thrainer
  @param static: static fields set
1249 31b836b8 Thomas Thrainer
  @type dynamic: L{utils.FieldSet}
1250 31b836b8 Thomas Thrainer
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of output field names to check
1251 31b836b8 Thomas Thrainer

1252 31b836b8 Thomas Thrainer
  """
1253 31b836b8 Thomas Thrainer
  f = utils.FieldSet()
1254 31b836b8 Thomas Thrainer
  f.Extend(static)
1255 31b836b8 Thomas Thrainer
  f.Extend(dynamic)
1256 31b836b8 Thomas Thrainer
1257 31b836b8 Thomas Thrainer
  delta = f.NonMatching(selected)
1258 31b836b8 Thomas Thrainer
  if delta:
1259 31b836b8 Thomas Thrainer
    raise errors.OpPrereqError("Unknown output fields selected: %s"
1260 31b836b8 Thomas Thrainer
                               % ",".join(delta), errors.ECODE_INVAL)
1261 31b836b8 Thomas Thrainer
1262 31b836b8 Thomas Thrainer
1263 31b836b8 Thomas Thrainer
class LUNodeQueryvols(NoHooksLU):
1264 31b836b8 Thomas Thrainer
  """Logical unit for getting volumes on node(s).
1265 31b836b8 Thomas Thrainer

1266 31b836b8 Thomas Thrainer
  """
1267 31b836b8 Thomas Thrainer
  REQ_BGL = False
1268 31b836b8 Thomas Thrainer
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
1269 31b836b8 Thomas Thrainer
  _FIELDS_STATIC = utils.FieldSet("node")
1270 31b836b8 Thomas Thrainer
1271 31b836b8 Thomas Thrainer
  def CheckArguments(self):
1272 31b836b8 Thomas Thrainer
    _CheckOutputFields(static=self._FIELDS_STATIC,
1273 31b836b8 Thomas Thrainer
                       dynamic=self._FIELDS_DYNAMIC,
1274 31b836b8 Thomas Thrainer
                       selected=self.op.output_fields)
1275 31b836b8 Thomas Thrainer
1276 31b836b8 Thomas Thrainer
  def ExpandNames(self):
1277 5eacbcae Thomas Thrainer
    self.share_locks = ShareAll()
1278 31b836b8 Thomas Thrainer
1279 31b836b8 Thomas Thrainer
    if self.op.nodes:
1280 31b836b8 Thomas Thrainer
      self.needed_locks = {
1281 5eacbcae Thomas Thrainer
        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
1282 31b836b8 Thomas Thrainer
        }
1283 31b836b8 Thomas Thrainer
    else:
1284 31b836b8 Thomas Thrainer
      self.needed_locks = {
1285 31b836b8 Thomas Thrainer
        locking.LEVEL_NODE: locking.ALL_SET,
1286 31b836b8 Thomas Thrainer
        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1287 31b836b8 Thomas Thrainer
        }
1288 31b836b8 Thomas Thrainer
1289 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1290 31b836b8 Thomas Thrainer
    """Computes the list of nodes and their attributes.
1291 31b836b8 Thomas Thrainer

1292 31b836b8 Thomas Thrainer
    """
1293 31b836b8 Thomas Thrainer
    nodenames = self.owned_locks(locking.LEVEL_NODE)
1294 31b836b8 Thomas Thrainer
    volumes = self.rpc.call_node_volumes(nodenames)
1295 31b836b8 Thomas Thrainer
1296 31b836b8 Thomas Thrainer
    ilist = self.cfg.GetAllInstancesInfo()
1297 5eacbcae Thomas Thrainer
    vol2inst = MapInstanceDisksToNodes(ilist.values())
1298 31b836b8 Thomas Thrainer
1299 31b836b8 Thomas Thrainer
    output = []
1300 31b836b8 Thomas Thrainer
    for node in nodenames:
1301 31b836b8 Thomas Thrainer
      nresult = volumes[node]
1302 31b836b8 Thomas Thrainer
      if nresult.offline:
1303 31b836b8 Thomas Thrainer
        continue
1304 31b836b8 Thomas Thrainer
      msg = nresult.fail_msg
1305 31b836b8 Thomas Thrainer
      if msg:
1306 31b836b8 Thomas Thrainer
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
1307 31b836b8 Thomas Thrainer
        continue
1308 31b836b8 Thomas Thrainer
1309 31b836b8 Thomas Thrainer
      node_vols = sorted(nresult.payload,
1310 31b836b8 Thomas Thrainer
                         key=operator.itemgetter("dev"))
1311 31b836b8 Thomas Thrainer
1312 31b836b8 Thomas Thrainer
      for vol in node_vols:
1313 31b836b8 Thomas Thrainer
        node_output = []
1314 31b836b8 Thomas Thrainer
        for field in self.op.output_fields:
1315 31b836b8 Thomas Thrainer
          if field == "node":
1316 31b836b8 Thomas Thrainer
            val = node
1317 31b836b8 Thomas Thrainer
          elif field == "phys":
1318 31b836b8 Thomas Thrainer
            val = vol["dev"]
1319 31b836b8 Thomas Thrainer
          elif field == "vg":
1320 31b836b8 Thomas Thrainer
            val = vol["vg"]
1321 31b836b8 Thomas Thrainer
          elif field == "name":
1322 31b836b8 Thomas Thrainer
            val = vol["name"]
1323 31b836b8 Thomas Thrainer
          elif field == "size":
1324 31b836b8 Thomas Thrainer
            val = int(float(vol["size"]))
1325 31b836b8 Thomas Thrainer
          elif field == "instance":
1326 31b836b8 Thomas Thrainer
            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
1327 31b836b8 Thomas Thrainer
          else:
1328 31b836b8 Thomas Thrainer
            raise errors.ParameterError(field)
1329 31b836b8 Thomas Thrainer
          node_output.append(str(val))
1330 31b836b8 Thomas Thrainer
1331 31b836b8 Thomas Thrainer
        output.append(node_output)
1332 31b836b8 Thomas Thrainer
1333 31b836b8 Thomas Thrainer
    return output
1334 31b836b8 Thomas Thrainer
1335 31b836b8 Thomas Thrainer
1336 31b836b8 Thomas Thrainer
class LUNodeQueryStorage(NoHooksLU):
1337 31b836b8 Thomas Thrainer
  """Logical unit for getting information on storage units on node(s).
1338 31b836b8 Thomas Thrainer

1339 31b836b8 Thomas Thrainer
  """
1340 31b836b8 Thomas Thrainer
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
1341 31b836b8 Thomas Thrainer
  REQ_BGL = False
1342 31b836b8 Thomas Thrainer
1343 31b836b8 Thomas Thrainer
  def CheckArguments(self):
1344 31b836b8 Thomas Thrainer
    _CheckOutputFields(static=self._FIELDS_STATIC,
1345 31b836b8 Thomas Thrainer
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
1346 31b836b8 Thomas Thrainer
                       selected=self.op.output_fields)
1347 31b836b8 Thomas Thrainer
1348 31b836b8 Thomas Thrainer
  def ExpandNames(self):
1349 5eacbcae Thomas Thrainer
    self.share_locks = ShareAll()
1350 31b836b8 Thomas Thrainer
1351 31b836b8 Thomas Thrainer
    if self.op.nodes:
1352 31b836b8 Thomas Thrainer
      self.needed_locks = {
1353 5eacbcae Thomas Thrainer
        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
1354 31b836b8 Thomas Thrainer
        }
1355 31b836b8 Thomas Thrainer
    else:
1356 31b836b8 Thomas Thrainer
      self.needed_locks = {
1357 31b836b8 Thomas Thrainer
        locking.LEVEL_NODE: locking.ALL_SET,
1358 31b836b8 Thomas Thrainer
        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1359 31b836b8 Thomas Thrainer
        }
1360 31b836b8 Thomas Thrainer
1361 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1362 31b836b8 Thomas Thrainer
    """Computes the list of nodes and their attributes.
1363 31b836b8 Thomas Thrainer

1364 31b836b8 Thomas Thrainer
    """
1365 31b836b8 Thomas Thrainer
    self.nodes = self.owned_locks(locking.LEVEL_NODE)
1366 31b836b8 Thomas Thrainer
1367 31b836b8 Thomas Thrainer
    # Always get name to sort by
1368 31b836b8 Thomas Thrainer
    if constants.SF_NAME in self.op.output_fields:
1369 31b836b8 Thomas Thrainer
      fields = self.op.output_fields[:]
1370 31b836b8 Thomas Thrainer
    else:
1371 31b836b8 Thomas Thrainer
      fields = [constants.SF_NAME] + self.op.output_fields
1372 31b836b8 Thomas Thrainer
1373 31b836b8 Thomas Thrainer
    # Never ask for node or type as it's only known to the LU
1374 31b836b8 Thomas Thrainer
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
1375 31b836b8 Thomas Thrainer
      while extra in fields:
1376 31b836b8 Thomas Thrainer
        fields.remove(extra)
1377 31b836b8 Thomas Thrainer
1378 31b836b8 Thomas Thrainer
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
1379 31b836b8 Thomas Thrainer
    name_idx = field_idx[constants.SF_NAME]
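    # Example: if fields ends up as ["name", "size", "used"], then
    # field_idx == {"name": 0, "size": 1, "used": 2} and name_idx == 0; the
    # name column is what the per-node result rows are keyed and sorted on
    # below (field names shown are illustrative VALID_STORAGE_FIELDS entries).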
1380 31b836b8 Thomas Thrainer
1381 31b836b8 Thomas Thrainer
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
1382 31b836b8 Thomas Thrainer
    data = self.rpc.call_storage_list(self.nodes,
1383 31b836b8 Thomas Thrainer
                                      self.op.storage_type, st_args,
1384 31b836b8 Thomas Thrainer
                                      self.op.name, fields)
1385 31b836b8 Thomas Thrainer
1386 31b836b8 Thomas Thrainer
    result = []
1387 31b836b8 Thomas Thrainer
1388 31b836b8 Thomas Thrainer
    for node in utils.NiceSort(self.nodes):
1389 31b836b8 Thomas Thrainer
      nresult = data[node]
1390 31b836b8 Thomas Thrainer
      if nresult.offline:
1391 31b836b8 Thomas Thrainer
        continue
1392 31b836b8 Thomas Thrainer
1393 31b836b8 Thomas Thrainer
      msg = nresult.fail_msg
1394 31b836b8 Thomas Thrainer
      if msg:
1395 31b836b8 Thomas Thrainer
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
1396 31b836b8 Thomas Thrainer
        continue
1397 31b836b8 Thomas Thrainer
1398 31b836b8 Thomas Thrainer
      rows = dict([(row[name_idx], row) for row in nresult.payload])
1399 31b836b8 Thomas Thrainer
1400 31b836b8 Thomas Thrainer
      for name in utils.NiceSort(rows.keys()):
1401 31b836b8 Thomas Thrainer
        row = rows[name]
1402 31b836b8 Thomas Thrainer
1403 31b836b8 Thomas Thrainer
        out = []
1404 31b836b8 Thomas Thrainer
1405 31b836b8 Thomas Thrainer
        for field in self.op.output_fields:
1406 31b836b8 Thomas Thrainer
          if field == constants.SF_NODE:
1407 31b836b8 Thomas Thrainer
            val = node
1408 31b836b8 Thomas Thrainer
          elif field == constants.SF_TYPE:
1409 31b836b8 Thomas Thrainer
            val = self.op.storage_type
1410 31b836b8 Thomas Thrainer
          elif field in field_idx:
1411 31b836b8 Thomas Thrainer
            val = row[field_idx[field]]
1412 31b836b8 Thomas Thrainer
          else:
1413 31b836b8 Thomas Thrainer
            raise errors.ParameterError(field)
1414 31b836b8 Thomas Thrainer
1415 31b836b8 Thomas Thrainer
          out.append(val)
1416 31b836b8 Thomas Thrainer
1417 31b836b8 Thomas Thrainer
        result.append(out)
1418 31b836b8 Thomas Thrainer
1419 31b836b8 Thomas Thrainer
    return result
1420 31b836b8 Thomas Thrainer
1421 31b836b8 Thomas Thrainer
1422 31b836b8 Thomas Thrainer
class LUNodeRemove(LogicalUnit):
1423 31b836b8 Thomas Thrainer
  """Logical unit for removing a node.
1424 31b836b8 Thomas Thrainer

1425 31b836b8 Thomas Thrainer
  """
1426 31b836b8 Thomas Thrainer
  HPATH = "node-remove"
1427 31b836b8 Thomas Thrainer
  HTYPE = constants.HTYPE_NODE
1428 31b836b8 Thomas Thrainer
1429 31b836b8 Thomas Thrainer
  def BuildHooksEnv(self):
1430 31b836b8 Thomas Thrainer
    """Build hooks env.
1431 31b836b8 Thomas Thrainer

1432 31b836b8 Thomas Thrainer
    """
1433 31b836b8 Thomas Thrainer
    return {
1434 31b836b8 Thomas Thrainer
      "OP_TARGET": self.op.node_name,
1435 31b836b8 Thomas Thrainer
      "NODE_NAME": self.op.node_name,
1436 31b836b8 Thomas Thrainer
      }
1437 31b836b8 Thomas Thrainer
1438 31b836b8 Thomas Thrainer
  def BuildHooksNodes(self):
1439 31b836b8 Thomas Thrainer
    """Build hooks nodes.
1440 31b836b8 Thomas Thrainer

1441 31b836b8 Thomas Thrainer
    This doesn't run on the target node in the pre phase as a failed
1442 31b836b8 Thomas Thrainer
    node would then be impossible to remove.
1443 31b836b8 Thomas Thrainer

1444 31b836b8 Thomas Thrainer
    """
1445 31b836b8 Thomas Thrainer
    all_nodes = self.cfg.GetNodeList()
1446 31b836b8 Thomas Thrainer
    try:
1447 31b836b8 Thomas Thrainer
      all_nodes.remove(self.op.node_name)
1448 31b836b8 Thomas Thrainer
    except ValueError:
1449 31b836b8 Thomas Thrainer
      pass
1450 31b836b8 Thomas Thrainer
    return (all_nodes, all_nodes)
1451 31b836b8 Thomas Thrainer
1452 31b836b8 Thomas Thrainer
  def CheckPrereq(self):
1453 31b836b8 Thomas Thrainer
    """Check prerequisites.
1454 31b836b8 Thomas Thrainer

1455 31b836b8 Thomas Thrainer
    This checks:
1456 31b836b8 Thomas Thrainer
     - the node exists in the configuration
1457 31b836b8 Thomas Thrainer
     - it does not have primary or secondary instances
1458 31b836b8 Thomas Thrainer
     - it's not the master
1459 31b836b8 Thomas Thrainer

1460 31b836b8 Thomas Thrainer
    Any errors are signaled by raising errors.OpPrereqError.
1461 31b836b8 Thomas Thrainer

1462 31b836b8 Thomas Thrainer
    """
1463 5eacbcae Thomas Thrainer
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
1464 31b836b8 Thomas Thrainer
    node = self.cfg.GetNodeInfo(self.op.node_name)
1465 31b836b8 Thomas Thrainer
    assert node is not None
1466 31b836b8 Thomas Thrainer
1467 31b836b8 Thomas Thrainer
    masternode = self.cfg.GetMasterNode()
1468 31b836b8 Thomas Thrainer
    if node.name == masternode:
1469 31b836b8 Thomas Thrainer
      raise errors.OpPrereqError("Node is the master node, failover to another"
1470 31b836b8 Thomas Thrainer
                                 " node is required", errors.ECODE_INVAL)
1471 31b836b8 Thomas Thrainer
1472 31b836b8 Thomas Thrainer
    for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
1473 31b836b8 Thomas Thrainer
      if node.name in instance.all_nodes:
1474 31b836b8 Thomas Thrainer
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1475 31b836b8 Thomas Thrainer
                                   " please remove first" % instance_name,
1476 31b836b8 Thomas Thrainer
                                   errors.ECODE_INVAL)
1477 31b836b8 Thomas Thrainer
    self.op.node_name = node.name
1478 31b836b8 Thomas Thrainer
    self.node = node
1479 31b836b8 Thomas Thrainer
1480 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1481 31b836b8 Thomas Thrainer
    """Removes the node from the cluster.
1482 31b836b8 Thomas Thrainer

1483 31b836b8 Thomas Thrainer
    """
1484 31b836b8 Thomas Thrainer
    node = self.node
1485 31b836b8 Thomas Thrainer
    logging.info("Stopping the node daemon and removing configs from node %s",
1486 31b836b8 Thomas Thrainer
                 node.name)
1487 31b836b8 Thomas Thrainer
1488 31b836b8 Thomas Thrainer
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
1489 31b836b8 Thomas Thrainer
1490 31b836b8 Thomas Thrainer
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
1491 31b836b8 Thomas Thrainer
      "Not owning BGL"
1492 31b836b8 Thomas Thrainer
1493 31b836b8 Thomas Thrainer
    # Promote nodes to master candidate as needed
1494 5eacbcae Thomas Thrainer
    AdjustCandidatePool(self, exceptions=[node.name])
1495 31b836b8 Thomas Thrainer
    self.context.RemoveNode(node.name)
1496 31b836b8 Thomas Thrainer
1497 31b836b8 Thomas Thrainer
    # Run post hooks on the node before it's removed
1498 5eacbcae Thomas Thrainer
    RunPostHook(self, node.name)
1499 31b836b8 Thomas Thrainer
1500 31b836b8 Thomas Thrainer
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
1501 31b836b8 Thomas Thrainer
    msg = result.fail_msg
1502 31b836b8 Thomas Thrainer
    if msg:
1503 31b836b8 Thomas Thrainer
      self.LogWarning("Errors encountered on the remote node while leaving"
1504 31b836b8 Thomas Thrainer
                      " the cluster: %s", msg)
1505 31b836b8 Thomas Thrainer
1506 31b836b8 Thomas Thrainer
    # Remove node from our /etc/hosts
1507 31b836b8 Thomas Thrainer
    if self.cfg.GetClusterInfo().modify_etc_hosts:
1508 31b836b8 Thomas Thrainer
      master_node = self.cfg.GetMasterNode()
1509 31b836b8 Thomas Thrainer
      result = self.rpc.call_etc_hosts_modify(master_node,
1510 31b836b8 Thomas Thrainer
                                              constants.ETC_HOSTS_REMOVE,
1511 31b836b8 Thomas Thrainer
                                              node.name, None)
1512 31b836b8 Thomas Thrainer
      result.Raise("Can't update hosts file with new host data")
1513 5eacbcae Thomas Thrainer
      RedistributeAncillaryFiles(self)
1514 31b836b8 Thomas Thrainer
1515 31b836b8 Thomas Thrainer
1516 31b836b8 Thomas Thrainer
class LURepairNodeStorage(NoHooksLU):
1517 31b836b8 Thomas Thrainer
  """Repairs the volume group on a node.
1518 31b836b8 Thomas Thrainer

1519 31b836b8 Thomas Thrainer
  """
1520 31b836b8 Thomas Thrainer
  REQ_BGL = False
1521 31b836b8 Thomas Thrainer
1522 31b836b8 Thomas Thrainer
  def CheckArguments(self):
1523 5eacbcae Thomas Thrainer
    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
1524 31b836b8 Thomas Thrainer
1525 31b836b8 Thomas Thrainer
    storage_type = self.op.storage_type
1526 31b836b8 Thomas Thrainer
1527 31b836b8 Thomas Thrainer
    if (constants.SO_FIX_CONSISTENCY not in
1528 31b836b8 Thomas Thrainer
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
1529 31b836b8 Thomas Thrainer
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
1530 31b836b8 Thomas Thrainer
                                 " repaired" % storage_type,
1531 31b836b8 Thomas Thrainer
                                 errors.ECODE_INVAL)
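  # Illustration (assuming the stock VALID_STORAGE_OPERATIONS table, where
  # only LVM volume groups support SO_FIX_CONSISTENCY): repairing a unit of
  # storage_type=constants.ST_LVM_VG is accepted here, while e.g.
  # constants.ST_FILE is rejected with the error above.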
1532 31b836b8 Thomas Thrainer
1533 31b836b8 Thomas Thrainer
  def ExpandNames(self):
1534 31b836b8 Thomas Thrainer
    self.needed_locks = {
1535 31b836b8 Thomas Thrainer
      locking.LEVEL_NODE: [self.op.node_name],
1536 31b836b8 Thomas Thrainer
      }
1537 31b836b8 Thomas Thrainer
1538 31b836b8 Thomas Thrainer
  def _CheckFaultyDisks(self, instance, node_name):
1539 31b836b8 Thomas Thrainer
    """Ensure faulty disks abort the opcode or at least warn."""
1540 31b836b8 Thomas Thrainer
    try:
1541 5eacbcae Thomas Thrainer
      if FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
1542 5eacbcae Thomas Thrainer
                                 node_name, True):
1543 31b836b8 Thomas Thrainer
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
1544 31b836b8 Thomas Thrainer
                                   " node '%s'" % (instance.name, node_name),
1545 31b836b8 Thomas Thrainer
                                   errors.ECODE_STATE)
1546 31b836b8 Thomas Thrainer
    except errors.OpPrereqError, err:
1547 31b836b8 Thomas Thrainer
      if self.op.ignore_consistency:
1548 31b836b8 Thomas Thrainer
        self.LogWarning(str(err.args[0]))
1549 31b836b8 Thomas Thrainer
      else:
1550 31b836b8 Thomas Thrainer
        raise
1551 31b836b8 Thomas Thrainer
1552 31b836b8 Thomas Thrainer
  def CheckPrereq(self):
1553 31b836b8 Thomas Thrainer
    """Check prerequisites.
1554 31b836b8 Thomas Thrainer

1555 31b836b8 Thomas Thrainer
    """
1556 31b836b8 Thomas Thrainer
    # Check whether any instance on this node has faulty disks
1557 31b836b8 Thomas Thrainer
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
1558 1d4a4b26 Thomas Thrainer
      if not inst.disks_active:
1559 31b836b8 Thomas Thrainer
        continue
1560 31b836b8 Thomas Thrainer
      check_nodes = set(inst.all_nodes)
1561 31b836b8 Thomas Thrainer
      check_nodes.discard(self.op.node_name)
1562 31b836b8 Thomas Thrainer
      for inst_node_name in check_nodes:
1563 31b836b8 Thomas Thrainer
        self._CheckFaultyDisks(inst, inst_node_name)
1564 31b836b8 Thomas Thrainer
1565 31b836b8 Thomas Thrainer
  def Exec(self, feedback_fn):
1566 31b836b8 Thomas Thrainer
    feedback_fn("Repairing storage unit '%s' on %s ..." %
1567 31b836b8 Thomas Thrainer
                (self.op.name, self.op.node_name))
1568 31b836b8 Thomas Thrainer
1569 31b836b8 Thomas Thrainer
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
1570 31b836b8 Thomas Thrainer
    result = self.rpc.call_storage_execute(self.op.node_name,
1571 31b836b8 Thomas Thrainer
                                           self.op.storage_type, st_args,
1572 31b836b8 Thomas Thrainer
                                           self.op.name,
1573 31b836b8 Thomas Thrainer
                                           constants.SO_FIX_CONSISTENCY)
1574 31b836b8 Thomas Thrainer
    result.Raise("Failed to repair storage unit '%s' on %s" %
1575 31b836b8 Thomas Thrainer
                 (self.op.name, self.op.node_name))