root / lib / cmdlib.py @ 6873a52a

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there is a need to calculate some locks only after the previous
    ones have been acquired. This function is called just before acquiring
    locks at a particular level, but after acquiring the ones at lower levels,
    and permits such calculations. It can be used to modify self.needed_locks,
    and by default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
209 a8083063 Iustin Pop
    """Check prerequisites for this LU.
210 a8083063 Iustin Pop

211 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
212 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
213 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
214 a8083063 Iustin Pop
    allowed.
215 a8083063 Iustin Pop

216 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
217 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
218 a8083063 Iustin Pop

219 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
220 d465bdc8 Guido Trotter
    their canonical form if it hasn't been done by ExpandNames before.
221 a8083063 Iustin Pop

222 a8083063 Iustin Pop
    """
223 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
224 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
225 abae1b2b Michael Hanselmann
        logging.debug("Checking prerequisites for tasklet %s/%s",
226 abae1b2b Michael Hanselmann
                      idx + 1, len(self.tasklets))
227 6fd35c4d Michael Hanselmann
        tl.CheckPrereq()
228 6fd35c4d Michael Hanselmann
    else:
229 6fd35c4d Michael Hanselmann
      raise NotImplementedError
230 a8083063 Iustin Pop
231 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
232 a8083063 Iustin Pop
    """Execute the LU.
233 a8083063 Iustin Pop

234 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
235 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
236 a8083063 Iustin Pop
    code, or expected.
237 a8083063 Iustin Pop

238 a8083063 Iustin Pop
    """
239 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
240 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
241 abae1b2b Michael Hanselmann
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
242 6fd35c4d Michael Hanselmann
        tl.Exec(feedback_fn)
243 6fd35c4d Michael Hanselmann
    else:
244 6fd35c4d Michael Hanselmann
      raise NotImplementedError
245 a8083063 Iustin Pop
246 a8083063 Iustin Pop
  def BuildHooksEnv(self):
247 a8083063 Iustin Pop
    """Build hooks environment for this LU.
248 a8083063 Iustin Pop

249 a8083063 Iustin Pop
    This method should return a three-node tuple consisting of: a dict
250 a8083063 Iustin Pop
    containing the environment that will be used for running the
251 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
252 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
253 a8083063 Iustin Pop
    the hook should run after the execution.
254 a8083063 Iustin Pop

255 a8083063 Iustin Pop
    The keys of the dict must not have 'GANETI_' prefixed as this will
256 a8083063 Iustin Pop
    be handled in the hooks runner. Also note additional keys will be
257 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
258 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
259 a8083063 Iustin Pop

260 8a3fe350 Guido Trotter
    No nodes should be returned as an empty list (and not None).
261 a8083063 Iustin Pop

262 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
263 a8083063 Iustin Pop
    not be called.
264 a8083063 Iustin Pop

265 a8083063 Iustin Pop
    """
266 a8083063 Iustin Pop
    raise NotImplementedError
267 a8083063 Iustin Pop
268 1fce5219 Guido Trotter
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
269 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
270 1fce5219 Guido Trotter

271 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
272 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
273 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
274 1fce5219 Guido Trotter
    previous result is passed back unchanged but any LU can define it if it
275 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
276 1fce5219 Guido Trotter

277 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
278 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
279 e4376078 Iustin Pop
    @param hook_results: the results of the multi-node hooks rpc call
280 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
281 e4376078 Iustin Pop
    @param lu_result: the previous Exec result this LU had, or None
282 e4376078 Iustin Pop
        in the PRE phase
283 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
284 e4376078 Iustin Pop
        and hook results
285 1fce5219 Guido Trotter

286 1fce5219 Guido Trotter
    """
287 2d54e29c Iustin Pop
    # API must be kept, thus we ignore the unused argument and could
288 2d54e29c Iustin Pop
    # be a function warnings
289 2d54e29c Iustin Pop
    # pylint: disable-msg=W0613,R0201
290 1fce5219 Guido Trotter
    return lu_result
291 1fce5219 Guido Trotter
  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instances' nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


851 b5f5fae9 Luca Bigliardi
class LUPostInitCluster(LogicalUnit):
852 b5f5fae9 Luca Bigliardi
  """Logical unit for running hooks after cluster initialization.
853 b5f5fae9 Luca Bigliardi

854 b5f5fae9 Luca Bigliardi
  """
855 b5f5fae9 Luca Bigliardi
  HPATH = "cluster-init"
856 b5f5fae9 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
857 b5f5fae9 Luca Bigliardi
  _OP_REQP = []
858 b5f5fae9 Luca Bigliardi
859 b5f5fae9 Luca Bigliardi
  def BuildHooksEnv(self):
860 b5f5fae9 Luca Bigliardi
    """Build hooks env.
861 b5f5fae9 Luca Bigliardi

862 b5f5fae9 Luca Bigliardi
    """
863 b5f5fae9 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
864 b5f5fae9 Luca Bigliardi
    mn = self.cfg.GetMasterNode()
865 b5f5fae9 Luca Bigliardi
    return env, [], [mn]
866 b5f5fae9 Luca Bigliardi
867 b5f5fae9 Luca Bigliardi
  def CheckPrereq(self):
868 b5f5fae9 Luca Bigliardi
    """No prerequisites to check.
869 b5f5fae9 Luca Bigliardi

870 b5f5fae9 Luca Bigliardi
    """
871 b5f5fae9 Luca Bigliardi
    return True
872 b5f5fae9 Luca Bigliardi
873 b5f5fae9 Luca Bigliardi
  def Exec(self, feedback_fn):
874 b5f5fae9 Luca Bigliardi
    """Nothing to do.
875 b5f5fae9 Luca Bigliardi

876 b5f5fae9 Luca Bigliardi
    """
877 b5f5fae9 Luca Bigliardi
    return True
878 b5f5fae9 Luca Bigliardi
879 b5f5fae9 Luca Bigliardi
880 b2c750a4 Luca Bigliardi
class LUDestroyCluster(LogicalUnit):
881 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
882 a8083063 Iustin Pop

883 a8083063 Iustin Pop
  """
884 b2c750a4 Luca Bigliardi
  HPATH = "cluster-destroy"
885 b2c750a4 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
886 a8083063 Iustin Pop
  _OP_REQP = []
887 a8083063 Iustin Pop
888 b2c750a4 Luca Bigliardi
  def BuildHooksEnv(self):
889 b2c750a4 Luca Bigliardi
    """Build hooks env.
890 b2c750a4 Luca Bigliardi

891 b2c750a4 Luca Bigliardi
    """
892 b2c750a4 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
893 b2c750a4 Luca Bigliardi
    return env, [], []
894 b2c750a4 Luca Bigliardi
895 a8083063 Iustin Pop
  def CheckPrereq(self):
896 a8083063 Iustin Pop
    """Check prerequisites.
897 a8083063 Iustin Pop

898 a8083063 Iustin Pop
    This checks whether the cluster is empty.
899 a8083063 Iustin Pop

900 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
901 a8083063 Iustin Pop

902 a8083063 Iustin Pop
    """
903 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
904 a8083063 Iustin Pop
905 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
906 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
907 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
908 5c983ee5 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1),
909 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
910 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
911 db915bd1 Michael Hanselmann
    if instancelist:
912 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
913 5c983ee5 Iustin Pop
                                 " this cluster." % len(instancelist),
914 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
915 a8083063 Iustin Pop
916 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
917 a8083063 Iustin Pop
    """Destroys the cluster.
918 a8083063 Iustin Pop

919 a8083063 Iustin Pop
    """
920 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
921 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
922 3141ad3b Luca Bigliardi
923 3141ad3b Luca Bigliardi
    # Run post hooks on master node before it's removed
924 3141ad3b Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
925 3141ad3b Luca Bigliardi
    try:
926 3141ad3b Luca Bigliardi
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
927 3141ad3b Luca Bigliardi
    except:
928 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
929 3141ad3b Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % master)
930 3141ad3b Luca Bigliardi
931 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
932 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
933 b989b9d9 Ken Wehr
934 b989b9d9 Ken Wehr
    if modify_ssh_setup:
935 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
936 b989b9d9 Ken Wehr
      utils.CreateBackup(priv_key)
937 b989b9d9 Ken Wehr
      utils.CreateBackup(pub_key)
938 b989b9d9 Ken Wehr
939 140aa4a8 Iustin Pop
    return master
940 a8083063 Iustin Pop
941 a8083063 Iustin Pop
942 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
943 a8083063 Iustin Pop
  """Verifies the cluster status.
944 a8083063 Iustin Pop

945 a8083063 Iustin Pop
  """
946 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
947 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
948 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
949 d4b9d97f Guido Trotter
  REQ_BGL = False
950 d4b9d97f Guido Trotter
951 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
952 7c874ee1 Iustin Pop
  TNODE = "node"
953 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
954 7c874ee1 Iustin Pop
955 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
956 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
957 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
958 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
959 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
960 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
961 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
962 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
963 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
964 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
965 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
966 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
967 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
968 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
969 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
970 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
971 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
972 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
973 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
974 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
975 313b2dd4 Michael Hanselmann
  ENODETIME = (TNODE, "ENODETIME")
976 7c874ee1 Iustin Pop
977 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
978 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
979 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
980 a0c9776a Iustin Pop
981 d4b9d97f Guido Trotter
  def ExpandNames(self):
982 d4b9d97f Guido Trotter
    self.needed_locks = {
983 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
984 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
985 d4b9d97f Guido Trotter
    }
986 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
987 a8083063 Iustin Pop
988 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
989 7c874ee1 Iustin Pop
    """Format an error message.
990 7c874ee1 Iustin Pop

991 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
992 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
993 7c874ee1 Iustin Pop

994 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
995 7c874ee1 Iustin Pop

996 7c874ee1 Iustin Pop
    """
997 a0c9776a Iustin Pop
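    # Illustrative example (not from the original source): with error_codes
    # set, a call like self._Error(self.ENODELVM, "node1", "volume %s is
    # unknown", "xenvg/vol1") is reported as
    #   "  - ERROR:ENODELVM:node:node1:volume xenvg/vol1 is unknown"
    # while without error_codes it is reported as
    #   "  - ERROR: node node1: volume xenvg/vol1 is unknown"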
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
998 7c874ee1 Iustin Pop
    itype, etxt = ecode
999 7c874ee1 Iustin Pop
    # first complete the msg
1000 7c874ee1 Iustin Pop
    if args:
1001 7c874ee1 Iustin Pop
      msg = msg % args
1002 7c874ee1 Iustin Pop
    # then format the whole message
1003 7c874ee1 Iustin Pop
    if self.op.error_codes:
1004 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1005 7c874ee1 Iustin Pop
    else:
1006 7c874ee1 Iustin Pop
      if item:
1007 7c874ee1 Iustin Pop
        item = " " + item
1008 7c874ee1 Iustin Pop
      else:
1009 7c874ee1 Iustin Pop
        item = ""
1010 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1011 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
1012 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
1013 7c874ee1 Iustin Pop
1014 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
1015 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
1016 a0c9776a Iustin Pop

1017 a0c9776a Iustin Pop
    """
1018 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
1019 a0c9776a Iustin Pop
    if cond:
1020 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
1021 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
1022 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1023 a0c9776a Iustin Pop
      self.bad = self.bad or cond
1024 a0c9776a Iustin Pop
1025 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
1026 7c874ee1 Iustin Pop
                  node_result, master_files, drbd_map, vg_name):
1027 a8083063 Iustin Pop
    """Run multiple tests against a node.
1028 a8083063 Iustin Pop

1029 112f18a5 Iustin Pop
    Test list:
1030 e4376078 Iustin Pop

1031 a8083063 Iustin Pop
      - compares ganeti version
1032 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
1033 a8083063 Iustin Pop
      - checks config file checksum
1034 a8083063 Iustin Pop
      - checks ssh to other nodes
1035 a8083063 Iustin Pop

1036 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
1037 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
1038 e4376078 Iustin Pop
    @param file_list: required list of files
1039 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
1040 e4376078 Iustin Pop
    @param node_result: the results from the node
1041 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
1042 6d2e83d5 Iustin Pop
    @param drbd_map: the used drbd minors for this node, in
1043 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
1044 6d2e83d5 Iustin Pop
        and their running status
1045 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
1046 098c0958 Michael Hanselmann

1047 a8083063 Iustin Pop
    """
1048 112f18a5 Iustin Pop
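    # drbd_map is expected to look like (illustrative values):
    #   {0: ("instance1.example.com", True),
    #    1: ("instance2.example.com", False)}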
    node = nodeinfo.name
1049 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1050 25361b9a Iustin Pop
1051 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
1052 a0c9776a Iustin Pop
    test = not node_result or not isinstance(node_result, dict)
1053 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1054 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1055 a0c9776a Iustin Pop
    if test:
1056 a0c9776a Iustin Pop
      return
1057 25361b9a Iustin Pop
1058 a8083063 Iustin Pop
    # compares ganeti version
1059 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1060 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
1061 a0c9776a Iustin Pop
    test = not (remote_version and
1062 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1063 a0c9776a Iustin Pop
                len(remote_version) == 2)
1064 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1065 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1066 a0c9776a Iustin Pop
    if test:
1067 a0c9776a Iustin Pop
      return
1068 a0c9776a Iustin Pop
1069 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1070 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1071 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1072 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1073 a0c9776a Iustin Pop
    if test:
1074 a0c9776a Iustin Pop
      return
1075 a8083063 Iustin Pop
1076 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1077 a8083063 Iustin Pop
1078 e9ce0a64 Iustin Pop
    # full package version
1079 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1080 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1081 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1082 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1083 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1084 e9ce0a64 Iustin Pop
1085 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
1086 cc9e1230 Guido Trotter
    if vg_name is not None:
1087 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
1088 a0c9776a Iustin Pop
      test = not vglist
1089 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1090 a0c9776a Iustin Pop
      if not test:
1091 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1092 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
1093 a0c9776a Iustin Pop
        _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1094 a8083063 Iustin Pop
1095 a8083063 Iustin Pop
    # checks config file checksum
1096 a8083063 Iustin Pop
1097 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
1098 a0c9776a Iustin Pop
    test = not isinstance(remote_cksum, dict)
1099 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1100 a0c9776a Iustin Pop
             "node hasn't returned file checksum data")
1101 a0c9776a Iustin Pop
    if not test:
1102 a8083063 Iustin Pop
      for file_name in file_list:
1103 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
1104 a0c9776a Iustin Pop
        must_have = (file_name not in master_files) or node_is_mc
1105 a0c9776a Iustin Pop
        # missing
1106 a0c9776a Iustin Pop
        test1 = file_name not in remote_cksum
1107 a0c9776a Iustin Pop
        # invalid checksum
1108 a0c9776a Iustin Pop
        test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1109 a0c9776a Iustin Pop
        # existing and good
1110 a0c9776a Iustin Pop
        test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1111 a0c9776a Iustin Pop
        _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1112 a0c9776a Iustin Pop
                 "file '%s' missing", file_name)
1113 a0c9776a Iustin Pop
        _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1114 a0c9776a Iustin Pop
                 "file '%s' has wrong checksum", file_name)
1115 a0c9776a Iustin Pop
        # not candidate and this is not a must-have file
1116 a0c9776a Iustin Pop
        _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1117 a0c9776a Iustin Pop
                 "file '%s' should not exist on non master"
1118 a0c9776a Iustin Pop
                 " candidates (and the file is outdated)", file_name)
1119 a0c9776a Iustin Pop
        # all good, except non-master/non-must have combination
1120 a0c9776a Iustin Pop
        _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1121 a0c9776a Iustin Pop
                 "file '%s' should not exist"
1122 a0c9776a Iustin Pop
                 " on non master candidates", file_name)
1123 a8083063 Iustin Pop
1124 25361b9a Iustin Pop
    # checks ssh to any
1125 25361b9a Iustin Pop
1126 a0c9776a Iustin Pop
    test = constants.NV_NODELIST not in node_result
1127 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1128 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1129 a0c9776a Iustin Pop
    if not test:
1130 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
1131 7c874ee1 Iustin Pop
        for a_node, a_msg in node_result[constants.NV_NODELIST].items():
1132 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1133 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1134 25361b9a Iustin Pop
1135 a0c9776a Iustin Pop
    test = constants.NV_NODENETTEST not in node_result
1136 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1137 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1138 a0c9776a Iustin Pop
    if not test:
1139 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
1140 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
1141 7c874ee1 Iustin Pop
        for anode in nlist:
1142 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1143 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1144 a0c9776a Iustin Pop
                   anode, node_result[constants.NV_NODENETTEST][anode])
1145 9d4bfc96 Iustin Pop
1146 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
1147 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
1148 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1149 a0c9776a Iustin Pop
        test = hv_result is not None
1150 a0c9776a Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1151 a0c9776a Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1152 6d2e83d5 Iustin Pop
1153 6d2e83d5 Iustin Pop
    # check used drbd list
1154 cc9e1230 Guido Trotter
    if vg_name is not None:
1155 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
1156 a0c9776a Iustin Pop
      test = not isinstance(used_minors, (tuple, list))
1157 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1158 a0c9776a Iustin Pop
               "cannot parse drbd status file: %s", str(used_minors))
1159 a0c9776a Iustin Pop
      if not test:
1160 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
1161 a0c9776a Iustin Pop
          test = minor not in used_minors and must_exist
1162 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1163 a0c9776a Iustin Pop
                   "drbd minor %d of instance %s is not active",
1164 a0c9776a Iustin Pop
                   minor, iname)
1165 cc9e1230 Guido Trotter
        for minor in used_minors:
1166 a0c9776a Iustin Pop
          test = minor not in drbd_map
1167 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1168 a0c9776a Iustin Pop
                   "unallocated drbd minor %d is in use", minor)
1169 7c0aa8e9 Iustin Pop
    test = node_result.get(constants.NV_NODESETUP,
1170 7c0aa8e9 Iustin Pop
                           ["Missing NODESETUP results"])
1171 7c0aa8e9 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1172 7c0aa8e9 Iustin Pop
             "; ".join(test))
1173 a8083063 Iustin Pop
1174 d091393e Iustin Pop
    # check pv names
1175 d091393e Iustin Pop
    if vg_name is not None:
1176 d091393e Iustin Pop
      pvlist = node_result.get(constants.NV_PVLIST, None)
1177 d091393e Iustin Pop
      test = pvlist is None
1178 d091393e Iustin Pop
      _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1179 d091393e Iustin Pop
      if not test:
1180 d091393e Iustin Pop
        # check that ':' is not present in PV names, since it's a
1181 d091393e Iustin Pop
        # special character for lvcreate (denotes the range of PEs to
1182 d091393e Iustin Pop
        # use on the PV)
1183 1122eb25 Iustin Pop
        for _, pvname, owner_vg in pvlist:
1184 d091393e Iustin Pop
          test = ":" in pvname
1185 d091393e Iustin Pop
          _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1186 d091393e Iustin Pop
                   " '%s' of VG '%s'", pvname, owner_vg)
1187 d091393e Iustin Pop
1188 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
1189 7c874ee1 Iustin Pop
                      node_instance, n_offline):
1190 a8083063 Iustin Pop
    """Verify an instance.
1191 a8083063 Iustin Pop

1192 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1193 a8083063 Iustin Pop
    available on the instance's node.
1194 a8083063 Iustin Pop

1195 a8083063 Iustin Pop
    """
1196 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1197 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1198 a8083063 Iustin Pop
1199 a8083063 Iustin Pop
    node_vol_should = {}
1200 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1201 a8083063 Iustin Pop
1202 a8083063 Iustin Pop
    for node in node_vol_should:
1203 0a66c968 Iustin Pop
      if node in n_offline:
1204 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
1205 0a66c968 Iustin Pop
        continue
1206 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1207 a0c9776a Iustin Pop
        test = node not in node_vol_is or volume not in node_vol_is[node]
1208 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1209 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1210 a8083063 Iustin Pop
1211 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1212 a0c9776a Iustin Pop
      test = ((node_current not in node_instance or
1213 a0c9776a Iustin Pop
               not instance in node_instance[node_current]) and
1214 a0c9776a Iustin Pop
              node_current not in n_offline)
1215 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1216 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1217 a0c9776a Iustin Pop
               node_current)
1218 a8083063 Iustin Pop
1219 a8083063 Iustin Pop
    for node in node_instance:
1220 a8083063 Iustin Pop
      if (not node == node_current):
1221 a0c9776a Iustin Pop
        test = instance in node_instance[node]
1222 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1223 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1224 a8083063 Iustin Pop
1225 7c874ee1 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is):
1226 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1227 a8083063 Iustin Pop

1228 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1229 a8083063 Iustin Pop
    reported as unknown.
1230 a8083063 Iustin Pop

1231 a8083063 Iustin Pop
    """
1232 a8083063 Iustin Pop
    for node in node_vol_is:
1233 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
1234 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1235 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1236 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1237 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1238 a8083063 Iustin Pop
1239 7c874ee1 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance):
1240 a8083063 Iustin Pop
    """Verify the list of running instances.
1241 a8083063 Iustin Pop

1242 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1243 a8083063 Iustin Pop

1244 a8083063 Iustin Pop
    """
1245 a8083063 Iustin Pop
    for node in node_instance:
1246 7c874ee1 Iustin Pop
      for o_inst in node_instance[node]:
1247 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1248 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1249 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1250 a8083063 Iustin Pop
1251 7c874ee1 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg):
1252 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1253 2b3b6ddd Guido Trotter

1254 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
1255 2b3b6ddd Guido Trotter
    was primary for.
1256 2b3b6ddd Guido Trotter

1257 2b3b6ddd Guido Trotter
    """
1258 2b3b6ddd Guido Trotter
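    # Worked example (illustrative): if this node is secondary for inst1
    # (BE_MEMORY 2048) and inst2 (BE_MEMORY 1024), both primary on node2 and
    # auto-balanced, then needed_mem for prinode node2 is 3072 and an error
    # is reported when this node's 'mfree' is below that.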
    for node, nodeinfo in node_info.iteritems():
1259 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
1260 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
1261 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
1262 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1263 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1264 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
1265 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
1266 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
1267 2b3b6ddd Guido Trotter
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
1268 2b3b6ddd Guido Trotter
        needed_mem = 0
1269 2b3b6ddd Guido Trotter
        for instance in instances:
1270 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1271 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1272 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1273 a0c9776a Iustin Pop
        test = nodeinfo['mfree'] < needed_mem
1274 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1275 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1276 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1277 2b3b6ddd Guido Trotter
1278 a8083063 Iustin Pop
  def CheckPrereq(self):
1279 a8083063 Iustin Pop
    """Check prerequisites.
1280 a8083063 Iustin Pop

1281 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1282 e54c4c5e Guido Trotter
    all its members are valid.
1283 a8083063 Iustin Pop

1284 a8083063 Iustin Pop
    """
1285 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1286 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1287 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1288 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1289 a8083063 Iustin Pop
1290 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1291 d8fff41c Guido Trotter
    """Build hooks env.
1292 d8fff41c Guido Trotter

1293 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks are run only in the post phase; if they fail, their
1294 d8fff41c Guido Trotter
    output is logged in the verify output and the verification fails.
1295 d8fff41c Guido Trotter

1296 d8fff41c Guido Trotter
    """
1297 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1298 35e994e9 Iustin Pop
    env = {
1299 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1300 35e994e9 Iustin Pop
      }
1301 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1302 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1303 35e994e9 Iustin Pop
1304 d8fff41c Guido Trotter
    return env, [], all_nodes
1305 d8fff41c Guido Trotter
1306 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1307 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1308 a8083063 Iustin Pop

1309 a8083063 Iustin Pop
    """
1310 a0c9776a Iustin Pop
    self.bad = False
1311 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1312 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1313 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1314 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1315 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1316 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1317 a8083063 Iustin Pop
1318 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1319 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1320 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1321 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1322 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1323 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1324 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1325 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1326 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1327 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
1328 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
1329 a8083063 Iustin Pop
    node_volume = {}
1330 a8083063 Iustin Pop
    node_instance = {}
1331 9c9c7d30 Guido Trotter
    node_info = {}
1332 26b6af5e Guido Trotter
    instance_cfg = {}
1333 a8083063 Iustin Pop
1334 a8083063 Iustin Pop
    # FIXME: verify OS list
1335 a8083063 Iustin Pop
    # do local checksums
1336 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1337 112f18a5 Iustin Pop
1338 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1339 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1340 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1341 112f18a5 Iustin Pop
    file_names.extend(master_files)
1342 112f18a5 Iustin Pop
1343 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1344 a8083063 Iustin Pop
1345 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1346 a8083063 Iustin Pop
    node_verify_param = {
1347 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1348 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1349 82e37788 Iustin Pop
                              if not node.offline],
1350 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1351 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1352 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1353 82e37788 Iustin Pop
                                 if not node.offline],
1354 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1355 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1356 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1357 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1358 313b2dd4 Michael Hanselmann
      constants.NV_TIME: None,
1359 a8083063 Iustin Pop
      }
1360 313b2dd4 Michael Hanselmann
1361 cc9e1230 Guido Trotter
    if vg_name is not None:
1362 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1363 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1364 d091393e Iustin Pop
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1365 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1366 313b2dd4 Michael Hanselmann
1367 313b2dd4 Michael Hanselmann
    # Due to the way our RPC system works, exact response times cannot be
1368 313b2dd4 Michael Hanselmann
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1369 313b2dd4 Michael Hanselmann
    # time before and after executing the request, we can at least have a time
1370 313b2dd4 Michael Hanselmann
    # window.
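    # (A node's reported time is later accepted only if it falls within
    # [nvinfo_starttime - NODE_MAX_CLOCK_SKEW,
    #  nvinfo_endtime + NODE_MAX_CLOCK_SKEW].)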
1371 313b2dd4 Michael Hanselmann
    nvinfo_starttime = time.time()
1372 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1373 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1374 313b2dd4 Michael Hanselmann
    nvinfo_endtime = time.time()
1375 a8083063 Iustin Pop
1376 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1377 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1378 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1379 6d2e83d5 Iustin Pop
1380 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1381 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1382 112f18a5 Iustin Pop
      node = node_i.name
1383 25361b9a Iustin Pop
1384 0a66c968 Iustin Pop
      if node_i.offline:
1385 7c874ee1 Iustin Pop
        if verbose:
1386 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1387 0a66c968 Iustin Pop
        n_offline.append(node)
1388 0a66c968 Iustin Pop
        continue
1389 0a66c968 Iustin Pop
1390 112f18a5 Iustin Pop
      if node == master_node:
1391 25361b9a Iustin Pop
        ntype = "master"
1392 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1393 25361b9a Iustin Pop
        ntype = "master candidate"
1394 22f0f71d Iustin Pop
      elif node_i.drained:
1395 22f0f71d Iustin Pop
        ntype = "drained"
1396 22f0f71d Iustin Pop
        n_drained.append(node)
1397 112f18a5 Iustin Pop
      else:
1398 25361b9a Iustin Pop
        ntype = "regular"
1399 7c874ee1 Iustin Pop
      if verbose:
1400 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1401 25361b9a Iustin Pop
1402 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1403 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1404 6f68a739 Iustin Pop
      if msg:
1405 25361b9a Iustin Pop
        continue
1406 25361b9a Iustin Pop
1407 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1408 6d2e83d5 Iustin Pop
      node_drbd = {}
1409 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1410 a0c9776a Iustin Pop
        test = instance not in instanceinfo
1411 a0c9776a Iustin Pop
        _ErrorIf(test, self.ECLUSTERCFG, None,
1412 a0c9776a Iustin Pop
                 "ghost instance '%s' in temporary DRBD map", instance)
1413 c614e5fb Iustin Pop
        # ghost instance should not be running, but otherwise we
1414 c614e5fb Iustin Pop
        # don't give double warnings (both ghost instance and
1415 c614e5fb Iustin Pop
        # unallocated minor in use)
1416 a0c9776a Iustin Pop
        if test:
1417 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1418 c614e5fb Iustin Pop
        else:
1419 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1420 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1421 313b2dd4 Michael Hanselmann
1422 a0c9776a Iustin Pop
      self._VerifyNode(node_i, file_names, local_checksums,
1423 a0c9776a Iustin Pop
                       nresult, master_files, node_drbd, vg_name)
1424 a8083063 Iustin Pop
1425 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1426 cc9e1230 Guido Trotter
      if vg_name is None:
1427 cc9e1230 Guido Trotter
        node_volume[node] = {}
1428 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1429 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1430 a0c9776a Iustin Pop
                 utils.SafeEncode(lvdata))
1431 b63ed789 Iustin Pop
        node_volume[node] = {}
1432 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1433 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1434 a8083063 Iustin Pop
        continue
1435 b63ed789 Iustin Pop
      else:
1436 25361b9a Iustin Pop
        node_volume[node] = lvdata
1437 a8083063 Iustin Pop
1438 a8083063 Iustin Pop
      # node_instance
1439 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1440 a0c9776a Iustin Pop
      test = not isinstance(idata, list)
1441 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node,
1442 a0c9776a Iustin Pop
               "rpc call to node failed (instancelist)")
1443 a0c9776a Iustin Pop
      if test:
1444 a8083063 Iustin Pop
        continue
1445 a8083063 Iustin Pop
1446 25361b9a Iustin Pop
      node_instance[node] = idata
1447 a8083063 Iustin Pop
1448 9c9c7d30 Guido Trotter
      # node_info
1449 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1450 a0c9776a Iustin Pop
      test = not isinstance(nodeinfo, dict)
1451 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1452 a0c9776a Iustin Pop
      if test:
1453 9c9c7d30 Guido Trotter
        continue
1454 9c9c7d30 Guido Trotter
1455 313b2dd4 Michael Hanselmann
      # Node time
1456 313b2dd4 Michael Hanselmann
      ntime = nresult.get(constants.NV_TIME, None)
1457 313b2dd4 Michael Hanselmann
      try:
1458 313b2dd4 Michael Hanselmann
        ntime_merged = utils.MergeTime(ntime)
1459 313b2dd4 Michael Hanselmann
      except (ValueError, TypeError):
1460 30bb62ea Michael Hanselmann
        _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
        continue
1461 313b2dd4 Michael Hanselmann
1462 313b2dd4 Michael Hanselmann
      if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1463 313b2dd4 Michael Hanselmann
        ntime_diff = abs(nvinfo_starttime - ntime_merged)
1464 313b2dd4 Michael Hanselmann
      elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1465 313b2dd4 Michael Hanselmann
        ntime_diff = abs(ntime_merged - nvinfo_endtime)
1466 313b2dd4 Michael Hanselmann
      else:
1467 313b2dd4 Michael Hanselmann
        ntime_diff = None
1468 313b2dd4 Michael Hanselmann
1469 313b2dd4 Michael Hanselmann
      _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1470 313b2dd4 Michael Hanselmann
               "Node time diverges by at least %0.1fs from master node time",
1471 313b2dd4 Michael Hanselmann
               ntime_diff)
1472 313b2dd4 Michael Hanselmann
1473 313b2dd4 Michael Hanselmann
      if ntime_diff is not None:
1474 313b2dd4 Michael Hanselmann
        continue
1475 313b2dd4 Michael Hanselmann
1476 9c9c7d30 Guido Trotter
      try:
1477 9c9c7d30 Guido Trotter
        node_info[node] = {
1478 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1479 93e4c50b Guido Trotter
          "pinst": [],
1480 93e4c50b Guido Trotter
          "sinst": [],
1481 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1482 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1483 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1484 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1485 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1486 36e7da50 Guido Trotter
          # secondary.
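          # Example (illustrative): {"node1": ["inst1", "inst2"]} means both
          # instances have node1 as primary and this node as secondary.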
1487 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1488 9c9c7d30 Guido Trotter
        }
1489 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1490 cc9e1230 Guido Trotter
        if vg_name is not None:
1491 a0c9776a Iustin Pop
          test = (constants.NV_VGLIST not in nresult or
1492 a0c9776a Iustin Pop
                  vg_name not in nresult[constants.NV_VGLIST])
1493 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODELVM, node,
1494 a0c9776a Iustin Pop
                   "node didn't return data for the volume group '%s'"
1495 a0c9776a Iustin Pop
                   " - it is either missing or broken", vg_name)
1496 a0c9776a Iustin Pop
          if test:
1497 9a198532 Iustin Pop
            continue
1498 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1499 9a198532 Iustin Pop
      except (ValueError, KeyError):
1500 a0c9776a Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1501 a0c9776a Iustin Pop
                 "node returned invalid nodeinfo, check lvm/hypervisor")
1502 9c9c7d30 Guido Trotter
        continue
1503 9c9c7d30 Guido Trotter
1504 a8083063 Iustin Pop
    node_vol_should = {}
1505 a8083063 Iustin Pop
1506 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1507 a8083063 Iustin Pop
    for instance in instancelist:
1508 7c874ee1 Iustin Pop
      if verbose:
1509 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1510 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1511 a0c9776a Iustin Pop
      self._VerifyInstance(instance, inst_config, node_volume,
1512 a0c9776a Iustin Pop
                           node_instance, n_offline)
1513 832261fd Iustin Pop
      inst_nodes_offline = []
1514 a8083063 Iustin Pop
1515 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1516 a8083063 Iustin Pop
1517 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1518 26b6af5e Guido Trotter
1519 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1520 a0c9776a Iustin Pop
      _ErrorIf(pnode not in node_info and pnode not in n_offline,
1521 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1522 a0c9776a Iustin Pop
               " primary node failed", instance)
1523 93e4c50b Guido Trotter
      if pnode in node_info:
1524 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1525 93e4c50b Guido Trotter
1526 832261fd Iustin Pop
      if pnode in n_offline:
1527 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1528 832261fd Iustin Pop
1529 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1530 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1531 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1532 93e4c50b Guido Trotter
      # supported either.
1533 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1534 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1535 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1536 a0c9776a Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1,
1537 a0c9776a Iustin Pop
               self.EINSTANCELAYOUT, instance,
1538 a0c9776a Iustin Pop
               "instance has multiple secondary nodes", code="WARNING")
1539 93e4c50b Guido Trotter
1540 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1541 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1542 3924700f Iustin Pop
1543 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1544 a0c9776a Iustin Pop
        _ErrorIf(snode not in node_info and snode not in n_offline,
1545 a0c9776a Iustin Pop
                 self.ENODERPC, snode,
1546 a0c9776a Iustin Pop
                 "instance %s, connection to secondary node"
1547 a0c9776a Iustin Pop
                 "failed", instance)
1548 a0c9776a Iustin Pop
1549 93e4c50b Guido Trotter
        if snode in node_info:
1550 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1551 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1552 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1553 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1554 a0c9776a Iustin Pop
1555 832261fd Iustin Pop
        if snode in n_offline:
1556 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1557 832261fd Iustin Pop
1558 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1559 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1560 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1561 1f864b60 Iustin Pop
               utils.CommaJoin(inst_nodes_offline))
1562 93e4c50b Guido Trotter
1563 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1564 a0c9776a Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_volume)
1565 a8083063 Iustin Pop
1566 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1567 a0c9776a Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_instance)
1568 a8083063 Iustin Pop
1569 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1570 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1571 a0c9776a Iustin Pop
      self._VerifyNPlusOneMemory(node_info, instance_cfg)
1572 2b3b6ddd Guido Trotter
1573 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1574 2b3b6ddd Guido Trotter
    if i_non_redundant:
1575 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1576 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1577 2b3b6ddd Guido Trotter
1578 3924700f Iustin Pop
    if i_non_a_balanced:
1579 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1580 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1581 3924700f Iustin Pop
1582 0a66c968 Iustin Pop
    if n_offline:
1583 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1584 0a66c968 Iustin Pop
1585 22f0f71d Iustin Pop
    if n_drained:
1586 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1587 22f0f71d Iustin Pop
1588 a0c9776a Iustin Pop
    return not self.bad
1589 a8083063 Iustin Pop
1590 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1591 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1592 e4376078 Iustin Pop

1593 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1594 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1595 d8fff41c Guido Trotter

1596 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1597 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1598 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1599 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
1600 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1601 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1602 e4376078 Iustin Pop
        and hook results
1603 d8fff41c Guido Trotter

1604 d8fff41c Guido Trotter
    """
1605 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1606 38206f3c Iustin Pop
    # their results
1607 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1608 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1609 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1610 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1611 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1612 7c874ee1 Iustin Pop
1613 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1614 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1615 7c874ee1 Iustin Pop
        msg = res.fail_msg
1616 a0c9776a Iustin Pop
        test = msg and not res.offline
1617 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1618 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1619 dd9e9f9c Michael Hanselmann
        if res.offline or msg:
1620 dd9e9f9c Michael Hanselmann
          # No need to investigate payload if node is offline or gave an error.
1621 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1622 a0c9776a Iustin Pop
          # overrides self.bad
1623 7c874ee1 Iustin Pop
          lu_result = 1
1624 7c874ee1 Iustin Pop
          continue
1625 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1626 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1627 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1628 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1629 a0c9776a Iustin Pop
          if test:
1630 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1631 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1632 6d7b472a Iustin Pop
            lu_result = 0
1633 d8fff41c Guido Trotter
1634 d8fff41c Guido Trotter
      return lu_result
1635 d8fff41c Guido Trotter
1636 a8083063 Iustin Pop
1637 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1638 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1639 2c95a8d4 Iustin Pop

1640 2c95a8d4 Iustin Pop
  """
1641 2c95a8d4 Iustin Pop
  _OP_REQP = []
1642 d4b9d97f Guido Trotter
  REQ_BGL = False
1643 d4b9d97f Guido Trotter
1644 d4b9d97f Guido Trotter
  def ExpandNames(self):
1645 d4b9d97f Guido Trotter
    self.needed_locks = {
1646 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1647 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1648 d4b9d97f Guido Trotter
    }
1649 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1650 2c95a8d4 Iustin Pop
1651 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1652 2c95a8d4 Iustin Pop
    """Check prerequisites.
1653 2c95a8d4 Iustin Pop

1654 2c95a8d4 Iustin Pop
    This has no prerequisites.
1655 2c95a8d4 Iustin Pop

1656 2c95a8d4 Iustin Pop
    """
1657 2c95a8d4 Iustin Pop
    pass
1658 2c95a8d4 Iustin Pop
1659 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1660 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1661 2c95a8d4 Iustin Pop

1662 29d376ec Iustin Pop
    @rtype: tuple of three items
1663 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1664 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1665 29d376ec Iustin Pop
        missing volumes)
1666 29d376ec Iustin Pop

1667 2c95a8d4 Iustin Pop
    """
1668 29d376ec Iustin Pop
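    # Illustrative shape of the return value (hypothetical names):
    #   ({"node2.example.com": "rpc failure message"},
    #    ["instance3.example.com"],
    #    {"instance5.example.com": [("node1.example.com", "xenvg/lv1")]})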
    result = res_nodes, res_instances, res_missing = {}, [], {}
1669 2c95a8d4 Iustin Pop
1670 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1671 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1672 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1673 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1674 2c95a8d4 Iustin Pop
1675 2c95a8d4 Iustin Pop
    nv_dict = {}
1676 2c95a8d4 Iustin Pop
    for inst in instances:
1677 2c95a8d4 Iustin Pop
      inst_lvs = {}
1678 0d68c45d Iustin Pop
      if (not inst.admin_up or
1679 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1680 2c95a8d4 Iustin Pop
        continue
1681 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1682 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1683 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1684 2c95a8d4 Iustin Pop
        for vol in vol_list:
1685 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1686 2c95a8d4 Iustin Pop
1687 2c95a8d4 Iustin Pop
    if not nv_dict:
1688 2c95a8d4 Iustin Pop
      return result
1689 2c95a8d4 Iustin Pop
1690 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1691 2c95a8d4 Iustin Pop
1692 2c95a8d4 Iustin Pop
    for node in nodes:
1693 2c95a8d4 Iustin Pop
      # node_volume
1694 29d376ec Iustin Pop
      node_res = node_lvs[node]
1695 29d376ec Iustin Pop
      if node_res.offline:
1696 ea9ddc07 Iustin Pop
        continue
1697 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1698 29d376ec Iustin Pop
      if msg:
1699 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1700 29d376ec Iustin Pop
        res_nodes[node] = msg
1701 2c95a8d4 Iustin Pop
        continue
1702 2c95a8d4 Iustin Pop
1703 29d376ec Iustin Pop
      lvs = node_res.payload
1704 1122eb25 Iustin Pop
      for lv_name, (_, _, lv_online) in lvs.items():
1705 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1706 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1707 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1708 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1709 2c95a8d4 Iustin Pop
1710 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1711 b63ed789 Iustin Pop
    # data better
1712 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1713 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1714 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1715 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1716 b63ed789 Iustin Pop
1717 2c95a8d4 Iustin Pop
    return result
1718 2c95a8d4 Iustin Pop
1719 2c95a8d4 Iustin Pop
1720 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
1721 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
1722 60975797 Iustin Pop

1723 60975797 Iustin Pop
  """
1724 60975797 Iustin Pop
  _OP_REQP = ["instances"]
1725 60975797 Iustin Pop
  REQ_BGL = False
1726 60975797 Iustin Pop
1727 60975797 Iustin Pop
  def ExpandNames(self):
1728 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
1729 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
1730 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1731 60975797 Iustin Pop
1732 60975797 Iustin Pop
    if self.op.instances:
1733 60975797 Iustin Pop
      self.wanted_names = []
1734 60975797 Iustin Pop
      for name in self.op.instances:
1735 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
1736 60975797 Iustin Pop
        self.wanted_names.append(full_name)
1737 60975797 Iustin Pop
      self.needed_locks = {
1738 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
1739 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
1740 60975797 Iustin Pop
        }
1741 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1742 60975797 Iustin Pop
    else:
1743 60975797 Iustin Pop
      self.wanted_names = None
1744 60975797 Iustin Pop
      self.needed_locks = {
1745 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
1746 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
1747 60975797 Iustin Pop
        }
1748 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1749 60975797 Iustin Pop
1750 60975797 Iustin Pop
  def DeclareLocks(self, level):
1751 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
1752 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
1753 60975797 Iustin Pop
1754 60975797 Iustin Pop
  def CheckPrereq(self):
1755 60975797 Iustin Pop
    """Check prerequisites.
1756 60975797 Iustin Pop

1757 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
1758 60975797 Iustin Pop

1759 60975797 Iustin Pop
    """
1760 60975797 Iustin Pop
    if self.wanted_names is None:
1761 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
1762 60975797 Iustin Pop
1763 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
1764 60975797 Iustin Pop
                             in self.wanted_names]
1765 60975797 Iustin Pop
1766 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
1767 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
1768 b775c337 Iustin Pop

1769 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
1770 b775c337 Iustin Pop
    children have smaller disk size.
1771 b775c337 Iustin Pop

1772 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
1773 b775c337 Iustin Pop

1774 b775c337 Iustin Pop
    """
1775 b775c337 Iustin Pop
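    # Illustrative example: a DRBD8 disk recorded at 10240 MiB whose data
    # child (children[0]) still reports 10000 MiB gets the child grown to
    # 10240 MiB here; the caller then writes the configuration back.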
    if disk.dev_type == constants.LD_DRBD8:
1776 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
1777 b775c337 Iustin Pop
      fchild = disk.children[0]
1778 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
1779 b775c337 Iustin Pop
      if mismatch:
1780 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
1781 b775c337 Iustin Pop
                     fchild.size, disk.size)
1782 b775c337 Iustin Pop
        fchild.size = disk.size
1783 b775c337 Iustin Pop
1784 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
1785 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
1786 b775c337 Iustin Pop
    else:
1787 b775c337 Iustin Pop
      return False
1788 b775c337 Iustin Pop
1789 60975797 Iustin Pop
  def Exec(self, feedback_fn):
1790 60975797 Iustin Pop
    """Verify the size of cluster disks.
1791 60975797 Iustin Pop

1792 60975797 Iustin Pop
    """
1793 60975797 Iustin Pop
    # TODO: check child disks too
1794 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
1795 60975797 Iustin Pop
    per_node_disks = {}
1796 60975797 Iustin Pop
    for instance in self.wanted_instances:
1797 60975797 Iustin Pop
      pnode = instance.primary_node
1798 60975797 Iustin Pop
      if pnode not in per_node_disks:
1799 60975797 Iustin Pop
        per_node_disks[pnode] = []
1800 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
1801 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
1802 60975797 Iustin Pop
1803 60975797 Iustin Pop
    changed = []
1804 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
1805 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
1806 4d9e6835 Iustin Pop
      for dsk in newl:
1807 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
1808 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
1809 3cebe102 Michael Hanselmann
      if result.fail_msg:
1810 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
1811 60975797 Iustin Pop
                        " %s, ignoring", node)
1812 60975797 Iustin Pop
        continue
1813 60975797 Iustin Pop
      if len(result.data) != len(dskl):
1814 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
1815 60975797 Iustin Pop
                        node)
1816 60975797 Iustin Pop
        continue
1817 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.data):
1818 60975797 Iustin Pop
        if size is None:
1819 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
1820 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
1821 60975797 Iustin Pop
          continue
1822 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
1823 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
1824 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
1825 60975797 Iustin Pop
          continue
1826 60975797 Iustin Pop
        size = size >> 20
1827 60975797 Iustin Pop
        if size != disk.size:
1828 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
1829 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
1830 60975797 Iustin Pop
                       instance.name, disk.size, size)
1831 60975797 Iustin Pop
          disk.size = size
1832 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
1833 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
1834 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
1835 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
1836 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
1837 60975797 Iustin Pop
    return changed
1838 60975797 Iustin Pop
1839 60975797 Iustin Pop
1840 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1841 07bd8a51 Iustin Pop
  """Rename the cluster.
1842 07bd8a51 Iustin Pop

1843 07bd8a51 Iustin Pop
  """
1844 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1845 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1846 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1847 07bd8a51 Iustin Pop
1848 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1849 07bd8a51 Iustin Pop
    """Build hooks env.
1850 07bd8a51 Iustin Pop

1851 07bd8a51 Iustin Pop
    """
1852 07bd8a51 Iustin Pop
    env = {
1853 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1854 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1855 07bd8a51 Iustin Pop
      }
1856 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1857 47a72f18 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1858 47a72f18 Iustin Pop
    return env, [mn], all_nodes
1859 07bd8a51 Iustin Pop
1860 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1861 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1862 07bd8a51 Iustin Pop

1863 07bd8a51 Iustin Pop
    """
1864 104f4ca1 Iustin Pop
    hostname = utils.GetHostInfo(self.op.name)
1865 07bd8a51 Iustin Pop
1866 bcf043c9 Iustin Pop
    new_name = hostname.name
1867 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1868 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1869 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1870 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1871 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1872 5c983ee5 Iustin Pop
                                 " cluster has changed",
1873 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1874 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1875 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1876 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1877 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1878 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
1879 07bd8a51 Iustin Pop
1880 07bd8a51 Iustin Pop
    self.op.name = new_name
1881 07bd8a51 Iustin Pop
1882 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1883 07bd8a51 Iustin Pop
    """Rename the cluster.
1884 07bd8a51 Iustin Pop

1885 07bd8a51 Iustin Pop
    """
1886 07bd8a51 Iustin Pop
    clustername = self.op.name
1887 07bd8a51 Iustin Pop
    ip = self.ip
1888 07bd8a51 Iustin Pop
1889 07bd8a51 Iustin Pop
    # shutdown the master IP
1890 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1891 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1892 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1893 07bd8a51 Iustin Pop
1894 07bd8a51 Iustin Pop
    try:
1895 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1896 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1897 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1898 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
1899 ec85e3d5 Iustin Pop
1900 ec85e3d5 Iustin Pop
      # update the known hosts file
1901 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1902 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1903 ec85e3d5 Iustin Pop
      try:
1904 ec85e3d5 Iustin Pop
        node_list.remove(master)
1905 ec85e3d5 Iustin Pop
      except ValueError:
1906 ec85e3d5 Iustin Pop
        pass
1907 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1908 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1909 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1910 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1911 6f7d4e75 Iustin Pop
        if msg:
1912 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1913 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1914 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1915 ec85e3d5 Iustin Pop
1916 07bd8a51 Iustin Pop
    finally:
1917 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
1918 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1919 b726aff0 Iustin Pop
      if msg:
1920 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1921 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1922 07bd8a51 Iustin Pop
1923 07bd8a51 Iustin Pop
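# Illustrative sketch (not part of this module's code paths): the rename is
# normally driven through the matching opcode; assuming the usual
# opcodes.OpRenameCluster wrapper (a single "name" field, as _OP_REQP above
# suggests) and the cli.SubmitOpCode helper, a client would do roughly:
#
#   op = opcodes.OpRenameCluster(name="newname.example.com")
#   cli.SubmitOpCode(op)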
1924 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1925 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1926 8084f9f6 Manuel Franceschini

1927 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1928 e4376078 Iustin Pop
  @param disk: the disk to check
1929 5bbd3f7f Michael Hanselmann
  @rtype: boolean
1930 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
1931 8084f9f6 Manuel Franceschini

1932 8084f9f6 Manuel Franceschini
  """
1933 8084f9f6 Manuel Franceschini
  if disk.children:
1934 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1935 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1936 8084f9f6 Manuel Franceschini
        return True
1937 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1938 8084f9f6 Manuel Franceschini
1939 8084f9f6 Manuel Franceschini
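# Example (illustrative values only, not executed): a DRBD8 disk whose
# children are logical volumes is classified as lvm-based:
#
#   lv_data = objects.Disk(dev_type=constants.LD_LV, size=1024)
#   lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128)
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, size=1024,
#                       children=[lv_data, lv_meta])
#   assert _RecursiveCheckIfLVMBased(drbd)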
1940 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1941 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1942 8084f9f6 Manuel Franceschini

1943 8084f9f6 Manuel Franceschini
  """
1944 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1945 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1946 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1947 c53279cf Guido Trotter
  REQ_BGL = False
1948 c53279cf Guido Trotter
1949 3994f455 Iustin Pop
  def CheckArguments(self):
1950 4b7735f9 Iustin Pop
    """Check parameters
1951 4b7735f9 Iustin Pop

1952 4b7735f9 Iustin Pop
    """
1953 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1954 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1955 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1956 4b7735f9 Iustin Pop
      try:
1957 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1958 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1959 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1960 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
1961 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1962 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
1963 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
1964 4b7735f9 Iustin Pop
1965 c53279cf Guido Trotter
  def ExpandNames(self):
1966 c53279cf Guido Trotter
    # FIXME: in the future, modifying other cluster params may not require
1967 c53279cf Guido Trotter
    # checking all nodes.
1968 c53279cf Guido Trotter
    self.needed_locks = {
1969 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1970 c53279cf Guido Trotter
    }
1971 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1972 8084f9f6 Manuel Franceschini
1973 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1974 8084f9f6 Manuel Franceschini
    """Build hooks env.
1975 8084f9f6 Manuel Franceschini

1976 8084f9f6 Manuel Franceschini
    """
1977 8084f9f6 Manuel Franceschini
    env = {
1978 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1979 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1980 8084f9f6 Manuel Franceschini
      }
1981 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1982 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1983 8084f9f6 Manuel Franceschini
1984 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1985 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1986 8084f9f6 Manuel Franceschini

1987 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1988 5f83e263 Iustin Pop
    if the given volume group is valid.
1989 8084f9f6 Manuel Franceschini

1990 8084f9f6 Manuel Franceschini
    """
1991 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1992 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1993 8084f9f6 Manuel Franceschini
      for inst in instances:
1994 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1995 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1996 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1997 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
1998 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
1999 8084f9f6 Manuel Franceschini
2000 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2001 779c15bb Iustin Pop
2002 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
2003 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
2004 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
2005 8084f9f6 Manuel Franceschini
      for node in node_list:
2006 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
2007 e480923b Iustin Pop
        if msg:
2008 781de953 Iustin Pop
          # ignoring down node
2009 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
2010 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
2011 781de953 Iustin Pop
          continue
2012 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2013 781de953 Iustin Pop
                                              self.op.vg_name,
2014 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
2015 8084f9f6 Manuel Franceschini
        if vgstatus:
2016 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
2017 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2018 8084f9f6 Manuel Franceschini
2019 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
2020 5af3da74 Guido Trotter
    # validate params changes
2021 779c15bb Iustin Pop
    if self.op.beparams:
2022 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2023 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
2024 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2025 779c15bb Iustin Pop
2026 5af3da74 Guido Trotter
    if self.op.nicparams:
2027 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2028 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
2029 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2030 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2031 90b704a1 Guido Trotter
      nic_errors = []
2032 90b704a1 Guido Trotter
2033 90b704a1 Guido Trotter
      # check all instances for consistency
2034 90b704a1 Guido Trotter
      for instance in self.cfg.GetAllInstancesInfo().values():
2035 90b704a1 Guido Trotter
        for nic_idx, nic in enumerate(instance.nics):
2036 90b704a1 Guido Trotter
          params_copy = copy.deepcopy(nic.nicparams)
2037 90b704a1 Guido Trotter
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2038 90b704a1 Guido Trotter
2039 90b704a1 Guido Trotter
          # check parameter syntax
2040 90b704a1 Guido Trotter
          try:
2041 90b704a1 Guido Trotter
            objects.NIC.CheckParameterSyntax(params_filled)
2042 90b704a1 Guido Trotter
          except errors.ConfigurationError, err:
2043 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: %s" %
2044 90b704a1 Guido Trotter
                              (instance.name, nic_idx, err))
2045 90b704a1 Guido Trotter
2046 90b704a1 Guido Trotter
          # if we're moving instances to routed, check that they have an ip
2047 90b704a1 Guido Trotter
          target_mode = params_filled[constants.NIC_MODE]
2048 90b704a1 Guido Trotter
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2049 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: routed nic with no ip" %
2050 90b704a1 Guido Trotter
                              (instance.name, nic_idx))
2051 90b704a1 Guido Trotter
      if nic_errors:
2052 90b704a1 Guido Trotter
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2053 90b704a1 Guido Trotter
                                   "\n".join(nic_errors), errors.ECODE_INVAL)
2054 5af3da74 Guido Trotter
2055 779c15bb Iustin Pop
    # hypervisor list/parameters
2056 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
2057 779c15bb Iustin Pop
    if self.op.hvparams:
2058 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
2059 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2060 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2061 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
2062 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
2063 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
2064 779c15bb Iustin Pop
        else:
2065 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
2066 779c15bb Iustin Pop
2067 17463d22 René Nussbaumer
    # os hypervisor parameters
2068 17463d22 René Nussbaumer
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2069 17463d22 René Nussbaumer
    if self.op.os_hvp:
2070 17463d22 René Nussbaumer
      if not isinstance(self.op.os_hvp, dict):
2071 17463d22 René Nussbaumer
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2072 17463d22 René Nussbaumer
                                   errors.ECODE_INVAL)
2073 17463d22 René Nussbaumer
      for os_name, hvs in self.op.os_hvp.items():
2074 17463d22 René Nussbaumer
        if not isinstance(hvs, dict):
2075 17463d22 René Nussbaumer
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2076 17463d22 René Nussbaumer
                                      " input"), errors.ECODE_INVAL)
2077 17463d22 René Nussbaumer
        if os_name not in self.new_os_hvp:
2078 17463d22 René Nussbaumer
          self.new_os_hvp[os_name] = hvs
2079 17463d22 René Nussbaumer
        else:
2080 17463d22 René Nussbaumer
          for hv_name, hv_dict in hvs.items():
2081 17463d22 René Nussbaumer
            if hv_name not in self.new_os_hvp[os_name]:
2082 17463d22 René Nussbaumer
              self.new_os_hvp[os_name][hv_name] = hv_dict
2083 17463d22 René Nussbaumer
            else:
2084 17463d22 René Nussbaumer
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2085 17463d22 René Nussbaumer
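    # os_hvp is a two-level mapping, e.g. (names purely illustrative):
    #   {"debian-etch": {"xen-pvm": {"kernel_path": "/boot/vmlinuz"}}}
    # which is why the merge above has to recurse one level deeper than the
    # plain hvparams merge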
2086 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2087 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
2088 b119bccb Guido Trotter
      if not self.hv_list:
2089 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2090 5c983ee5 Iustin Pop
                                   " least one member",
2091 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2092 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2093 b119bccb Guido Trotter
      if invalid_hvs:
2094 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2095 ab3e6da8 Iustin Pop
                                   " entries: %s" %
2096 ab3e6da8 Iustin Pop
                                   utils.CommaJoin(invalid_hvs),
2097 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2098 779c15bb Iustin Pop
    else:
2099 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
2100 779c15bb Iustin Pop
2101 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2102 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
2103 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
2104 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2105 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
2106 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
2107 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
2108 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2109 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2110 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
2111 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
2112 779c15bb Iustin Pop
2113 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
2114 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
2115 8084f9f6 Manuel Franceschini

2116 8084f9f6 Manuel Franceschini
    """
2117 779c15bb Iustin Pop
    if self.op.vg_name is not None:
2118 b2482333 Guido Trotter
      new_volume = self.op.vg_name
2119 b2482333 Guido Trotter
      if not new_volume:
2120 b2482333 Guido Trotter
        new_volume = None
2121 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
2122 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
2123 779c15bb Iustin Pop
      else:
2124 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
2125 779c15bb Iustin Pop
                    " state, not changing")
2126 779c15bb Iustin Pop
    if self.op.hvparams:
2127 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2128 17463d22 René Nussbaumer
    if self.op.os_hvp:
2129 17463d22 René Nussbaumer
      self.cluster.os_hvp = self.new_os_hvp
2130 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2131 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2132 779c15bb Iustin Pop
    if self.op.beparams:
2133 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2134 5af3da74 Guido Trotter
    if self.op.nicparams:
2135 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2136 5af3da74 Guido Trotter
2137 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2138 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2139 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2140 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2141 4b7735f9 Iustin Pop
2142 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2143 8084f9f6 Manuel Franceschini
2144 8084f9f6 Manuel Franceschini
2145 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2146 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2147 28eddce5 Guido Trotter

2148 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2149 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2150 28eddce5 Guido Trotter
  makes sure those are copied.
2151 28eddce5 Guido Trotter

2152 28eddce5 Guido Trotter
  @param lu: calling logical unit
2153 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2154 28eddce5 Guido Trotter

2155 28eddce5 Guido Trotter
  """
2156 28eddce5 Guido Trotter
  # 1. Gather target nodes
2157 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2158 6819dc49 Iustin Pop
  dist_nodes = lu.cfg.GetOnlineNodeList()
2159 28eddce5 Guido Trotter
  if additional_nodes is not None:
2160 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2161 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2162 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2163 a4eae71f Michael Hanselmann
2164 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2165 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2166 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2167 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2168 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2169 4a34c5cf Guido Trotter
                    constants.HMAC_CLUSTER_KEY,
2170 28eddce5 Guido Trotter
                   ])
2171 e1b8653f Guido Trotter
2172 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2173 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2174 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2175 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2176 e1b8653f Guido Trotter
2177 28eddce5 Guido Trotter
  # 3. Perform the files upload
2178 28eddce5 Guido Trotter
  for fname in dist_files:
2179 28eddce5 Guido Trotter
    if os.path.exists(fname):
2180 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2181 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2182 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2183 6f7d4e75 Iustin Pop
        if msg:
2184 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2185 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2186 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2187 28eddce5 Guido Trotter
2188 28eddce5 Guido Trotter
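# Illustrative usage sketch: a node that is being added is not yet part of
# the configuration, so such a caller is expected to pass it explicitly:
#
#   _RedistributeAncillaryFiles(self, additional_nodes=[new_node.name])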
2189 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2190 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2191 afee0879 Iustin Pop

2192 afee0879 Iustin Pop
  This is a very simple LU.
2193 afee0879 Iustin Pop

2194 afee0879 Iustin Pop
  """
2195 afee0879 Iustin Pop
  _OP_REQP = []
2196 afee0879 Iustin Pop
  REQ_BGL = False
2197 afee0879 Iustin Pop
2198 afee0879 Iustin Pop
  def ExpandNames(self):
2199 afee0879 Iustin Pop
    self.needed_locks = {
2200 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2201 afee0879 Iustin Pop
    }
2202 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2203 afee0879 Iustin Pop
2204 afee0879 Iustin Pop
  def CheckPrereq(self):
2205 afee0879 Iustin Pop
    """Check prerequisites.
2206 afee0879 Iustin Pop

2207 afee0879 Iustin Pop
    """
2208 afee0879 Iustin Pop
2209 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2210 afee0879 Iustin Pop
    """Redistribute the configuration.
2211 afee0879 Iustin Pop

2212 afee0879 Iustin Pop
    """
2213 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2214 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2215 afee0879 Iustin Pop
2216 afee0879 Iustin Pop
2217 b6c07b79 Michael Hanselmann
def _WaitForSync(lu, instance, oneshot=False):
2218 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2219 a8083063 Iustin Pop

2220 a8083063 Iustin Pop
  """
2221 a8083063 Iustin Pop
  if not instance.disks:
2222 a8083063 Iustin Pop
    return True
2223 a8083063 Iustin Pop
2224 a8083063 Iustin Pop
  if not oneshot:
2225 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2226 a8083063 Iustin Pop
2227 a8083063 Iustin Pop
  node = instance.primary_node
2228 a8083063 Iustin Pop
2229 a8083063 Iustin Pop
  for dev in instance.disks:
2230 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2231 a8083063 Iustin Pop
2232 6bcb1446 Michael Hanselmann
  # TODO: Convert to utils.Retry
2233 6bcb1446 Michael Hanselmann
2234 a8083063 Iustin Pop
  retries = 0
2235 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2236 a8083063 Iustin Pop
  while True:
2237 a8083063 Iustin Pop
    max_time = 0
2238 a8083063 Iustin Pop
    done = True
2239 a8083063 Iustin Pop
    cumul_degraded = False
2240 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2241 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2242 3efa9051 Iustin Pop
    if msg:
2243 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2244 a8083063 Iustin Pop
      retries += 1
2245 a8083063 Iustin Pop
      if retries >= 10:
2246 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2247 3ecf6786 Iustin Pop
                                 " aborting." % node)
2248 a8083063 Iustin Pop
      time.sleep(6)
2249 a8083063 Iustin Pop
      continue
2250 3efa9051 Iustin Pop
    rstats = rstats.payload
2251 a8083063 Iustin Pop
    retries = 0
2252 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2253 a8083063 Iustin Pop
      if mstat is None:
2254 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2255 86d9d3bb Iustin Pop
                      node, instance.disks[i].iv_name)
2256 a8083063 Iustin Pop
        continue
2257 36145b12 Michael Hanselmann
2258 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2259 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2260 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2261 a8083063 Iustin Pop
        done = False
2262 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2263 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2264 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2265 a8083063 Iustin Pop
        else:
2266 a8083063 Iustin Pop
          rem_time = "no time estimate"
2267 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2268 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2269 4d4a651d Michael Hanselmann
                         rem_time))
2270 fbafd7a8 Iustin Pop
2271 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2272 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2273 fbafd7a8 Iustin Pop
    # we force restart of the loop
2274 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2275 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2276 fbafd7a8 Iustin Pop
      degr_retries -= 1
2277 fbafd7a8 Iustin Pop
      time.sleep(1)
2278 fbafd7a8 Iustin Pop
      continue
2279 fbafd7a8 Iustin Pop
2280 a8083063 Iustin Pop
    if done or oneshot:
2281 a8083063 Iustin Pop
      break
2282 a8083063 Iustin Pop
2283 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2284 a8083063 Iustin Pop
2285 a8083063 Iustin Pop
  if done:
2286 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2287 a8083063 Iustin Pop
  return not cumul_degraded
2288 a8083063 Iustin Pop
2289 a8083063 Iustin Pop
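# Illustrative usage sketch: since _WaitForSync returns True only when no
# disk is left degraded, a caller would typically invert the result to get
# an abort flag:
#
#   disk_abort = not _WaitForSync(self, instance)
#   if disk_abort:
#     self.LogWarning("Disks of instance %s are degraded", instance.name)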
2290 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2291 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2292 a8083063 Iustin Pop

2293 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2294 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2295 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2296 0834c866 Iustin Pop

2297 a8083063 Iustin Pop
  """
2298 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2299 a8083063 Iustin Pop
2300 a8083063 Iustin Pop
  result = True
2301 96acbc09 Michael Hanselmann
2302 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2303 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2304 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2305 23829f6f Iustin Pop
    if msg:
2306 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2307 23829f6f Iustin Pop
      result = False
2308 23829f6f Iustin Pop
    elif not rstats.payload:
2309 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2310 a8083063 Iustin Pop
      result = False
2311 a8083063 Iustin Pop
    else:
2312 96acbc09 Michael Hanselmann
      if ldisk:
2313 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2314 96acbc09 Michael Hanselmann
      else:
2315 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2316 96acbc09 Michael Hanselmann
2317 a8083063 Iustin Pop
  if dev.children:
2318 a8083063 Iustin Pop
    for child in dev.children:
2319 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2320 a8083063 Iustin Pop
2321 a8083063 Iustin Pop
  return result
2322 a8083063 Iustin Pop
2323 a8083063 Iustin Pop
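# Illustrative usage sketch: with ldisk=True only the local storage state is
# checked (LDS_OKAY) instead of the overall mirror state, which is what a
# disk-replacement flow would care about:
#
#   if not _CheckDiskConsistency(self, dev, node_name, False, ldisk=True):
#     raise errors.OpExecError("Disk %s on node %s is degraded" %
#                              (dev.iv_name, node_name))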
2324 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2325 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2326 a8083063 Iustin Pop

2327 a8083063 Iustin Pop
  """
2328 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2329 6bf01bbb Guido Trotter
  REQ_BGL = False
2330 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2331 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2332 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2333 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2334 a8083063 Iustin Pop
2335 6bf01bbb Guido Trotter
  def ExpandNames(self):
2336 1f9430d6 Iustin Pop
    if self.op.names:
2337 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2338 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2339 1f9430d6 Iustin Pop
2340 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2341 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2342 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2343 1f9430d6 Iustin Pop
2344 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2345 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2346 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2347 6bf01bbb Guido Trotter
    self.needed_locks = {}
2348 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2349 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2350 6bf01bbb Guido Trotter
2351 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2352 6bf01bbb Guido Trotter
    """Check prerequisites.
2353 6bf01bbb Guido Trotter

2354 6bf01bbb Guido Trotter
    """
2355 6bf01bbb Guido Trotter
2356 1f9430d6 Iustin Pop
  @staticmethod
2357 857121ad Iustin Pop
  def _DiagnoseByOS(rlist):
2358 1f9430d6 Iustin Pop
    """Remaps a per-node return list into a per-os per-node dictionary
2359 1f9430d6 Iustin Pop

2360 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2361 1f9430d6 Iustin Pop

2362 e4376078 Iustin Pop
    @rtype: dict
2363 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and, as values, another map
2364 255dcebd Iustin Pop
        with nodes as keys and tuples of (path, status, diagnose, variants)
        as values, eg::
2365 e4376078 Iustin Pop

2366 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", []),
2367 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api", [])],
2368 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "", [])]}
2369 e4376078 Iustin Pop
          }
2370 1f9430d6 Iustin Pop

2371 1f9430d6 Iustin Pop
    """
2372 1f9430d6 Iustin Pop
    all_os = {}
2373 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2374 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2375 a6ab004b Iustin Pop
    # make all OSes invalid
2376 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2377 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2378 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2379 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2380 1f9430d6 Iustin Pop
        continue
2381 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2382 255dcebd Iustin Pop
        if name not in all_os:
2383 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2384 1f9430d6 Iustin Pop
          # for each node in node_list
2385 255dcebd Iustin Pop
          all_os[name] = {}
2386 a6ab004b Iustin Pop
          for nname in good_nodes:
2387 255dcebd Iustin Pop
            all_os[name][nname] = []
2388 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2389 1f9430d6 Iustin Pop
    return all_os
2390 a8083063 Iustin Pop
2391 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2392 a8083063 Iustin Pop
    """Compute the list of OSes.
2393 a8083063 Iustin Pop

2394 a8083063 Iustin Pop
    """
2395 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2396 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2397 857121ad Iustin Pop
    pol = self._DiagnoseByOS(node_data)
2398 1f9430d6 Iustin Pop
    output = []
2399 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2400 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2401 1e288a26 Guido Trotter
2402 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2403 1f9430d6 Iustin Pop
      row = []
2404 1e288a26 Guido Trotter
      if calc_valid:
2405 1e288a26 Guido Trotter
        valid = True
2406 1e288a26 Guido Trotter
        variants = None
2407 1e288a26 Guido Trotter
        for osl in os_data.values():
2408 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2409 1e288a26 Guido Trotter
          if not valid:
2410 1e288a26 Guido Trotter
            variants = None
2411 1e288a26 Guido Trotter
            break
2412 1e288a26 Guido Trotter
          if calc_variants:
2413 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2414 1e288a26 Guido Trotter
            if variants is None:
2415 1e288a26 Guido Trotter
              variants = node_variants
2416 1e288a26 Guido Trotter
            else:
2417 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2418 1e288a26 Guido Trotter
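      # at this point "variants" holds the intersection of the variant lists
      # reported by the individual nodes, e.g. (illustrative) ["lenny",
      # "squeeze"] on node1 and ["squeeze"] on node2 reduce to ["squeeze"]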
2419 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2420 1f9430d6 Iustin Pop
        if field == "name":
2421 1f9430d6 Iustin Pop
          val = os_name
2422 1f9430d6 Iustin Pop
        elif field == "valid":
2423 1e288a26 Guido Trotter
          val = valid
2424 1f9430d6 Iustin Pop
        elif field == "node_status":
2425 255dcebd Iustin Pop
          # this is just a copy of the dict
2426 1f9430d6 Iustin Pop
          val = {}
2427 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2428 255dcebd Iustin Pop
            val[node_name] = nos_list
2429 1e288a26 Guido Trotter
        elif field == "variants":
2430 1e288a26 Guido Trotter
          val =  variants
2431 1f9430d6 Iustin Pop
        else:
2432 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2433 1f9430d6 Iustin Pop
        row.append(val)
2434 1f9430d6 Iustin Pop
      output.append(row)
2435 1f9430d6 Iustin Pop
2436 1f9430d6 Iustin Pop
    return output
2437 a8083063 Iustin Pop
2438 a8083063 Iustin Pop
2439 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2440 a8083063 Iustin Pop
  """Logical unit for removing a node.
2441 a8083063 Iustin Pop

2442 a8083063 Iustin Pop
  """
2443 a8083063 Iustin Pop
  HPATH = "node-remove"
2444 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2445 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2446 a8083063 Iustin Pop
2447 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2448 a8083063 Iustin Pop
    """Build hooks env.
2449 a8083063 Iustin Pop

2450 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2451 d08869ee Guido Trotter
    node would then be impossible to remove.
2452 a8083063 Iustin Pop

2453 a8083063 Iustin Pop
    """
2454 396e1b78 Michael Hanselmann
    env = {
2455 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2456 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2457 396e1b78 Michael Hanselmann
      }
2458 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2459 9bb31ea8 Iustin Pop
    try:
2460 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2461 9bb31ea8 Iustin Pop
    except ValueError:
2462 9bb31ea8 Iustin Pop
      logging.warning("Node %s which is about to be removed not found"
2463 9bb31ea8 Iustin Pop
                      " in the all nodes list", self.op.node_name)
2464 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2465 a8083063 Iustin Pop
2466 a8083063 Iustin Pop
  def CheckPrereq(self):
2467 a8083063 Iustin Pop
    """Check prerequisites.
2468 a8083063 Iustin Pop

2469 a8083063 Iustin Pop
    This checks:
2470 a8083063 Iustin Pop
     - the node exists in the configuration
2471 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2472 a8083063 Iustin Pop
     - it's not the master
2473 a8083063 Iustin Pop

2474 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2475 a8083063 Iustin Pop

2476 a8083063 Iustin Pop
    """
2477 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
2478 cf26a87a Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2479 cf26a87a Iustin Pop
    assert node is not None
2480 a8083063 Iustin Pop
2481 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2482 a8083063 Iustin Pop
2483 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2484 a8083063 Iustin Pop
    if node.name == masternode:
2485 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2486 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2487 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2488 a8083063 Iustin Pop
2489 a8083063 Iustin Pop
    for instance_name in instance_list:
2490 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2491 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2492 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2493 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2494 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2495 a8083063 Iustin Pop
    self.op.node_name = node.name
2496 a8083063 Iustin Pop
    self.node = node
2497 a8083063 Iustin Pop
2498 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2499 a8083063 Iustin Pop
    """Removes the node from the cluster.
2500 a8083063 Iustin Pop

2501 a8083063 Iustin Pop
    """
2502 a8083063 Iustin Pop
    node = self.node
2503 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2504 9a4f63d1 Iustin Pop
                 node.name)
2505 a8083063 Iustin Pop
2506 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2507 b989b9d9 Ken Wehr
2508 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2509 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2510 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2511 a8083063 Iustin Pop
2512 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2513 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2514 cd46f3b4 Luca Bigliardi
    try:
2515 1122eb25 Iustin Pop
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2516 3cb5c1e3 Luca Bigliardi
    except:
2517 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
2518 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2519 cd46f3b4 Luca Bigliardi
2520 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2521 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2522 0623d351 Iustin Pop
    if msg:
2523 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2524 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2525 c8a0948f Michael Hanselmann
2526 a8083063 Iustin Pop
2527 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2528 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2529 a8083063 Iustin Pop

2530 a8083063 Iustin Pop
  """
2531 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
2532 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2533 35705d8f Guido Trotter
  REQ_BGL = False
2534 19bed813 Iustin Pop
2535 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2536 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2537 19bed813 Iustin Pop
2538 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2539 31bf511f Iustin Pop
    "dtotal", "dfree",
2540 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2541 31bf511f Iustin Pop
    "bootid",
2542 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2543 31bf511f Iustin Pop
    )
2544 31bf511f Iustin Pop
2545 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2546 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2547 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2548 31bf511f Iustin Pop
    "pip", "sip", "tags",
2549 0e67cdbe Iustin Pop
    "master",
2550 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2551 31bf511f Iustin Pop
    )
2552 a8083063 Iustin Pop
2553 35705d8f Guido Trotter
  def ExpandNames(self):
2554 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2555 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2556 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2557 a8083063 Iustin Pop
2558 35705d8f Guido Trotter
    self.needed_locks = {}
2559 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2560 c8d8b4c8 Iustin Pop
2561 c8d8b4c8 Iustin Pop
    if self.op.names:
2562 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2563 35705d8f Guido Trotter
    else:
2564 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2565 c8d8b4c8 Iustin Pop
2566 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2567 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2568 c8d8b4c8 Iustin Pop
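    # example (illustrative field choices): ["name", "mfree"] contains the
    # dynamic field "mfree", so live data is needed and, with use_locking
    # set, node locks are acquired; a purely static query such as
    # ["name", "pinst_cnt"] needs neither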
    if self.do_locking:
2569 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2570 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2571 c8d8b4c8 Iustin Pop
2572 35705d8f Guido Trotter
  def CheckPrereq(self):
2573 35705d8f Guido Trotter
    """Check prerequisites.
2574 35705d8f Guido Trotter

2575 35705d8f Guido Trotter
    """
2576 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
2577 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
2578 c8d8b4c8 Iustin Pop
    pass
2579 a8083063 Iustin Pop
2580 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2581 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2582 a8083063 Iustin Pop

2583 a8083063 Iustin Pop
    """
2584 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2585 c8d8b4c8 Iustin Pop
    if self.do_locking:
2586 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2587 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2588 3fa93523 Guido Trotter
      nodenames = self.wanted
2589 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2590 3fa93523 Guido Trotter
      if missing:
2591 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2592 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2593 c8d8b4c8 Iustin Pop
    else:
2594 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2595 c1f1cbb2 Iustin Pop
2596 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2597 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2598 a8083063 Iustin Pop
2599 a8083063 Iustin Pop
    # begin data gathering
2600 a8083063 Iustin Pop
2601 bc8e4a1a Iustin Pop
    if self.do_node_query:
2602 a8083063 Iustin Pop
      live_data = {}
2603 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2604 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2605 a8083063 Iustin Pop
      for name in nodenames:
2606 781de953 Iustin Pop
        nodeinfo = node_data[name]
2607 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2608 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2609 d599d686 Iustin Pop
          fn = utils.TryConvert
2610 a8083063 Iustin Pop
          live_data[name] = {
2611 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2612 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2613 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2614 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2615 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2616 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2617 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2618 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2619 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2620 a8083063 Iustin Pop
            }
2621 a8083063 Iustin Pop
        else:
2622 a8083063 Iustin Pop
          live_data[name] = {}
2623 a8083063 Iustin Pop
    else:
2624 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2625 a8083063 Iustin Pop
2626 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2627 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2628 a8083063 Iustin Pop
2629 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2630 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2631 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2632 4dfd6266 Iustin Pop
      inst_data = self.cfg.GetAllInstancesInfo()
2633 a8083063 Iustin Pop
2634 1122eb25 Iustin Pop
      for inst in inst_data.values():
2635 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2636 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2637 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2638 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2639 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2640 a8083063 Iustin Pop
2641 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2642 0e67cdbe Iustin Pop
2643 a8083063 Iustin Pop
    # end data gathering
2644 a8083063 Iustin Pop
2645 a8083063 Iustin Pop
    output = []
2646 a8083063 Iustin Pop
    for node in nodelist:
2647 a8083063 Iustin Pop
      node_output = []
2648 a8083063 Iustin Pop
      for field in self.op.output_fields:
2649 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2650 19bed813 Iustin Pop
          val = getattr(node, field)
2651 ec223efb Iustin Pop
        elif field == "pinst_list":
2652 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2653 ec223efb Iustin Pop
        elif field == "sinst_list":
2654 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2655 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2656 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2657 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2658 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2659 a8083063 Iustin Pop
        elif field == "pip":
2660 a8083063 Iustin Pop
          val = node.primary_ip
2661 a8083063 Iustin Pop
        elif field == "sip":
2662 a8083063 Iustin Pop
          val = node.secondary_ip
2663 130a6a6f Iustin Pop
        elif field == "tags":
2664 130a6a6f Iustin Pop
          val = list(node.GetTags())
2665 0e67cdbe Iustin Pop
        elif field == "master":
2666 0e67cdbe Iustin Pop
          val = node.name == master_node
2667 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2668 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2669 c120ff34 Iustin Pop
        elif field == "role":
2670 c120ff34 Iustin Pop
          if node.name == master_node:
2671 c120ff34 Iustin Pop
            val = "M"
2672 c120ff34 Iustin Pop
          elif node.master_candidate:
2673 c120ff34 Iustin Pop
            val = "C"
2674 c120ff34 Iustin Pop
          elif node.drained:
2675 c120ff34 Iustin Pop
            val = "D"
2676 c120ff34 Iustin Pop
          elif node.offline:
2677 c120ff34 Iustin Pop
            val = "O"
2678 c120ff34 Iustin Pop
          else:
2679 c120ff34 Iustin Pop
            val = "R"
2680 a8083063 Iustin Pop
        else:
2681 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2682 a8083063 Iustin Pop
        node_output.append(val)
2683 a8083063 Iustin Pop
      output.append(node_output)
2684 a8083063 Iustin Pop
2685 a8083063 Iustin Pop
    return output
2686 a8083063 Iustin Pop
2687 a8083063 Iustin Pop
2688 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2689 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2690 dcb93971 Michael Hanselmann

2691 dcb93971 Michael Hanselmann
  """
2692 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2693 21a15682 Guido Trotter
  REQ_BGL = False
2694 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2695 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2696 21a15682 Guido Trotter
2697 21a15682 Guido Trotter
  def ExpandNames(self):
2698 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2699 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2700 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2701 21a15682 Guido Trotter
2702 21a15682 Guido Trotter
    self.needed_locks = {}
2703 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2704 21a15682 Guido Trotter
    if not self.op.nodes:
2705 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2706 21a15682 Guido Trotter
    else:
2707 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2708 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2709 dcb93971 Michael Hanselmann
2710 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2711 dcb93971 Michael Hanselmann
    """Check prerequisites.
2712 dcb93971 Michael Hanselmann

2713 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
2714 dcb93971 Michael Hanselmann

2715 dcb93971 Michael Hanselmann
    """
2716 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2717 dcb93971 Michael Hanselmann
2718 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2719 dcb93971 Michael Hanselmann
    """Computes the list of volumes and their attributes.
2720 dcb93971 Michael Hanselmann

2721 dcb93971 Michael Hanselmann
    """
2722 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2723 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2724 dcb93971 Michael Hanselmann
2725 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2726 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2727 dcb93971 Michael Hanselmann
2728 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2729 dcb93971 Michael Hanselmann
2730 dcb93971 Michael Hanselmann
    output = []
2731 dcb93971 Michael Hanselmann
    for node in nodenames:
2732 10bfe6cb Iustin Pop
      nresult = volumes[node]
2733 10bfe6cb Iustin Pop
      if nresult.offline:
2734 10bfe6cb Iustin Pop
        continue
2735 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2736 10bfe6cb Iustin Pop
      if msg:
2737 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2738 37d19eb2 Michael Hanselmann
        continue
2739 37d19eb2 Michael Hanselmann
2740 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2741 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2742 dcb93971 Michael Hanselmann
2743 dcb93971 Michael Hanselmann
      for vol in node_vols:
2744 dcb93971 Michael Hanselmann
        node_output = []
2745 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2746 dcb93971 Michael Hanselmann
          if field == "node":
2747 dcb93971 Michael Hanselmann
            val = node
2748 dcb93971 Michael Hanselmann
          elif field == "phys":
2749 dcb93971 Michael Hanselmann
            val = vol['dev']
2750 dcb93971 Michael Hanselmann
          elif field == "vg":
2751 dcb93971 Michael Hanselmann
            val = vol['vg']
2752 dcb93971 Michael Hanselmann
          elif field == "name":
2753 dcb93971 Michael Hanselmann
            val = vol['name']
2754 dcb93971 Michael Hanselmann
          elif field == "size":
2755 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2756 dcb93971 Michael Hanselmann
          elif field == "instance":
2757 dcb93971 Michael Hanselmann
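            # look for the instance owning this LV on this node; the for/else
            # below leaves val as '-' when no owner is found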
            for inst in ilist:
2758 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2759 dcb93971 Michael Hanselmann
                continue
2760 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2761 dcb93971 Michael Hanselmann
                val = inst.name
2762 dcb93971 Michael Hanselmann
                break
2763 dcb93971 Michael Hanselmann
            else:
2764 dcb93971 Michael Hanselmann
              val = '-'
2765 dcb93971 Michael Hanselmann
          else:
2766 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2767 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2768 dcb93971 Michael Hanselmann
2769 dcb93971 Michael Hanselmann
        output.append(node_output)
2770 dcb93971 Michael Hanselmann
2771 dcb93971 Michael Hanselmann
    return output
2772 dcb93971 Michael Hanselmann
2773 dcb93971 Michael Hanselmann
2774 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
2775 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
2776 9e5442ce Michael Hanselmann

2777 9e5442ce Michael Hanselmann
  """
2778 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
2779 9e5442ce Michael Hanselmann
  REQ_BGL = False
2780 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
2781 9e5442ce Michael Hanselmann
2782 9e5442ce Michael Hanselmann
  def ExpandNames(self):
2783 9e5442ce Michael Hanselmann
    storage_type = self.op.storage_type
2784 9e5442ce Michael Hanselmann
2785 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
2786 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
2787 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2788 9e5442ce Michael Hanselmann
2789 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
2790 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
2791 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
2792 9e5442ce Michael Hanselmann
2793 9e5442ce Michael Hanselmann
    self.needed_locks = {}
2794 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
2795 9e5442ce Michael Hanselmann
2796 9e5442ce Michael Hanselmann
    if self.op.nodes:
2797 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
2798 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
2799 9e5442ce Michael Hanselmann
    else:
2800 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2801 9e5442ce Michael Hanselmann
2802 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
2803 9e5442ce Michael Hanselmann
    """Check prerequisites.
2804 9e5442ce Michael Hanselmann

2805 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
2806 9e5442ce Michael Hanselmann

2807 9e5442ce Michael Hanselmann
    """
2808 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
2809 9e5442ce Michael Hanselmann
2810 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2811 9e5442ce Michael Hanselmann
2812 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
2813 9e5442ce Michael Hanselmann
    """Computes the list of storage units and their attributes.
2814 9e5442ce Michael Hanselmann

2815 9e5442ce Michael Hanselmann
    """
2816 9e5442ce Michael Hanselmann
    # Always get name to sort by
2817 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
2818 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
2819 9e5442ce Michael Hanselmann
    else:
2820 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
2821 9e5442ce Michael Hanselmann
2822 620a85fd Iustin Pop
    # Never ask for node or type as it's only known to the LU
2823 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
2824 620a85fd Iustin Pop
      while extra in fields:
2825 620a85fd Iustin Pop
        fields.remove(extra)
2826 9e5442ce Michael Hanselmann
2827 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
2828 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
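    # field_idx maps each queried field to its column in the rows returned by
    # the storage_list RPC; name_idx is the column holding the unit's name and
    # is used below to sort and key the per-node results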
2829 9e5442ce Michael Hanselmann
2830 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2831 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
2832 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
2833 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
2834 9e5442ce Michael Hanselmann
2835 9e5442ce Michael Hanselmann
    result = []
2836 9e5442ce Michael Hanselmann
2837 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
2838 9e5442ce Michael Hanselmann
      nresult = data[node]
2839 9e5442ce Michael Hanselmann
      if nresult.offline:
2840 9e5442ce Michael Hanselmann
        continue
2841 9e5442ce Michael Hanselmann
2842 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
2843 9e5442ce Michael Hanselmann
      if msg:
2844 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
2845 9e5442ce Michael Hanselmann
        continue
2846 9e5442ce Michael Hanselmann
2847 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
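      # key each result row by the storage unit's name so the output can be
      # emitted in a stable, name-sorted order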
2848 9e5442ce Michael Hanselmann
2849 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
2850 9e5442ce Michael Hanselmann
        row = rows[name]
2851 9e5442ce Michael Hanselmann
2852 9e5442ce Michael Hanselmann
        out = []
2853 9e5442ce Michael Hanselmann
2854 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
2855 620a85fd Iustin Pop
          if field == constants.SF_NODE:
2856 9e5442ce Michael Hanselmann
            val = node
2857 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
2858 620a85fd Iustin Pop
            val = self.op.storage_type
2859 9e5442ce Michael Hanselmann
          elif field in field_idx:
2860 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
2861 9e5442ce Michael Hanselmann
          else:
2862 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
2863 9e5442ce Michael Hanselmann
2864 9e5442ce Michael Hanselmann
          out.append(val)
2865 9e5442ce Michael Hanselmann
2866 9e5442ce Michael Hanselmann
        result.append(out)
2867 9e5442ce Michael Hanselmann
2868 9e5442ce Michael Hanselmann
    return result
2869 9e5442ce Michael Hanselmann
2870 9e5442ce Michael Hanselmann
2871 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
2872 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
2873 efb8da02 Michael Hanselmann

2874 efb8da02 Michael Hanselmann
  """
2875 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
2876 efb8da02 Michael Hanselmann
  REQ_BGL = False
2877 efb8da02 Michael Hanselmann
2878 efb8da02 Michael Hanselmann
  def CheckArguments(self):
2879 cf26a87a Iustin Pop
    self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name)
2880 efb8da02 Michael Hanselmann
2881 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2882 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
2883 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
2884 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2885 efb8da02 Michael Hanselmann
2886 efb8da02 Michael Hanselmann
  def ExpandNames(self):
2887 efb8da02 Michael Hanselmann
    self.needed_locks = {
2888 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
2889 efb8da02 Michael Hanselmann
      }
2890 efb8da02 Michael Hanselmann
2891 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
2892 efb8da02 Michael Hanselmann
    """Check prerequisites.
2893 efb8da02 Michael Hanselmann

2894 efb8da02 Michael Hanselmann
    """
2895 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
2896 efb8da02 Michael Hanselmann
2897 efb8da02 Michael Hanselmann
    try:
2898 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
2899 efb8da02 Michael Hanselmann
    except KeyError:
2900 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
2901 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
2902 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2903 efb8da02 Michael Hanselmann
2904 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
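    # any requested change that is not in the backend's modifiable set is
    # rejected before contacting the node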
2905 efb8da02 Michael Hanselmann
    if diff:
2906 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
2907 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
2908 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
2909 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2910 efb8da02 Michael Hanselmann
2911 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
2912 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2913 efb8da02 Michael Hanselmann

2914 efb8da02 Michael Hanselmann
    """
2915 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
2916 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
2917 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
2918 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
2919 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
2920 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
2921 efb8da02 Michael Hanselmann
2922 efb8da02 Michael Hanselmann
2923 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2924 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2925 a8083063 Iustin Pop

2926 a8083063 Iustin Pop
  """
2927 a8083063 Iustin Pop
  HPATH = "node-add"
2928 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2929 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2930 a8083063 Iustin Pop
2931 44caf5a8 Iustin Pop
  def CheckArguments(self):
2932 44caf5a8 Iustin Pop
    # validate/normalize the node name
2933 44caf5a8 Iustin Pop
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
2934 44caf5a8 Iustin Pop
2935 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2936 a8083063 Iustin Pop
    """Build hooks env.
2937 a8083063 Iustin Pop

2938 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2939 a8083063 Iustin Pop

2940 a8083063 Iustin Pop
    """
2941 a8083063 Iustin Pop
    env = {
2942 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2943 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2944 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2945 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2946 a8083063 Iustin Pop
      }
2947 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2948 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2949 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2950 a8083063 Iustin Pop
2951 a8083063 Iustin Pop
  def CheckPrereq(self):
2952 a8083063 Iustin Pop
    """Check prerequisites.
2953 a8083063 Iustin Pop

2954 a8083063 Iustin Pop
    This checks:
2955 a8083063 Iustin Pop
     - the new node is not already in the config
2956 a8083063 Iustin Pop
     - it is resolvable
2957 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2958 a8083063 Iustin Pop

2959 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2960 a8083063 Iustin Pop

2961 a8083063 Iustin Pop
    """
2962 a8083063 Iustin Pop
    node_name = self.op.node_name
2963 a8083063 Iustin Pop
    cfg = self.cfg
2964 a8083063 Iustin Pop
2965 104f4ca1 Iustin Pop
    dns_data = utils.GetHostInfo(node_name)
2966 a8083063 Iustin Pop
2967 bcf043c9 Iustin Pop
    node = dns_data.name
2968 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2969 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2970 a8083063 Iustin Pop
    if secondary_ip is None:
2971 a8083063 Iustin Pop
      secondary_ip = primary_ip
2972 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2973 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
2974 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2975 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2976 e7c6e02b Michael Hanselmann
2977 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2978 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2979 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2980 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
2981 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2982 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
2983 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
2984 a8083063 Iustin Pop
2985 a8083063 Iustin Pop
    for existing_node_name in node_list:
2986 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2987 e7c6e02b Michael Hanselmann
2988 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2989 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2990 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2991 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2992 5c983ee5 Iustin Pop
                                     " address configuration as before",
2993 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
2994 e7c6e02b Michael Hanselmann
        continue
2995 e7c6e02b Michael Hanselmann
2996 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2997 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2998 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2999 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
3000 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3001 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
3002 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
3003 a8083063 Iustin Pop
3004 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
3005 a8083063 Iustin Pop
    # same as for the master
3006 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3007 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3008 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
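    # a node is considered single-homed when its secondary IP equals its
    # primary IP; mixing single- and dual-homed nodes in one cluster is not
    # allowed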
3009 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
3010 a8083063 Iustin Pop
      if master_singlehomed:
3011 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
3012 5c983ee5 Iustin Pop
                                   " new node has one",
3013 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3014 a8083063 Iustin Pop
      else:
3015 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
3016 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
3017 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3018 a8083063 Iustin Pop
3019 5bbd3f7f Michael Hanselmann
    # checks reachability
3020 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3021 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
3022 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
3023 a8083063 Iustin Pop
3024 a8083063 Iustin Pop
    if not newbie_singlehomed:
3025 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
3026 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3027 b15d625f Iustin Pop
                           source=myself.secondary_ip):
3028 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3029 5c983ee5 Iustin Pop
                                   " based ping to noded port",
3030 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
3031 a8083063 Iustin Pop
3032 a8ae3eb5 Iustin Pop
    if self.op.readd:
3033 a8ae3eb5 Iustin Pop
      exceptions = [node]
3034 a8ae3eb5 Iustin Pop
    else:
3035 a8ae3eb5 Iustin Pop
      exceptions = []
3036 6d7e1f20 Guido Trotter
3037 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3038 0fff97e9 Guido Trotter
3039 a8ae3eb5 Iustin Pop
    if self.op.readd:
3040 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
3041 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3042 a8ae3eb5 Iustin Pop
    else:
3043 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
3044 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
3045 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
3046 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
3047 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
3048 a8083063 Iustin Pop
3049 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3050 a8083063 Iustin Pop
    """Adds the new node to the cluster.
3051 a8083063 Iustin Pop

3052 a8083063 Iustin Pop
    """
3053 a8083063 Iustin Pop
    new_node = self.new_node
3054 a8083063 Iustin Pop
    node = new_node.name
3055 a8083063 Iustin Pop
3056 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
3057 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
3058 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
3059 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
3060 a8ae3eb5 Iustin Pop
    if self.op.readd:
3061 7260cfbe Iustin Pop
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3062 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3063 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
3064 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
3065 a8ae3eb5 Iustin Pop
3066 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
3067 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
3068 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
3069 a8ae3eb5 Iustin Pop
3070 a8083063 Iustin Pop
    # check connectivity
3071 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
3072 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
3073 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
3074 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
3075 90b54c26 Iustin Pop
                   node, result.payload)
3076 a8083063 Iustin Pop
    else:
3077 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
3078 90b54c26 Iustin Pop
                               " node version %s" %
3079 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
3080 a8083063 Iustin Pop
3081 a8083063 Iustin Pop
    # setup ssh on node
3082 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3083 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
3084 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3085 b989b9d9 Ken Wehr
      keyarray = []
3086 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3087 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3088 b989b9d9 Ken Wehr
                  priv_key, pub_key]
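      # keyfiles holds the node's SSH host keys (DSA and RSA, private and
      # public) plus the cluster user's SSH keypair; their contents are read
      # below and pushed to the new node via the node_add RPC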
3089 b989b9d9 Ken Wehr
3090 b989b9d9 Ken Wehr
      for i in keyfiles:
3091 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
3092 b989b9d9 Ken Wehr
3093 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3094 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
3095 b989b9d9 Ken Wehr
                                      keyarray[5])
3096 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
3097 a8083063 Iustin Pop
3098 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
3099 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3100 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
3101 c8a0948f Michael Hanselmann
3102 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
3103 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
3104 781de953 Iustin Pop
                                                 new_node.secondary_ip)
3105 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3106 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3107 c2fc8250 Iustin Pop
      if not result.payload:
3108 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3109 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
3110 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
3111 a8083063 Iustin Pop
3112 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
3113 5c0527ed Guido Trotter
    node_verify_param = {
3114 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
3115 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
3116 5c0527ed Guido Trotter
    }
3117 5c0527ed Guido Trotter
3118 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3119 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
3120 5c0527ed Guido Trotter
    for verifier in node_verify_list:
3121 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3122 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
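      # nl_payload maps each node that failed the ssh/hostname check (as seen
      # from this verifier) to the corresponding error message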
3123 6f68a739 Iustin Pop
      if nl_payload:
3124 6f68a739 Iustin Pop
        for failed in nl_payload:
3125 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
3126 31821208 Iustin Pop
                      " (checking from %s): %s" %
3127 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
3128 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
3129 ff98055b Iustin Pop
3130 d8470559 Michael Hanselmann
    if self.op.readd:
3131 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
3132 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
3133 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
3134 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
3135 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3136 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3137 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3138 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3139 a8ae3eb5 Iustin Pop
        if msg:
3140 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3141 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3142 d8470559 Michael Hanselmann
    else:
3143 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3144 0debfb35 Guido Trotter
      self.context.AddNode(new_node, self.proc.GetECId())
3145 a8083063 Iustin Pop
3146 a8083063 Iustin Pop
3147 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
3148 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3149 b31c8676 Iustin Pop

3150 b31c8676 Iustin Pop
  """
3151 b31c8676 Iustin Pop
  HPATH = "node-modify"
3152 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3153 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3154 b31c8676 Iustin Pop
  REQ_BGL = False
3155 b31c8676 Iustin Pop
3156 b31c8676 Iustin Pop
  def CheckArguments(self):
3157 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3158 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3159 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3160 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3161 601908d0 Iustin Pop
    _CheckBooleanOpField(self.op, 'auto_promote')
3162 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3163 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3164 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3165 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3166 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3167 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3168 5c983ee5 Iustin Pop
                                 " state at the same time",
3169 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3170 b31c8676 Iustin Pop
3171 601908d0 Iustin Pop
    # Boolean value that tells us whether we're offlining or draining the node
3172 601908d0 Iustin Pop
    self.offline_or_drain = (self.op.offline == True or
3173 601908d0 Iustin Pop
                             self.op.drained == True)
3174 601908d0 Iustin Pop
    self.deoffline_or_drain = (self.op.offline == False or
3175 601908d0 Iustin Pop
                               self.op.drained == False)
3176 601908d0 Iustin Pop
    self.might_demote = (self.op.master_candidate == False or
3177 601908d0 Iustin Pop
                         self.offline_or_drain)
3178 601908d0 Iustin Pop
3179 601908d0 Iustin Pop
    self.lock_all = self.op.auto_promote and self.might_demote
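    # demoting this node may leave too few master candidates; with
    # auto_promote we may have to promote other nodes, which requires holding
    # the lock on all of them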
3180 601908d0 Iustin Pop
3181 601908d0 Iustin Pop
3182 b31c8676 Iustin Pop
  def ExpandNames(self):
3183 601908d0 Iustin Pop
    if self.lock_all:
3184 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3185 601908d0 Iustin Pop
    else:
3186 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3187 b31c8676 Iustin Pop
3188 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3189 b31c8676 Iustin Pop
    """Build hooks env.
3190 b31c8676 Iustin Pop

3191 b31c8676 Iustin Pop
    This runs on the master node.
3192 b31c8676 Iustin Pop

3193 b31c8676 Iustin Pop
    """
3194 b31c8676 Iustin Pop
    env = {
3195 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3196 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3197 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3198 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3199 b31c8676 Iustin Pop
      }
3200 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3201 b31c8676 Iustin Pop
          self.op.node_name]
3202 b31c8676 Iustin Pop
    return env, nl, nl
3203 b31c8676 Iustin Pop
3204 b31c8676 Iustin Pop
  def CheckPrereq(self):
3205 b31c8676 Iustin Pop
    """Check prerequisites.
3206 b31c8676 Iustin Pop

3207 b31c8676 Iustin Pop
    This checks the requested flag changes against the node's current state.
3208 b31c8676 Iustin Pop

3209 b31c8676 Iustin Pop
    """
3210 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3211 b31c8676 Iustin Pop
3212 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3213 97c61d46 Iustin Pop
        self.op.drained is not None or
3214 97c61d46 Iustin Pop
        self.op.offline is not None):
3215 97c61d46 Iustin Pop
      # we can't change the master's node flags
3216 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3217 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3218 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3219 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3220 97c61d46 Iustin Pop
3221 601908d0 Iustin Pop
3222 601908d0 Iustin Pop
    if node.master_candidate and self.might_demote and not self.lock_all:
3223 601908d0 Iustin Pop
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3224 601908d0 Iustin Pop
      # check if after removing the current node, we're missing master
3225 601908d0 Iustin Pop
      # candidates
3226 601908d0 Iustin Pop
      (mc_remaining, mc_should, _) = \
3227 601908d0 Iustin Pop
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3228 601908d0 Iustin Pop
      if mc_remaining != mc_should:
3229 601908d0 Iustin Pop
        raise errors.OpPrereqError("Not enough master candidates, please"
3230 601908d0 Iustin Pop
                                   " pass auto_promote to allow promotion",
3231 601908d0 Iustin Pop
                                   errors.ECODE_INVAL)
3232 3e83dd48 Iustin Pop
3233 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3234 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3235 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3236 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3237 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3238 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3239 3a5ba66a Iustin Pop
3240 3d9eb52b Guido Trotter
    # If the node is being de-offlined or un-drained, it may promote itself
    # to master candidate if needed
3241 601908d0 Iustin Pop
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3242 cea0534a Guido Trotter
        self.op.master_candidate == True and not node.master_candidate):
3243 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3244 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3245 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3246 3d9eb52b Guido Trotter
3247 b31c8676 Iustin Pop
    return
3248 b31c8676 Iustin Pop
3249 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3250 b31c8676 Iustin Pop
    """Modifies a node.
3251 b31c8676 Iustin Pop

3252 b31c8676 Iustin Pop
    """
3253 3a5ba66a Iustin Pop
    node = self.node
3254 b31c8676 Iustin Pop
3255 b31c8676 Iustin Pop
    result = []
3256 c9d443ea Iustin Pop
    changed_mc = False
3257 b31c8676 Iustin Pop
3258 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3259 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3260 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3261 c9d443ea Iustin Pop
      if self.op.offline == True:
3262 c9d443ea Iustin Pop
        if node.master_candidate:
3263 c9d443ea Iustin Pop
          node.master_candidate = False
3264 c9d443ea Iustin Pop
          changed_mc = True
3265 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3266 c9d443ea Iustin Pop
        if node.drained:
3267 c9d443ea Iustin Pop
          node.drained = False
3268 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3269 3a5ba66a Iustin Pop
3270 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3271 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3272 c9d443ea Iustin Pop
      changed_mc = True
3273 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3274 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3275 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3276 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3277 0959c824 Iustin Pop
        if msg:
3278 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3279 b31c8676 Iustin Pop
3280 c9d443ea Iustin Pop
    if self.op.drained is not None:
3281 c9d443ea Iustin Pop
      node.drained = self.op.drained
3282 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3283 c9d443ea Iustin Pop
      if self.op.drained == True:
3284 c9d443ea Iustin Pop
        if node.master_candidate:
3285 c9d443ea Iustin Pop
          node.master_candidate = False
3286 c9d443ea Iustin Pop
          changed_mc = True
3287 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3288 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3289 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3290 dec0d9da Iustin Pop
          if msg:
3291 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3292 c9d443ea Iustin Pop
        if node.offline:
3293 c9d443ea Iustin Pop
          node.offline = False
3294 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3295 c9d443ea Iustin Pop
3296 601908d0 Iustin Pop
    # we locked all nodes; adjust the candidate pool before updating this node
3297 601908d0 Iustin Pop
    if self.lock_all:
3298 601908d0 Iustin Pop
      _AdjustCandidatePool(self, [node.name])
3299 601908d0 Iustin Pop
3300 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3301 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3302 601908d0 Iustin Pop
3303 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3304 c9d443ea Iustin Pop
    if changed_mc:
3305 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3306 b31c8676 Iustin Pop
3307 b31c8676 Iustin Pop
    return result
3308 b31c8676 Iustin Pop
3309 b31c8676 Iustin Pop
3310 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3311 f5118ade Iustin Pop
  """Powercycles a node.
3312 f5118ade Iustin Pop

3313 f5118ade Iustin Pop
  """
3314 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3315 f5118ade Iustin Pop
  REQ_BGL = False
3316 f5118ade Iustin Pop
3317 f5118ade Iustin Pop
  def CheckArguments(self):
3318 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3319 cf26a87a Iustin Pop
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
3320 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3321 5c983ee5 Iustin Pop
                                 " parameter was not set",
3322 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3323 f5118ade Iustin Pop
3324 f5118ade Iustin Pop
  def ExpandNames(self):
3325 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3326 f5118ade Iustin Pop

3327 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3328 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3329 f5118ade Iustin Pop

3330 f5118ade Iustin Pop
    """
3331 f5118ade Iustin Pop
    self.needed_locks = {}
3332 f5118ade Iustin Pop
3333 f5118ade Iustin Pop
  def CheckPrereq(self):
3334 f5118ade Iustin Pop
    """Check prerequisites.
3335 f5118ade Iustin Pop

3336 f5118ade Iustin Pop
    This LU has no prereqs.
3337 f5118ade Iustin Pop

3338 f5118ade Iustin Pop
    """
3339 f5118ade Iustin Pop
    pass
3340 f5118ade Iustin Pop
3341 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3342 f5118ade Iustin Pop
    """Reboots a node.
3343 f5118ade Iustin Pop

3344 f5118ade Iustin Pop
    """
3345 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3346 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3347 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3348 f5118ade Iustin Pop
    return result.payload
3349 f5118ade Iustin Pop
3350 f5118ade Iustin Pop
3351 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3352 a8083063 Iustin Pop
  """Query cluster configuration.
3353 a8083063 Iustin Pop

3354 a8083063 Iustin Pop
  """
3355 a8083063 Iustin Pop
  _OP_REQP = []
3356 642339cf Guido Trotter
  REQ_BGL = False
3357 642339cf Guido Trotter
3358 642339cf Guido Trotter
  def ExpandNames(self):
3359 642339cf Guido Trotter
    self.needed_locks = {}
3360 a8083063 Iustin Pop
3361 a8083063 Iustin Pop
  def CheckPrereq(self):
3362 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3363 a8083063 Iustin Pop

3364 a8083063 Iustin Pop
    """
3365 a8083063 Iustin Pop
    pass
3366 a8083063 Iustin Pop
3367 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3368 a8083063 Iustin Pop
    """Return cluster config.
3369 a8083063 Iustin Pop

3370 a8083063 Iustin Pop
    """
3371 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3372 17463d22 René Nussbaumer
    os_hvp = {}
3373 17463d22 René Nussbaumer
3374 17463d22 René Nussbaumer
    # Filter just for enabled hypervisors
3375 17463d22 René Nussbaumer
    for os_name, hv_dict in cluster.os_hvp.items():
3376 17463d22 René Nussbaumer
      os_hvp[os_name] = {}
3377 17463d22 René Nussbaumer
      for hv_name, hv_params in hv_dict.items():
3378 17463d22 René Nussbaumer
        if hv_name in cluster.enabled_hypervisors:
3379 17463d22 René Nussbaumer
          os_hvp[os_name][hv_name] = hv_params
3380 17463d22 René Nussbaumer
3381 a8083063 Iustin Pop
    result = {
3382 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3383 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3384 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3385 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3386 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3387 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3388 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3389 469f88e1 Iustin Pop
      "master": cluster.master_node,
3390 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3391 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3392 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3393 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3394 17463d22 René Nussbaumer
      "os_hvp": os_hvp,
3395 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3396 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3397 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3398 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3399 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3400 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3401 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3402 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3403 259578eb Iustin Pop
      "uuid": cluster.uuid,
3404 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3405 a8083063 Iustin Pop
      }
3406 a8083063 Iustin Pop
3407 a8083063 Iustin Pop
    return result
3408 a8083063 Iustin Pop
3409 a8083063 Iustin Pop
3410 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3411 ae5849b5 Michael Hanselmann
  """Return configuration values.
3412 a8083063 Iustin Pop

3413 a8083063 Iustin Pop
  """
3414 a8083063 Iustin Pop
  _OP_REQP = []
3415 642339cf Guido Trotter
  REQ_BGL = False
3416 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3417 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3418 05e50653 Michael Hanselmann
                                  "watcher_pause")
3419 642339cf Guido Trotter
3420 642339cf Guido Trotter
  def ExpandNames(self):
3421 642339cf Guido Trotter
    self.needed_locks = {}
3422 a8083063 Iustin Pop
3423 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3424 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3425 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3426 ae5849b5 Michael Hanselmann
3427 a8083063 Iustin Pop
  def CheckPrereq(self):
3428 a8083063 Iustin Pop
    """No prerequisites.
3429 a8083063 Iustin Pop

3430 a8083063 Iustin Pop
    """
3431 a8083063 Iustin Pop
    pass
3432 a8083063 Iustin Pop
3433 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3434 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3435 a8083063 Iustin Pop

3436 a8083063 Iustin Pop
    """
3437 ae5849b5 Michael Hanselmann
    values = []
3438 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3439 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3440 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3441 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3442 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3443 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3444 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3445 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3446 cac599f1 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3447 ae5849b5 Michael Hanselmann
      else:
3448 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3449 3ccafd0e Iustin Pop
      values.append(entry)
3450 ae5849b5 Michael Hanselmann
    return values
3451 a8083063 Iustin Pop
3452 a8083063 Iustin Pop
3453 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3454 a8083063 Iustin Pop
  """Bring up an instance's disks.
3455 a8083063 Iustin Pop

3456 a8083063 Iustin Pop
  """
3457 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3458 f22a8ba3 Guido Trotter
  REQ_BGL = False
3459 f22a8ba3 Guido Trotter
3460 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3461 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3462 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3463 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3464 f22a8ba3 Guido Trotter
3465 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3466 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3467 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3468 a8083063 Iustin Pop
3469 a8083063 Iustin Pop
  def CheckPrereq(self):
3470 a8083063 Iustin Pop
    """Check prerequisites.
3471 a8083063 Iustin Pop

3472 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3473 a8083063 Iustin Pop

3474 a8083063 Iustin Pop
    """
3475 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3476 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3477 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3478 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3479 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3480 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3481 a8083063 Iustin Pop
3482 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3483 a8083063 Iustin Pop
    """Activate the disks.
3484 a8083063 Iustin Pop

3485 a8083063 Iustin Pop
    """
3486 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3487 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3488 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3489 a8083063 Iustin Pop
    if not disks_ok:
3490 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3491 a8083063 Iustin Pop
3492 a8083063 Iustin Pop
    return disks_info
3493 a8083063 Iustin Pop
3494 a8083063 Iustin Pop
3495 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3496 e3443b36 Iustin Pop
                           ignore_size=False):
3497 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3498 a8083063 Iustin Pop

3499 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3500 a8083063 Iustin Pop

3501 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3502 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3503 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3504 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3505 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3506 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3507 e4376078 Iustin Pop
      won't result in an error return from the function
3508 e3443b36 Iustin Pop
  @type ignore_size: boolean
3509 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3510 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3511 e3443b36 Iustin Pop
      when the size is wrong
3512 e4376078 Iustin Pop
  @return: False if the operation failed, otherwise a list of
3513 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
3514 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3515 a8083063 Iustin Pop

3516 a8083063 Iustin Pop
  """
3517 a8083063 Iustin Pop
  device_info = []
3518 a8083063 Iustin Pop
  disks_ok = True
3519 fdbd668d Iustin Pop
  iname = instance.name
3520 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
3521 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3522 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
3523 fdbd668d Iustin Pop
3524 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3525 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3526 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3527 fdbd668d Iustin Pop
  # SyncSource, etc.)
3528 fdbd668d Iustin Pop
3529 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3530 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3531 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3532 e3443b36 Iustin Pop
      if ignore_size:
3533 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3534 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3535 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3536 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3537 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3538 53c14ef1 Iustin Pop
      if msg:
3539 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3540 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3541 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3542 fdbd668d Iustin Pop
        if not ignore_secondaries:
3543 a8083063 Iustin Pop
          disks_ok = False
3544 fdbd668d Iustin Pop
3545 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3546 fdbd668d Iustin Pop
3547 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3548 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3549 d52ea991 Michael Hanselmann
    dev_path = None
3550 d52ea991 Michael Hanselmann
3551 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3552 fdbd668d Iustin Pop
      if node != instance.primary_node:
3553 fdbd668d Iustin Pop
        continue
3554 e3443b36 Iustin Pop
      if ignore_size:
3555 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3556 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3557 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3558 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3559 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3560 53c14ef1 Iustin Pop
      if msg:
3561 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3562 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3563 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3564 fdbd668d Iustin Pop
        disks_ok = False
3565 d52ea991 Michael Hanselmann
      else:
3566 d52ea991 Michael Hanselmann
        dev_path = result.payload
3567 d52ea991 Michael Hanselmann
3568 d52ea991 Michael Hanselmann
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
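    # each entry is (primary node, instance-visible disk name, device path on
    # the node), matching the mapping documented in the docstring above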
3569 a8083063 Iustin Pop
3570 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3571 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3572 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3573 b352ab5b Iustin Pop
  for disk in instance.disks:
3574 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3575 b352ab5b Iustin Pop
3576 a8083063 Iustin Pop
  return disks_ok, device_info
3577 a8083063 Iustin Pop
3578 a8083063 Iustin Pop
3579 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3580 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3581 3ecf6786 Iustin Pop

3582 3ecf6786 Iustin Pop
  """
3583 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3584 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3585 fe7b0351 Michael Hanselmann
  if not disks_ok:
3586 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3587 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3588 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3589 86d9d3bb Iustin Pop
                         " secondary node,"
3590 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3591 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3592 fe7b0351 Michael Hanselmann
3593 fe7b0351 Michael Hanselmann
3594 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3595 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3596 a8083063 Iustin Pop

3597 a8083063 Iustin Pop
  """
3598 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3599 f22a8ba3 Guido Trotter
  REQ_BGL = False
3600 f22a8ba3 Guido Trotter
3601 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3602 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3603 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3604 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3605 f22a8ba3 Guido Trotter
3606 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3607 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3608 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3609 a8083063 Iustin Pop
3610 a8083063 Iustin Pop
  def CheckPrereq(self):
3611 a8083063 Iustin Pop
    """Check prerequisites.
3612 a8083063 Iustin Pop

3613 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3614 a8083063 Iustin Pop

3615 a8083063 Iustin Pop
    """
3616 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3617 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3618 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3619 a8083063 Iustin Pop
3620 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3621 a8083063 Iustin Pop
    """Deactivate the disks
3622 a8083063 Iustin Pop

3623 a8083063 Iustin Pop
    """
3624 a8083063 Iustin Pop
    instance = self.instance
3625 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3626 a8083063 Iustin Pop
3627 a8083063 Iustin Pop
3628 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3629 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3630 155d6c75 Guido Trotter

3631 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
3632 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3633 155d6c75 Guido Trotter

3634 155d6c75 Guido Trotter
  """
3635 aca13712 Iustin Pop
  pnode = instance.primary_node
3636 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
3637 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
3638 aca13712 Iustin Pop
3639 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
3640 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
3641 155d6c75 Guido Trotter
                             " block devices.")
3642 155d6c75 Guido Trotter
3643 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3644 a8083063 Iustin Pop
3645 a8083063 Iustin Pop
3646 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3647 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3648 a8083063 Iustin Pop

3649 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3650 a8083063 Iustin Pop

3651 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
3652 a8083063 Iustin Pop
  ignored.
3653 a8083063 Iustin Pop

3654 a8083063 Iustin Pop
  """
3655 cacfd1fd Iustin Pop
  all_result = True
3656 a8083063 Iustin Pop
  for disk in instance.disks:
3657 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
3658 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
3659 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
3660 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3661 cacfd1fd Iustin Pop
      if msg:
3662 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
3663 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
3664 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
3665 cacfd1fd Iustin Pop
          all_result = False
3666 cacfd1fd Iustin Pop
  return all_result
3667 a8083063 Iustin Pop
3668 a8083063 Iustin Pop
3669 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
3670 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
3671 d4f16fd9 Iustin Pop

3672 d4f16fd9 Iustin Pop
  This function check if a given node has the needed amount of free
3673 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
3674 d4f16fd9 Iustin Pop
  information from the node, this function raise an OpPrereqError
3675 d4f16fd9 Iustin Pop
  exception.
3676 d4f16fd9 Iustin Pop

3677 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
3678 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
3679 e69d05fd Iustin Pop
  @type node: C{str}
3680 e69d05fd Iustin Pop
  @param node: the node to check
3681 e69d05fd Iustin Pop
  @type reason: C{str}
3682 e69d05fd Iustin Pop
  @param reason: string to use in the error message
3683 e69d05fd Iustin Pop
  @type requested: C{int}
3684 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
3685 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
3686 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
3687 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
3688 e69d05fd Iustin Pop
      we cannot check the node
3689 d4f16fd9 Iustin Pop

3690 d4f16fd9 Iustin Pop
  """
3691 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
3692 045dd6d9 Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node,
3693 045dd6d9 Iustin Pop
                       prereq=True, ecode=errors.ECODE_ENVIRON)
3694 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
3695 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
3696 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
3697 5c983ee5 Iustin Pop
                               " was '%s'" % (node, free_mem),
3698 5c983ee5 Iustin Pop
                               errors.ECODE_ENVIRON)
3699 d4f16fd9 Iustin Pop
  if requested > free_mem:
3700 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
3701 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
3702 5c983ee5 Iustin Pop
                               (node, reason, requested, free_mem),
3703 5c983ee5 Iustin Pop
                               errors.ECODE_NORES)
3704 d4f16fd9 Iustin Pop
3705 d4f16fd9 Iustin Pop
3706 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
3707 a8083063 Iustin Pop
  """Starts an instance.
3708 a8083063 Iustin Pop

3709 a8083063 Iustin Pop
  """
3710 a8083063 Iustin Pop
  HPATH = "instance-start"
3711 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3712 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
3713 e873317a Guido Trotter
  REQ_BGL = False
3714 e873317a Guido Trotter
3715 e873317a Guido Trotter
  def ExpandNames(self):
3716 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3717 a8083063 Iustin Pop
3718 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3719 a8083063 Iustin Pop
    """Build hooks env.
3720 a8083063 Iustin Pop

3721 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3722 a8083063 Iustin Pop

3723 a8083063 Iustin Pop
    """
3724 a8083063 Iustin Pop
    env = {
3725 a8083063 Iustin Pop
      "FORCE": self.op.force,
3726 a8083063 Iustin Pop
      }
3727 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3728 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3729 a8083063 Iustin Pop
    return env, nl, nl
3730 a8083063 Iustin Pop
3731 a8083063 Iustin Pop
  def CheckPrereq(self):
3732 a8083063 Iustin Pop
    """Check prerequisites.
3733 a8083063 Iustin Pop

3734 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3735 a8083063 Iustin Pop

3736 a8083063 Iustin Pop
    """
3737 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3738 e873317a Guido Trotter
    assert self.instance is not None, \
3739 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3740 a8083063 Iustin Pop
3741 d04aaa2f Iustin Pop
    # extra beparams
3742 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
3743 d04aaa2f Iustin Pop
    if self.beparams:
3744 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
3745 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
3746 5c983ee5 Iustin Pop
                                   " dict" % (type(self.beparams), ),
3747 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3748 d04aaa2f Iustin Pop
      # fill the beparams dict
3749 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
3750 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
3751 d04aaa2f Iustin Pop
3752 d04aaa2f Iustin Pop
    # extra hvparams
3753 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
3754 d04aaa2f Iustin Pop
    if self.hvparams:
3755 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
3756 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
3757 5c983ee5 Iustin Pop
                                   " dict" % (type(self.hvparams), ),
3758 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3759 d04aaa2f Iustin Pop
3760 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
3761 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
3762 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
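      # the effective hvparams are the cluster defaults for this hypervisor,
      # overridden first by the instance's own hvparams and then by the
      # one-off values passed to this start opcode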
3763 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
3764 d04aaa2f Iustin Pop
                                    instance.hvparams)
3765 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
3766 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
3767 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
3768 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
3769 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
3770 d04aaa2f Iustin Pop
3771 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3772 7527a8a4 Iustin Pop
3773 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3774 5bbd3f7f Michael Hanselmann
    # check bridges' existence
3775 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3776 a8083063 Iustin Pop
3777 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3778 f1926756 Guido Trotter
                                              instance.name,
3779 f1926756 Guido Trotter
                                              instance.hypervisor)
3780 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3781 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
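    # the free memory check is only needed when the instance is not
    # already running on its primary node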
3782 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
3783 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
3784 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
3785 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
3786 d4f16fd9 Iustin Pop
3787 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3788 a8083063 Iustin Pop
    """Start the instance.
3789 a8083063 Iustin Pop

3790 a8083063 Iustin Pop
    """
3791 a8083063 Iustin Pop
    instance = self.instance
3792 a8083063 Iustin Pop
    force = self.op.force
3793 a8083063 Iustin Pop
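    # record the desired state (running) in the configuration before
    # attempting the actual start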
3794 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
3795 fe482621 Iustin Pop
3796 a8083063 Iustin Pop
    node_current = instance.primary_node
3797 a8083063 Iustin Pop
3798 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
3799 a8083063 Iustin Pop
3800 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
3801 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
3802 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3803 dd279568 Iustin Pop
    if msg:
3804 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3805 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
3806 a8083063 Iustin Pop
3807 a8083063 Iustin Pop
3808 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
3809 bf6929a2 Alexander Schreiber
  """Reboot an instance.
3810 bf6929a2 Alexander Schreiber

3811 bf6929a2 Alexander Schreiber
  """
3812 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
3813 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
3814 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
3815 e873317a Guido Trotter
  REQ_BGL = False
3816 e873317a Guido Trotter
3817 17c3f802 Guido Trotter
  def CheckArguments(self):
3818 17c3f802 Guido Trotter
    """Check the arguments.
3819 17c3f802 Guido Trotter

3820 17c3f802 Guido Trotter
    """
3821 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
3822 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
3823 17c3f802 Guido Trotter
3824 e873317a Guido Trotter
  def ExpandNames(self):
3825 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
3826 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3827 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
3828 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
3829 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
3830 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3831 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
3832 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3833 bf6929a2 Alexander Schreiber
3834 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
3835 bf6929a2 Alexander Schreiber
    """Build hooks env.
3836 bf6929a2 Alexander Schreiber

3837 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
3838 bf6929a2 Alexander Schreiber

3839 bf6929a2 Alexander Schreiber
    """
3840 bf6929a2 Alexander Schreiber
    env = {
3841 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
3842 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
3843 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
3844 bf6929a2 Alexander Schreiber
      }
3845 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3846 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3847 bf6929a2 Alexander Schreiber
    return env, nl, nl
3848 bf6929a2 Alexander Schreiber
3849 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3850 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3851 bf6929a2 Alexander Schreiber

3852 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3853 bf6929a2 Alexander Schreiber

3854 bf6929a2 Alexander Schreiber
    """
3855 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3856 e873317a Guido Trotter
    assert self.instance is not None, \
3857 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3858 bf6929a2 Alexander Schreiber
3859 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3860 7527a8a4 Iustin Pop
3861 5bbd3f7f Michael Hanselmann
    # check bridges' existence
3862 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3863 bf6929a2 Alexander Schreiber
3864 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3865 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3866 bf6929a2 Alexander Schreiber

3867 bf6929a2 Alexander Schreiber
    """
3868 bf6929a2 Alexander Schreiber
    instance = self.instance
3869 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3870 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3871 bf6929a2 Alexander Schreiber
3872 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
3873 bf6929a2 Alexander Schreiber
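    # soft and hard reboots are delegated to the hypervisor on the node;
    # a full reboot is emulated below via shutdown, disk re-activation
    # and a fresh instance start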
3874 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3875 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3876 ae48ac32 Iustin Pop
      for disk in instance.disks:
3877 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3878 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3879 17c3f802 Guido Trotter
                                             reboot_type,
3880 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
3881 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3882 bf6929a2 Alexander Schreiber
    else:
3883 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(node_current, instance,
3884 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
3885 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3886 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3887 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3888 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3889 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3890 dd279568 Iustin Pop
      if msg:
3891 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3892 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3893 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3894 bf6929a2 Alexander Schreiber
3895 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3896 bf6929a2 Alexander Schreiber
3897 bf6929a2 Alexander Schreiber
3898 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3899 a8083063 Iustin Pop
  """Shutdown an instance.
3900 a8083063 Iustin Pop

3901 a8083063 Iustin Pop
  """
3902 a8083063 Iustin Pop
  HPATH = "instance-stop"
3903 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3904 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3905 e873317a Guido Trotter
  REQ_BGL = False
3906 e873317a Guido Trotter
3907 6263189c Guido Trotter
  def CheckArguments(self):
3908 6263189c Guido Trotter
    """Check the arguments.
3909 6263189c Guido Trotter

3910 6263189c Guido Trotter
    """
3911 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
3912 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
3913 6263189c Guido Trotter
3914 e873317a Guido Trotter
  def ExpandNames(self):
3915 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3916 a8083063 Iustin Pop
3917 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3918 a8083063 Iustin Pop
    """Build hooks env.
3919 a8083063 Iustin Pop

3920 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3921 a8083063 Iustin Pop

3922 a8083063 Iustin Pop
    """
3923 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3924 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
3925 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3926 a8083063 Iustin Pop
    return env, nl, nl
3927 a8083063 Iustin Pop
3928 a8083063 Iustin Pop
  def CheckPrereq(self):
3929 a8083063 Iustin Pop
    """Check prerequisites.
3930 a8083063 Iustin Pop

3931 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3932 a8083063 Iustin Pop

3933 a8083063 Iustin Pop
    """
3934 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3935 e873317a Guido Trotter
    assert self.instance is not None, \
3936 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3937 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3938 a8083063 Iustin Pop
3939 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3940 a8083063 Iustin Pop
    """Shutdown the instance.
3941 a8083063 Iustin Pop

3942 a8083063 Iustin Pop
    """
3943 a8083063 Iustin Pop
    instance = self.instance
3944 a8083063 Iustin Pop
    node_current = instance.primary_node
3945 6263189c Guido Trotter
    timeout = self.timeout
3946 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3947 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
3948 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3949 1fae010f Iustin Pop
    if msg:
3950 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3951 a8083063 Iustin Pop
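    # deactivate the disks whether or not the shutdown RPC succeeded; the
    # instance has already been marked down in the configuration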
3952 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3953 a8083063 Iustin Pop
3954 a8083063 Iustin Pop
3955 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3956 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3957 fe7b0351 Michael Hanselmann

3958 fe7b0351 Michael Hanselmann
  """
3959 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3960 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3961 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3962 4e0b4d2d Guido Trotter
  REQ_BGL = False
3963 4e0b4d2d Guido Trotter
3964 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3965 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3966 fe7b0351 Michael Hanselmann
3967 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3968 fe7b0351 Michael Hanselmann
    """Build hooks env.
3969 fe7b0351 Michael Hanselmann

3970 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3971 fe7b0351 Michael Hanselmann

3972 fe7b0351 Michael Hanselmann
    """
3973 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3974 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3975 fe7b0351 Michael Hanselmann
    return env, nl, nl
3976 fe7b0351 Michael Hanselmann
3977 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3978 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3979 fe7b0351 Michael Hanselmann

3980 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3981 fe7b0351 Michael Hanselmann

3982 fe7b0351 Michael Hanselmann
    """
3983 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3984 4e0b4d2d Guido Trotter
    assert instance is not None, \
3985 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3986 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3987 4e0b4d2d Guido Trotter
3988 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3989 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3990 5c983ee5 Iustin Pop
                                 self.op.instance_name,
3991 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3992 0d68c45d Iustin Pop
    if instance.admin_up:
3993 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3994 5c983ee5 Iustin Pop
                                 self.op.instance_name,
3995 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
3996 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3997 72737a7f Iustin Pop
                                              instance.name,
3998 72737a7f Iustin Pop
                                              instance.hypervisor)
3999 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4000 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4001 7ad1af4a Iustin Pop
    if remote_info.payload:
4002 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
4003 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
4004 5c983ee5 Iustin Pop
                                  instance.primary_node),
4005 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4006 d0834de3 Michael Hanselmann
4007 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
4008 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
4009 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4010 d0834de3 Michael Hanselmann
      # OS verification
4011 cf26a87a Iustin Pop
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4012 cf26a87a Iustin Pop
      result = self.rpc.call_os_get(pnode, self.op.os_type)
4013 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
4014 cf26a87a Iustin Pop
                   (self.op.os_type, pnode),
4015 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_INVAL)
4016 f2c05717 Guido Trotter
      if not self.op.force_variant:
4017 f2c05717 Guido Trotter
        _CheckOSVariant(result.payload, self.op.os_type)
4018 d0834de3 Michael Hanselmann
4019 fe7b0351 Michael Hanselmann
    self.instance = instance
4020 fe7b0351 Michael Hanselmann
4021 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
4022 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
4023 fe7b0351 Michael Hanselmann

4024 fe7b0351 Michael Hanselmann
    """
4025 fe7b0351 Michael Hanselmann
    inst = self.instance
4026 fe7b0351 Michael Hanselmann
4027 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4028 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4029 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
4030 a4eae71f Michael Hanselmann
      self.cfg.Update(inst, feedback_fn)
4031 d0834de3 Michael Hanselmann
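    # the instance is known to be down (checked in CheckPrereq), so its
    # disks are activated only for the duration of the OS create scripts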
4032 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4033 fe7b0351 Michael Hanselmann
    try:
4034 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
4035 4a0e011f Iustin Pop
      # FIXME: pass debug option from opcode to backend
4036 dd713605 Iustin Pop
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4037 dd713605 Iustin Pop
                                             self.op.debug_level)
4038 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
4039 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
4040 fe7b0351 Michael Hanselmann
    finally:
4041 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4042 fe7b0351 Michael Hanselmann
4043 fe7b0351 Michael Hanselmann
4044 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
4045 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
4046 bd315bfa Iustin Pop

4047 bd315bfa Iustin Pop
  """
4048 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
4049 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4050 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
4051 bd315bfa Iustin Pop
  REQ_BGL = False
4052 bd315bfa Iustin Pop
4053 bd315bfa Iustin Pop
  def CheckArguments(self):
4054 bd315bfa Iustin Pop
    """Check the arguments.
4055 bd315bfa Iustin Pop

4056 bd315bfa Iustin Pop
    """
4057 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
4058 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
4059 bd315bfa Iustin Pop
    for item in self.op.disks:
4060 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
4061 bd315bfa Iustin Pop
          item < 0):
4062 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
4063 5c983ee5 Iustin Pop
                                   str(item), errors.ECODE_INVAL)
4064 bd315bfa Iustin Pop
4065 bd315bfa Iustin Pop
  def ExpandNames(self):
4066 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
4067 bd315bfa Iustin Pop
4068 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
4069 bd315bfa Iustin Pop
    """Build hooks env.
4070 bd315bfa Iustin Pop

4071 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4072 bd315bfa Iustin Pop

4073 bd315bfa Iustin Pop
    """
4074 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4075 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4076 bd315bfa Iustin Pop
    return env, nl, nl
4077 bd315bfa Iustin Pop
4078 bd315bfa Iustin Pop
  def CheckPrereq(self):
4079 bd315bfa Iustin Pop
    """Check prerequisites.
4080 bd315bfa Iustin Pop

4081 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
4082 bd315bfa Iustin Pop

4083 bd315bfa Iustin Pop
    """
4084 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4085 bd315bfa Iustin Pop
    assert instance is not None, \
4086 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4087 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4088 bd315bfa Iustin Pop
4089 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
4090 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4091 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_INVAL)
4092 bd315bfa Iustin Pop
    if instance.admin_up:
4093 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
4094 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_STATE)
4095 bd315bfa Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4096 bd315bfa Iustin Pop
                                              instance.name,
4097 bd315bfa Iustin Pop
                                              instance.hypervisor)
4098 bd315bfa Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4099 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4100 bd315bfa Iustin Pop
    if remote_info.payload:
4101 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
4102 bd315bfa Iustin Pop
                                 (self.op.instance_name,
4103 5c983ee5 Iustin Pop
                                  instance.primary_node), errors.ECODE_STATE)
4104 bd315bfa Iustin Pop
4105 bd315bfa Iustin Pop
    if not self.op.disks:
4106 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
4107 bd315bfa Iustin Pop
    else:
4108 bd315bfa Iustin Pop
      for idx in self.op.disks:
4109 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
4110 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4111 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
4112 bd315bfa Iustin Pop
4113 bd315bfa Iustin Pop
    self.instance = instance
4114 bd315bfa Iustin Pop
4115 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
4116 bd315bfa Iustin Pop
    """Recreate the disks.
4117 bd315bfa Iustin Pop

4118 bd315bfa Iustin Pop
    """
4119 bd315bfa Iustin Pop
    to_skip = []
4120 1122eb25 Iustin Pop
    for idx, _ in enumerate(self.instance.disks):
4121 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
4122 bd315bfa Iustin Pop
        to_skip.append(idx)
4123 bd315bfa Iustin Pop
        continue
4124 bd315bfa Iustin Pop
4125 bd315bfa Iustin Pop
    _CreateDisks(self, self.instance, to_skip=to_skip)
4126 bd315bfa Iustin Pop
4127 bd315bfa Iustin Pop
4128 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
4129 decd5f45 Iustin Pop
  """Rename an instance.
4130 decd5f45 Iustin Pop

4131 decd5f45 Iustin Pop
  """
4132 decd5f45 Iustin Pop
  HPATH = "instance-rename"
4133 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4134 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
4135 decd5f45 Iustin Pop
4136 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
4137 decd5f45 Iustin Pop
    """Build hooks env.
4138 decd5f45 Iustin Pop

4139 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4140 decd5f45 Iustin Pop

4141 decd5f45 Iustin Pop
    """
4142 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4143 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4144 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4145 decd5f45 Iustin Pop
    return env, nl, nl
4146 decd5f45 Iustin Pop
4147 decd5f45 Iustin Pop
  def CheckPrereq(self):
4148 decd5f45 Iustin Pop
    """Check prerequisites.
4149 decd5f45 Iustin Pop

4150 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
4151 decd5f45 Iustin Pop

4152 decd5f45 Iustin Pop
    """
4153 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4154 cf26a87a Iustin Pop
                                                self.op.instance_name)
4155 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4156 cf26a87a Iustin Pop
    assert instance is not None
4157 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4158 7527a8a4 Iustin Pop
4159 0d68c45d Iustin Pop
    if instance.admin_up:
4160 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
4161 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_STATE)
4162 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4163 72737a7f Iustin Pop
                                              instance.name,
4164 72737a7f Iustin Pop
                                              instance.hypervisor)
4165 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4166 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4167 7ad1af4a Iustin Pop
    if remote_info.payload:
4168 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
4169 decd5f45 Iustin Pop
                                 (self.op.instance_name,
4170 5c983ee5 Iustin Pop
                                  instance.primary_node), errors.ECODE_STATE)
4171 decd5f45 Iustin Pop
    self.instance = instance
4172 decd5f45 Iustin Pop
4173 decd5f45 Iustin Pop
    # new name verification
4174 104f4ca1 Iustin Pop
    name_info = utils.GetHostInfo(self.op.new_name)
4175 decd5f45 Iustin Pop
4176 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
4177 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
4178 7bde3275 Guido Trotter
    if new_name in instance_list:
4179 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4180 5c983ee5 Iustin Pop
                                 new_name, errors.ECODE_EXISTS)
4181 7bde3275 Guido Trotter
4182 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
4183 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4184 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4185 5c983ee5 Iustin Pop
                                   (name_info.ip, new_name),
4186 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
4187 decd5f45 Iustin Pop
4188 decd5f45 Iustin Pop
4189 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
4190 decd5f45 Iustin Pop
    """Reinstall the instance.
4191 decd5f45 Iustin Pop

4192 decd5f45 Iustin Pop
    """
4193 decd5f45 Iustin Pop
    inst = self.instance
4194 decd5f45 Iustin Pop
    old_name = inst.name
4195 decd5f45 Iustin Pop
4196 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4197 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4198 b23c4333 Manuel Franceschini
4199 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4200 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
4201 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4202 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4203 decd5f45 Iustin Pop
4204 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
4205 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4206 decd5f45 Iustin Pop
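    # file-based disks live in a per-instance directory on the primary
    # node, which has to be renamed together with the instance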
4207 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4208 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4209 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4210 72737a7f Iustin Pop
                                                     old_file_storage_dir,
4211 72737a7f Iustin Pop
                                                     new_file_storage_dir)
4212 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4213 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
4214 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
4215 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
4216 b23c4333 Manuel Franceschini
4217 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4218 decd5f45 Iustin Pop
    try:
4219 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4220 dd713605 Iustin Pop
                                                 old_name, self.op.debug_level)
4221 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4222 96841384 Iustin Pop
      if msg:
4223 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4224 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4225 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4226 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4227 decd5f45 Iustin Pop
    finally:
4228 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4229 decd5f45 Iustin Pop
4230 decd5f45 Iustin Pop
4231 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4232 a8083063 Iustin Pop
  """Remove an instance.
4233 a8083063 Iustin Pop

4234 a8083063 Iustin Pop
  """
4235 a8083063 Iustin Pop
  HPATH = "instance-remove"
4236 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4237 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4238 cf472233 Guido Trotter
  REQ_BGL = False
4239 cf472233 Guido Trotter
4240 17c3f802 Guido Trotter
  def CheckArguments(self):
4241 17c3f802 Guido Trotter
    """Check the arguments.
4242 17c3f802 Guido Trotter

4243 17c3f802 Guido Trotter
    """
4244 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4245 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4246 17c3f802 Guido Trotter
4247 cf472233 Guido Trotter
  def ExpandNames(self):
4248 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
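    # the node locks are not known yet; they are computed in DeclareLocks
    # via _LockInstancesNodes once the instance lock is held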
4249 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4250 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4251 cf472233 Guido Trotter
4252 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4253 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4254 cf472233 Guido Trotter
      self._LockInstancesNodes()
4255 a8083063 Iustin Pop
4256 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4257 a8083063 Iustin Pop
    """Build hooks env.
4258 a8083063 Iustin Pop

4259 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4260 a8083063 Iustin Pop

4261 a8083063 Iustin Pop
    """
4262 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4263 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4264 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4265 abd8e836 Iustin Pop
    nl_post = list(self.instance.all_nodes) + nl
4266 abd8e836 Iustin Pop
    return env, nl, nl_post
4267 a8083063 Iustin Pop
4268 a8083063 Iustin Pop
  def CheckPrereq(self):
4269 a8083063 Iustin Pop
    """Check prerequisites.
4270 a8083063 Iustin Pop

4271 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4272 a8083063 Iustin Pop

4273 a8083063 Iustin Pop
    """
4274 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4275 cf472233 Guido Trotter
    assert self.instance is not None, \
4276 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4277 a8083063 Iustin Pop
4278 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4279 a8083063 Iustin Pop
    """Remove the instance.
4280 a8083063 Iustin Pop

4281 a8083063 Iustin Pop
    """
4282 a8083063 Iustin Pop
    instance = self.instance
4283 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4284 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4285 a8083063 Iustin Pop
4286 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4287 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4288 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4289 1fae010f Iustin Pop
    if msg:
4290 1d67656e Iustin Pop
      if self.op.ignore_failures:
4291 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4292 1d67656e Iustin Pop
      else:
4293 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4294 1fae010f Iustin Pop
                                 " node %s: %s" %
4295 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4296 a8083063 Iustin Pop
4297 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4298 a8083063 Iustin Pop
4299 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4300 1d67656e Iustin Pop
      if self.op.ignore_failures:
4301 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4302 1d67656e Iustin Pop
      else:
4303 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4304 a8083063 Iustin Pop
4305 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4306 a8083063 Iustin Pop
4307 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4308 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4309 a8083063 Iustin Pop
4310 a8083063 Iustin Pop
4311 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4312 a8083063 Iustin Pop
  """Logical unit for querying instances.
4313 a8083063 Iustin Pop

4314 a8083063 Iustin Pop
  """
4315 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
4316 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4317 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4318 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4319 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4320 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4321 5b460366 Iustin Pop
                                    "admin_state",
4322 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4323 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4324 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4325 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4326 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4327 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4328 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4329 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4330 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4331 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4332 19bed813 Iustin Pop
                                    "hvparams",
4333 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4334 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4335 7736a5f2 Iustin Pop
                                   for name in constants.HVS_PARAMETERS
4336 7736a5f2 Iustin Pop
                                   if name not in constants.HVC_GLOBALS] +
4337 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4338 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4339 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4340 31bf511f Iustin Pop
4341 a8083063 Iustin Pop
4342 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4343 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4344 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4345 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4346 a8083063 Iustin Pop
4347 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4348 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4349 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4350 7eb9d8f7 Guido Trotter
4351 57a2fb91 Iustin Pop
    if self.op.names:
4352 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4353 7eb9d8f7 Guido Trotter
    else:
4354 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4355 7eb9d8f7 Guido Trotter
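    # dynamic fields require querying the nodes; only in that case (and
    # only if the caller requested it) do we acquire instance and node locks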
4356 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4357 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4358 57a2fb91 Iustin Pop
    if self.do_locking:
4359 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4360 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4361 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4362 7eb9d8f7 Guido Trotter
4363 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4364 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4365 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4366 7eb9d8f7 Guido Trotter
4367 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4368 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4369 7eb9d8f7 Guido Trotter

4370 7eb9d8f7 Guido Trotter
    """
4371 57a2fb91 Iustin Pop
    pass
4372 069dcc86 Iustin Pop
4373 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4374 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4375 a8083063 Iustin Pop

4376 a8083063 Iustin Pop
    """
4377 7260cfbe Iustin Pop
    # pylint: disable-msg=R0912
4378 7260cfbe Iustin Pop
    # way too many branches here
4379 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4380 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4381 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4382 a7f5dc98 Iustin Pop
      if self.do_locking:
4383 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4384 a7f5dc98 Iustin Pop
      else:
4385 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4386 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4387 57a2fb91 Iustin Pop
    else:
4388 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4389 a7f5dc98 Iustin Pop
      if self.do_locking:
4390 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4391 a7f5dc98 Iustin Pop
      else:
4392 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4393 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4394 a7f5dc98 Iustin Pop
      if missing:
4395 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4396 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4397 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4398 c1f1cbb2 Iustin Pop
4399 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4400 a8083063 Iustin Pop
4401 a8083063 Iustin Pop
    # begin data gathering
4402 a8083063 Iustin Pop
4403 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4404 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4405 a8083063 Iustin Pop
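    # bad_nodes collects the nodes whose RPC query failed, off_nodes the
    # nodes marked offline in the configuration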
4406 a8083063 Iustin Pop
    bad_nodes = []
4407 cbfc4681 Iustin Pop
    off_nodes = []
4408 ec79568d Iustin Pop
    if self.do_node_query:
4409 a8083063 Iustin Pop
      live_data = {}
4410 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4411 a8083063 Iustin Pop
      for name in nodes:
4412 a8083063 Iustin Pop
        result = node_data[name]
4413 cbfc4681 Iustin Pop
        if result.offline:
4414 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4415 cbfc4681 Iustin Pop
          off_nodes.append(name)
4416 3cebe102 Michael Hanselmann
        if result.fail_msg:
4417 a8083063 Iustin Pop
          bad_nodes.append(name)
4418 781de953 Iustin Pop
        else:
4419 2fa74ef4 Iustin Pop
          if result.payload:
4420 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4421 2fa74ef4 Iustin Pop
          # else no instance is alive
4422 a8083063 Iustin Pop
    else:
4423 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4424 a8083063 Iustin Pop
4425 a8083063 Iustin Pop
    # end data gathering
4426 a8083063 Iustin Pop
4427 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4428 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4429 a8083063 Iustin Pop
    output = []
4430 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4431 a8083063 Iustin Pop
    for instance in instance_list:
4432 a8083063 Iustin Pop
      iout = []
4433 7736a5f2 Iustin Pop
      i_hv = cluster.FillHV(instance, skip_globals=True)
4434 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4435 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4436 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4437 a8083063 Iustin Pop
      for field in self.op.output_fields:
4438 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4439 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4440 19bed813 Iustin Pop
          val = getattr(instance, field)
4441 a8083063 Iustin Pop
        elif field == "pnode":
4442 a8083063 Iustin Pop
          val = instance.primary_node
4443 a8083063 Iustin Pop
        elif field == "snodes":
4444 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4445 a8083063 Iustin Pop
        elif field == "admin_state":
4446 0d68c45d Iustin Pop
          val = instance.admin_up
4447 a8083063 Iustin Pop
        elif field == "oper_state":
4448 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4449 8a23d2d3 Iustin Pop
            val = None
4450 a8083063 Iustin Pop
          else:
4451 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
4452 d8052456 Iustin Pop
        elif field == "status":
4453 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4454 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4455 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4456 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4457 d8052456 Iustin Pop
          else:
4458 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4459 d8052456 Iustin Pop
            if running:
4460 0d68c45d Iustin Pop
              if instance.admin_up:
4461 d8052456 Iustin Pop
                val = "running"
4462 d8052456 Iustin Pop
              else:
4463 d8052456 Iustin Pop
                val = "ERROR_up"
4464 d8052456 Iustin Pop
            else:
4465 0d68c45d Iustin Pop
              if instance.admin_up:
4466 d8052456 Iustin Pop
                val = "ERROR_down"
4467 d8052456 Iustin Pop
              else:
4468 d8052456 Iustin Pop
                val = "ADMIN_down"
4469 a8083063 Iustin Pop
        elif field == "oper_ram":
4470 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4471 8a23d2d3 Iustin Pop
            val = None
4472 a8083063 Iustin Pop
          elif instance.name in live_data:
4473 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4474 a8083063 Iustin Pop
          else:
4475 a8083063 Iustin Pop
            val = "-"
4476 c1ce76bb Iustin Pop
        elif field == "vcpus":
4477 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4478 a8083063 Iustin Pop
        elif field == "disk_template":
4479 a8083063 Iustin Pop
          val = instance.disk_template
4480 a8083063 Iustin Pop
        elif field == "ip":
4481 39a02558 Guido Trotter
          if instance.nics:
4482 39a02558 Guido Trotter
            val = instance.nics[0].ip
4483 39a02558 Guido Trotter
          else:
4484 39a02558 Guido Trotter
            val = None
4485 638c6349 Guido Trotter
        elif field == "nic_mode":
4486 638c6349 Guido Trotter
          if instance.nics:
4487 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4488 638c6349 Guido Trotter
          else:
4489 638c6349 Guido Trotter
            val = None
4490 638c6349 Guido Trotter
        elif field == "nic_link":
4491 39a02558 Guido Trotter
          if instance.nics:
4492 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4493 638c6349 Guido Trotter
          else:
4494 638c6349 Guido Trotter
            val = None
4495 638c6349 Guido Trotter
        elif field == "bridge":
4496 638c6349 Guido Trotter
          if (instance.nics and
4497 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4498 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4499 39a02558 Guido Trotter
          else:
4500 39a02558 Guido Trotter
            val = None
4501 a8083063 Iustin Pop
        elif field == "mac":
4502 39a02558 Guido Trotter
          if instance.nics:
4503 39a02558 Guido Trotter
            val = instance.nics[0].mac
4504 39a02558 Guido Trotter
          else:
4505 39a02558 Guido Trotter
            val = None
4506 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4507 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4508 ad24e046 Iustin Pop
          try:
4509 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4510 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4511 8a23d2d3 Iustin Pop
            val = None
4512 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4513 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4514 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4515 130a6a6f Iustin Pop
        elif field == "tags":
4516 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4517 338e51e8 Iustin Pop
        elif field == "hvparams":
4518 338e51e8 Iustin Pop
          val = i_hv
4519 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4520 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
4521 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
4522 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4523 338e51e8 Iustin Pop
        elif field == "beparams":
4524 338e51e8 Iustin Pop
          val = i_be
4525 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4526 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4527 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4528 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4529 71c1af58 Iustin Pop
          # matches a variable list
4530 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4531 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4532 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4533 71c1af58 Iustin Pop
              val = len(instance.disks)
4534 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4535 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4536 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4537 3e0cea06 Iustin Pop
              try:
4538 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4539 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4540 71c1af58 Iustin Pop
                val = None
4541 71c1af58 Iustin Pop
            else:
4542 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4543 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4544 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4545 71c1af58 Iustin Pop
              val = len(instance.nics)
4546 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4547 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4548 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4549 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4550 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4551 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4552 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4553 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4554 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4555 638c6349 Guido Trotter
              val = []
4556 638c6349 Guido Trotter
              for nicp in i_nicp:
4557 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4558 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4559 638c6349 Guido Trotter
                else:
4560 638c6349 Guido Trotter
                  val.append(None)
4561 71c1af58 Iustin Pop
            else:
4562 71c1af58 Iustin Pop
              # index-based item
4563 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4564 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4565 71c1af58 Iustin Pop
                val = None
4566 71c1af58 Iustin Pop
              else:
4567 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4568 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4569 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4570 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4571 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4572 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4573 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4574 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4575 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4576 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4577 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4578 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4579 638c6349 Guido Trotter
                  else:
4580 638c6349 Guido Trotter
                    val = None
4581 71c1af58 Iustin Pop
                else:
4582 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4583 71c1af58 Iustin Pop
          else:
4584 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4585 c1ce76bb Iustin Pop
                           field)
4586 a8083063 Iustin Pop
        else:
4587 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4588 a8083063 Iustin Pop
        iout.append(val)
4589 a8083063 Iustin Pop
      output.append(iout)
4590 a8083063 Iustin Pop
4591 a8083063 Iustin Pop
    return output
4592 a8083063 Iustin Pop
4593 a8083063 Iustin Pop
4594 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4595 a8083063 Iustin Pop
  """Failover an instance.
4596 a8083063 Iustin Pop

4597 a8083063 Iustin Pop
  """
4598 a8083063 Iustin Pop
  HPATH = "instance-failover"
4599 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4600 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4601 c9e5c064 Guido Trotter
  REQ_BGL = False
4602 c9e5c064 Guido Trotter
4603 17c3f802 Guido Trotter
  def CheckArguments(self):
4604 17c3f802 Guido Trotter
    """Check the arguments.
4605 17c3f802 Guido Trotter

4606 17c3f802 Guido Trotter
    """
4607 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4608 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4609 17c3f802 Guido Trotter
4610 c9e5c064 Guido Trotter
  def ExpandNames(self):
4611 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4612 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4613 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4614 c9e5c064 Guido Trotter
4615 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4616 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4617 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4618 a8083063 Iustin Pop
4619 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4620 a8083063 Iustin Pop
    """Build hooks env.
4621 a8083063 Iustin Pop

4622 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4623 a8083063 Iustin Pop

4624 a8083063 Iustin Pop
    """
4625 08eec276 Iustin Pop
    instance = self.instance
4626 08eec276 Iustin Pop
    source_node = instance.primary_node
4627 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
4628 a8083063 Iustin Pop
    env = {
4629 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4630 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4631 08eec276 Iustin Pop
      "OLD_PRIMARY": source_node,
4632 08eec276 Iustin Pop
      "OLD_SECONDARY": target_node,
4633 08eec276 Iustin Pop
      "NEW_PRIMARY": target_node,
4634 08eec276 Iustin Pop
      "NEW_SECONDARY": source_node,
4635 a8083063 Iustin Pop
      }
4636 08eec276 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, instance))
4637 08eec276 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4638 abd8e836 Iustin Pop
    nl_post = list(nl)
4639 abd8e836 Iustin Pop
    nl_post.append(source_node)
4640 abd8e836 Iustin Pop
    return env, nl, nl_post
4641 a8083063 Iustin Pop
4642 a8083063 Iustin Pop
  def CheckPrereq(self):
4643 a8083063 Iustin Pop
    """Check prerequisites.
4644 a8083063 Iustin Pop

4645 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4646 a8083063 Iustin Pop

4647 a8083063 Iustin Pop
    """
4648 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4649 c9e5c064 Guido Trotter
    assert self.instance is not None, \
4650 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4651 a8083063 Iustin Pop
4652 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
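    # failover only works for disk templates mirrored over the network,
    # i.e. where the data already exists on the secondary node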
4653 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4654 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
4655 5c983ee5 Iustin Pop
                                 " network mirrored, cannot failover.",
4656 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4657 2a710df1 Michael Hanselmann
4658 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
4659 2a710df1 Michael Hanselmann
    if not secondary_nodes:
4660 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
4661 abdf0113 Iustin Pop
                                   "a mirrored disk template")
4662 2a710df1 Michael Hanselmann
4663 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
4664 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
4665 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4666 d27776f0 Iustin Pop
    if instance.admin_up:
4667 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
4668 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4669 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4670 d27776f0 Iustin Pop
                           instance.hypervisor)
4671 d27776f0 Iustin Pop
    else:
4672 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4673 d27776f0 Iustin Pop
                   " instance will not be started")
4674 3a7c308e Guido Trotter
4675 a8083063 Iustin Pop
    # check bridge existence
4676 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4677 a8083063 Iustin Pop
4678 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4679 a8083063 Iustin Pop
    """Failover an instance.
4680 a8083063 Iustin Pop

4681 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
4682 a8083063 Iustin Pop
    starting it on the secondary.
4683 a8083063 Iustin Pop

4684 a8083063 Iustin Pop
    """
4685 a8083063 Iustin Pop
    instance = self.instance
4686 a8083063 Iustin Pop
4687 a8083063 Iustin Pop
    source_node = instance.primary_node
4688 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
4689 a8083063 Iustin Pop
4690 1df79ce6 Michael Hanselmann
    if instance.admin_up:
4691 1df79ce6 Michael Hanselmann
      feedback_fn("* checking disk consistency between source and target")
4692 1df79ce6 Michael Hanselmann
      for dev in instance.disks:
4693 1df79ce6 Michael Hanselmann
        # for drbd, these are drbd over lvm
4694 1df79ce6 Michael Hanselmann
        if not _CheckDiskConsistency(self, dev, target_node, False):
4695 1df79ce6 Michael Hanselmann
          if not self.op.ignore_consistency:
4696 1df79ce6 Michael Hanselmann
            raise errors.OpExecError("Disk %s is degraded on target node,"
4697 1df79ce6 Michael Hanselmann
                                     " aborting failover." % dev.iv_name)
4698 1df79ce6 Michael Hanselmann
    else:
4699 1df79ce6 Michael Hanselmann
      feedback_fn("* not checking disk consistency as instance is not running")
4700 a8083063 Iustin Pop
4701 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
4702 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4703 9a4f63d1 Iustin Pop
                 instance.name, source_node)
4704 a8083063 Iustin Pop
4705 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4706 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4707 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4708 1fae010f Iustin Pop
    if msg:
4709 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
4710 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4711 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
4712 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
4713 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
4714 24a40d57 Iustin Pop
      else:
4715 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4716 1fae010f Iustin Pop
                                 " node %s: %s" %
4717 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
4718 a8083063 Iustin Pop
4719 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
4720 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
4721 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
4722 a8083063 Iustin Pop
4723 a8083063 Iustin Pop
    instance.primary_node = target_node
4724 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
4725 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
4726 a8083063 Iustin Pop
4727 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
4728 0d68c45d Iustin Pop
    if instance.admin_up:
4729 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
4730 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
4731 9a4f63d1 Iustin Pop
                   instance.name, target_node)
4732 12a0cfbe Guido Trotter
4733 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4734 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
4735 12a0cfbe Guido Trotter
      if not disks_ok:
4736 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4737 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
4738 a8083063 Iustin Pop
4739 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
4740 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4741 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4742 dd279568 Iustin Pop
      if msg:
4743 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4744 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4745 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
4746 a8083063 Iustin Pop
4747 a8083063 Iustin Pop
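# Editor's note -- illustrative sketch, not part of cmdlib: assuming the
# standard Ganeti client helpers, the failover implemented by the LU above is
# normally requested by submitting the corresponding opcode, e.g.:
#
#   from ganeti import cli, opcodes
#
#   op = opcodes.OpFailoverInstance(instance_name="instance1.example.com",
#                                   ignore_consistency=False)
#   cli.SubmitOpCode(op)
#
# Exec() then runs the shutdown-on-primary / start-on-old-secondary sequence
# shown above, refusing to proceed on degraded disks unless
# ignore_consistency is set.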
4748 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
4749 53c776b5 Iustin Pop
  """Migrate an instance.
4750 53c776b5 Iustin Pop

4751 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
4752 53c776b5 Iustin Pop
  which is done with shutdown.
4753 53c776b5 Iustin Pop

4754 53c776b5 Iustin Pop
  """
4755 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
4756 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4757 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
4758 53c776b5 Iustin Pop
4759 53c776b5 Iustin Pop
  REQ_BGL = False
4760 53c776b5 Iustin Pop
4761 53c776b5 Iustin Pop
  def ExpandNames(self):
4762 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
4763 3e06e001 Michael Hanselmann
4764 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
4765 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4766 53c776b5 Iustin Pop
4767 3e06e001 Michael Hanselmann
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
4768 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
4769 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
4770 3e06e001 Michael Hanselmann
4771 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
4772 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
4773 53c776b5 Iustin Pop
      self._LockInstancesNodes()
4774 53c776b5 Iustin Pop
4775 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
4776 53c776b5 Iustin Pop
    """Build hooks env.
4777 53c776b5 Iustin Pop

4778 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4779 53c776b5 Iustin Pop

4780 53c776b5 Iustin Pop
    """
4781 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
4782 08eec276 Iustin Pop
    source_node = instance.primary_node
4783 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
4784 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
4785 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
4786 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
4787 08eec276 Iustin Pop
    env.update({
4788 08eec276 Iustin Pop
        "OLD_PRIMARY": source_node,
4789 08eec276 Iustin Pop
        "OLD_SECONDARY": target_node,
4790 08eec276 Iustin Pop
        "NEW_PRIMARY": target_node,
4791 08eec276 Iustin Pop
        "NEW_SECONDARY": source_node,
4792 08eec276 Iustin Pop
        })
4793 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4794 abd8e836 Iustin Pop
    nl_post = list(nl)
4795 abd8e836 Iustin Pop
    nl_post.append(source_node)
4796 abd8e836 Iustin Pop
    return env, nl, nl_post
4797 53c776b5 Iustin Pop
4798 3e06e001 Michael Hanselmann
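# Editor's note -- illustrative sketch, not part of cmdlib: the opcode behind
# LUMigrateInstance carries exactly the parameters listed in _OP_REQP above,
# so a live-migration request would look roughly like:
#
#   op = opcodes.OpMigrateInstance(instance_name="instance1.example.com",
#                                  live=True, cleanup=False)
#
# The LU itself only handles locking and hooks; the real work is delegated to
# the TLMigrateInstance tasklet defined further below.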
4799 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
4800 313bcead Iustin Pop
  """Move an instance by data-copying.
4801 313bcead Iustin Pop

4802 313bcead Iustin Pop
  """
4803 313bcead Iustin Pop
  HPATH = "instance-move"
4804 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4805 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
4806 313bcead Iustin Pop
  REQ_BGL = False
4807 313bcead Iustin Pop
4808 17c3f802 Guido Trotter
  def CheckArguments(self):
4809 17c3f802 Guido Trotter
    """Check the arguments.
4810 17c3f802 Guido Trotter

4811 17c3f802 Guido Trotter
    """
4812 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4813 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4814 17c3f802 Guido Trotter
4815 313bcead Iustin Pop
  def ExpandNames(self):
4816 313bcead Iustin Pop
    self._ExpandAndLockInstance()
4817 cf26a87a Iustin Pop
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
4818 313bcead Iustin Pop
    self.op.target_node = target_node
4819 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
4820 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4821 313bcead Iustin Pop
4822 313bcead Iustin Pop
  def DeclareLocks(self, level):
4823 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
4824 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
4825 313bcead Iustin Pop
4826 313bcead Iustin Pop
  def BuildHooksEnv(self):
4827 313bcead Iustin Pop
    """Build hooks env.
4828 313bcead Iustin Pop

4829 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4830 313bcead Iustin Pop

4831 313bcead Iustin Pop
    """
4832 313bcead Iustin Pop
    env = {
4833 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
4834 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4835 313bcead Iustin Pop
      }
4836 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4837 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
4838 313bcead Iustin Pop
                                       self.op.target_node]
4839 313bcead Iustin Pop
    return env, nl, nl
4840 313bcead Iustin Pop
4841 313bcead Iustin Pop
  def CheckPrereq(self):
4842 313bcead Iustin Pop
    """Check prerequisites.
4843 313bcead Iustin Pop

4844 313bcead Iustin Pop
    This checks that the instance is in the cluster.
4845 313bcead Iustin Pop

4846 313bcead Iustin Pop
    """
4847 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4848 313bcead Iustin Pop
    assert self.instance is not None, \
4849 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4850 313bcead Iustin Pop
4851 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
4852 313bcead Iustin Pop
    assert node is not None, \
4853 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
4854 313bcead Iustin Pop
4855 313bcead Iustin Pop
    self.target_node = target_node = node.name
4856 313bcead Iustin Pop
4857 313bcead Iustin Pop
    if target_node == instance.primary_node:
4858 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
4859 5c983ee5 Iustin Pop
                                 (instance.name, target_node),
4860 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4861 313bcead Iustin Pop
4862 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4863 313bcead Iustin Pop
4864 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
4865 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
4866 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
4867 d1b83918 Iustin Pop
                                   " cannot copy" % idx, errors.ECODE_STATE)
4868 313bcead Iustin Pop
4869 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
4870 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4871 313bcead Iustin Pop
4872 313bcead Iustin Pop
    if instance.admin_up:
4873 313bcead Iustin Pop
      # check memory requirements on the target node
4874 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4875 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4876 313bcead Iustin Pop
                           instance.hypervisor)
4877 313bcead Iustin Pop
    else:
4878 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4879 313bcead Iustin Pop
                   " instance will not be started")
4880 313bcead Iustin Pop
4881 313bcead Iustin Pop
    # check bridge existence
4882 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4883 313bcead Iustin Pop
4884 313bcead Iustin Pop
  def Exec(self, feedback_fn):
4885 313bcead Iustin Pop
    """Move an instance.
4886 313bcead Iustin Pop

4887 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
4888 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
4889 313bcead Iustin Pop

4890 313bcead Iustin Pop
    """
4891 313bcead Iustin Pop
    instance = self.instance
4892 313bcead Iustin Pop
4893 313bcead Iustin Pop
    source_node = instance.primary_node
4894 313bcead Iustin Pop
    target_node = self.target_node
4895 313bcead Iustin Pop
4896 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
4897 313bcead Iustin Pop
                 instance.name, source_node)
4898 313bcead Iustin Pop
4899 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4900 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4901 313bcead Iustin Pop
    msg = result.fail_msg
4902 313bcead Iustin Pop
    if msg:
4903 313bcead Iustin Pop
      if self.op.ignore_consistency:
4904 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4905 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
4906 313bcead Iustin Pop
                             " %s is down. Error details: %s",
4907 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
4908 313bcead Iustin Pop
      else:
4909 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4910 313bcead Iustin Pop
                                 " node %s: %s" %
4911 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
4912 313bcead Iustin Pop
4913 313bcead Iustin Pop
    # create the target disks
4914 313bcead Iustin Pop
    try:
4915 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
4916 313bcead Iustin Pop
    except errors.OpExecError:
4917 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4918 313bcead Iustin Pop
      try:
4919 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
4920 313bcead Iustin Pop
      finally:
4921 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4922 313bcead Iustin Pop
        raise
4923 313bcead Iustin Pop
4924 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
4925 313bcead Iustin Pop
4926 313bcead Iustin Pop
    errs = []
4927 313bcead Iustin Pop
    # activate, get path, copy the data over
4928 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
4929 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
4930 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
4931 313bcead Iustin Pop
                                               instance.name, True)
4932 313bcead Iustin Pop
      if result.fail_msg:
4933 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
4934 313bcead Iustin Pop
                        idx, result.fail_msg)
4935 313bcead Iustin Pop
        errs.append(result.fail_msg)
4936 313bcead Iustin Pop
        break
4937 313bcead Iustin Pop
      dev_path = result.payload
4938 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
4939 313bcead Iustin Pop
                                             target_node, dev_path,
4940 313bcead Iustin Pop
                                             cluster_name)
4941 313bcead Iustin Pop
      if result.fail_msg:
4942 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
4943 313bcead Iustin Pop
                        idx, result.fail_msg)
4944 313bcead Iustin Pop
        errs.append(result.fail_msg)
4945 313bcead Iustin Pop
        break
4946 313bcead Iustin Pop
4947 313bcead Iustin Pop
    if errs:
4948 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
4949 313bcead Iustin Pop
      try:
4950 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
4951 313bcead Iustin Pop
      finally:
4952 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
4953 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
4954 313bcead Iustin Pop
                                 (",".join(errs),))
4955 313bcead Iustin Pop
4956 313bcead Iustin Pop
    instance.primary_node = target_node
4957 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
4958 313bcead Iustin Pop
4959 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
4960 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
4961 313bcead Iustin Pop
4962 313bcead Iustin Pop
    # Only start the instance if it's marked as up
4963 313bcead Iustin Pop
    if instance.admin_up:
4964 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
4965 313bcead Iustin Pop
                   instance.name, target_node)
4966 313bcead Iustin Pop
4967 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
4968 313bcead Iustin Pop
                                           ignore_secondaries=True)
4969 313bcead Iustin Pop
      if not disks_ok:
4970 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4971 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
4972 313bcead Iustin Pop
4973 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
4974 313bcead Iustin Pop
      msg = result.fail_msg
4975 313bcead Iustin Pop
      if msg:
4976 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4977 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
4978 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
4979 313bcead Iustin Pop
4980 313bcead Iustin Pop
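# Editor's note -- summary sketch, an assumption about typical usage: a move
# as implemented above boils down to
#
#   shutdown on source -> _CreateDisks(target) -> per-disk
#   blockdev_assemble + blockdev_export -> config update ->
#   _RemoveDisks(source) -> optional restart on target
#
# and would normally be triggered with something like
#
#   op = opcodes.OpMoveInstance(instance_name="instance1.example.com",
#                               target_node="node3.example.com")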
4981 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
4982 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
4983 80cb875c Michael Hanselmann

4984 80cb875c Michael Hanselmann
  """
4985 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
4986 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
4987 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
4988 80cb875c Michael Hanselmann
  REQ_BGL = False
4989 80cb875c Michael Hanselmann
4990 80cb875c Michael Hanselmann
  def ExpandNames(self):
4991 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4992 80cb875c Michael Hanselmann
4993 80cb875c Michael Hanselmann
    self.needed_locks = {
4994 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
4995 80cb875c Michael Hanselmann
      }
4996 80cb875c Michael Hanselmann
4997 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4998 80cb875c Michael Hanselmann
4999 80cb875c Michael Hanselmann
    # Create tasklets for migrating all primary instances on this node
5000 80cb875c Michael Hanselmann
    names = []
5001 80cb875c Michael Hanselmann
    tasklets = []
5002 80cb875c Michael Hanselmann
5003 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5004 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
5005 80cb875c Michael Hanselmann
      names.append(inst.name)
5006 80cb875c Michael Hanselmann
5007 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
5008 80cb875c Michael Hanselmann
5009 80cb875c Michael Hanselmann
    self.tasklets = tasklets
5010 80cb875c Michael Hanselmann
5011 80cb875c Michael Hanselmann
    # Declare instance locks
5012 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
5013 80cb875c Michael Hanselmann
5014 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
5015 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
5016 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
5017 80cb875c Michael Hanselmann
5018 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
5019 80cb875c Michael Hanselmann
    """Build hooks env.
5020 80cb875c Michael Hanselmann

5021 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
5022 80cb875c Michael Hanselmann

5023 80cb875c Michael Hanselmann
    """
5024 80cb875c Michael Hanselmann
    env = {
5025 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
5026 80cb875c Michael Hanselmann
      }
5027 80cb875c Michael Hanselmann
5028 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
5029 80cb875c Michael Hanselmann
5030 80cb875c Michael Hanselmann
    return (env, nl, nl)
5031 80cb875c Michael Hanselmann
5032 80cb875c Michael Hanselmann
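# Editor's note -- sketch of the pattern used above (grounded in ExpandNames):
# evacuating a node's primary instances is just the per-instance migration
# tasklet applied in a loop, roughly
#
#   tasklets = [TLMigrateInstance(self, inst.name, self.op.live, False)
#               for inst in _GetNodePrimaryInstances(self.cfg, node_name)]
#
# which the generic LU/tasklet machinery then runs one after the other.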
5033 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
5034 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
5035 3e06e001 Michael Hanselmann
    """Initializes this class.
5036 3e06e001 Michael Hanselmann

5037 3e06e001 Michael Hanselmann
    """
5038 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
5039 464243a7 Michael Hanselmann
5040 3e06e001 Michael Hanselmann
    # Parameters
5041 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
5042 3e06e001 Michael Hanselmann
    self.live = live
5043 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
5044 3e06e001 Michael Hanselmann
5045 53c776b5 Iustin Pop
  def CheckPrereq(self):
5046 53c776b5 Iustin Pop
    """Check prerequisites.
5047 53c776b5 Iustin Pop

5048 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
5049 53c776b5 Iustin Pop

5050 53c776b5 Iustin Pop
    """
5051 cf26a87a Iustin Pop
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5052 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(instance_name)
5053 cf26a87a Iustin Pop
    assert instance is not None
5054 53c776b5 Iustin Pop
5055 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5056 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
5057 5c983ee5 Iustin Pop
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5058 53c776b5 Iustin Pop
5059 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
5060 53c776b5 Iustin Pop
    if not secondary_nodes:
5061 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
5062 733a2b6a Iustin Pop
                                      " drbd8 disk template")
5063 53c776b5 Iustin Pop
5064 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5065 53c776b5 Iustin Pop
5066 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
5067 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
5068 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
5069 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
5070 53c776b5 Iustin Pop
                         instance.hypervisor)
5071 53c776b5 Iustin Pop
5072 53c776b5 Iustin Pop
    # check bridge existence
5073 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5074 53c776b5 Iustin Pop
5075 3e06e001 Michael Hanselmann
    if not self.cleanup:
5076 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
5077 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
5078 53c776b5 Iustin Pop
                                                 instance)
5079 045dd6d9 Iustin Pop
      result.Raise("Can't migrate, please use failover",
5080 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_STATE)
5081 53c776b5 Iustin Pop
5082 53c776b5 Iustin Pop
    self.instance = instance
5083 53c776b5 Iustin Pop
5084 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
5085 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
5086 53c776b5 Iustin Pop

5087 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
5088 53c776b5 Iustin Pop

5089 53c776b5 Iustin Pop
    """
5090 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
5091 53c776b5 Iustin Pop
    all_done = False
5092 53c776b5 Iustin Pop
    while not all_done:
5093 53c776b5 Iustin Pop
      all_done = True
5094 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5095 53c776b5 Iustin Pop
                                            self.nodes_ip,
5096 53c776b5 Iustin Pop
                                            self.instance.disks)
5097 53c776b5 Iustin Pop
      min_percent = 100
5098 53c776b5 Iustin Pop
      for node, nres in result.items():
5099 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
5100 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
5101 53c776b5 Iustin Pop
        all_done = all_done and node_done
5102 53c776b5 Iustin Pop
        if node_percent is not None:
5103 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
5104 53c776b5 Iustin Pop
      if not all_done:
5105 53c776b5 Iustin Pop
        if min_percent < 100:
5106 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5107 53c776b5 Iustin Pop
        time.sleep(2)
5108 53c776b5 Iustin Pop
5109 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
5110 53c776b5 Iustin Pop
    """Demote a node to secondary.
5111 53c776b5 Iustin Pop

5112 53c776b5 Iustin Pop
    """
5113 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
5114 53c776b5 Iustin Pop
5115 53c776b5 Iustin Pop
    for dev in self.instance.disks:
5116 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
5117 53c776b5 Iustin Pop
5118 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5119 53c776b5 Iustin Pop
                                          self.instance.disks)
5120 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
5121 53c776b5 Iustin Pop
5122 53c776b5 Iustin Pop
  def _GoStandalone(self):
5123 53c776b5 Iustin Pop
    """Disconnect from the network.
5124 53c776b5 Iustin Pop

5125 53c776b5 Iustin Pop
    """
5126 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
5127 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5128 53c776b5 Iustin Pop
                                               self.instance.disks)
5129 53c776b5 Iustin Pop
    for node, nres in result.items():
5130 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
5131 53c776b5 Iustin Pop
5132 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
5133 53c776b5 Iustin Pop
    """Reconnect to the network.
5134 53c776b5 Iustin Pop

5135 53c776b5 Iustin Pop
    """
5136 53c776b5 Iustin Pop
    if multimaster:
5137 53c776b5 Iustin Pop
      msg = "dual-master"
5138 53c776b5 Iustin Pop
    else:
5139 53c776b5 Iustin Pop
      msg = "single-master"
5140 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
5141 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5142 53c776b5 Iustin Pop
                                           self.instance.disks,
5143 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
5144 53c776b5 Iustin Pop
    for node, nres in result.items():
5145 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
5146 53c776b5 Iustin Pop
5147 53c776b5 Iustin Pop
  def _ExecCleanup(self):
5148 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
5149 53c776b5 Iustin Pop

5150 53c776b5 Iustin Pop
    The cleanup is done by:
5151 53c776b5 Iustin Pop
      - check that the instance is running only on one node
5152 53c776b5 Iustin Pop
        (and update the config if needed)
5153 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
5154 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5155 53c776b5 Iustin Pop
      - disconnect from the network
5156 53c776b5 Iustin Pop
      - change disks into single-master mode
5157 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
5158 53c776b5 Iustin Pop

5159 53c776b5 Iustin Pop
    """
5160 53c776b5 Iustin Pop
    instance = self.instance
5161 53c776b5 Iustin Pop
    target_node = self.target_node
5162 53c776b5 Iustin Pop
    source_node = self.source_node
5163 53c776b5 Iustin Pop
5164 53c776b5 Iustin Pop
    # check running on only one node
5165 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
5166 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
5167 53c776b5 Iustin Pop
                     " a bad state)")
5168 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5169 53c776b5 Iustin Pop
    for node, result in ins_l.items():
5170 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
5171 53c776b5 Iustin Pop
5172 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
5173 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
5174 53c776b5 Iustin Pop
5175 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
5176 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5177 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
5178 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
5179 53c776b5 Iustin Pop
                               " and restart this operation.")
5180 53c776b5 Iustin Pop
5181 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
5182 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
5183 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
5184 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
5185 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
5186 53c776b5 Iustin Pop
5187 53c776b5 Iustin Pop
    if runningon_target:
5188 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
5189 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
5190 53c776b5 Iustin Pop
                       " updating config" % target_node)
5191 53c776b5 Iustin Pop
      instance.primary_node = target_node
5192 a4eae71f Michael Hanselmann
      self.cfg.Update(instance, self.feedback_fn)
5193 53c776b5 Iustin Pop
      demoted_node = source_node
5194 53c776b5 Iustin Pop
    else:
5195 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
5196 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
5197 53c776b5 Iustin Pop
      demoted_node = target_node
5198 53c776b5 Iustin Pop
5199 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
5200 53c776b5 Iustin Pop
    try:
5201 53c776b5 Iustin Pop
      self._WaitUntilSync()
5202 53c776b5 Iustin Pop
    except errors.OpExecError:
5203 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
5204 53c776b5 Iustin Pop
      # won't be able to sync
5205 53c776b5 Iustin Pop
      pass
5206 53c776b5 Iustin Pop
    self._GoStandalone()
5207 53c776b5 Iustin Pop
    self._GoReconnect(False)
5208 53c776b5 Iustin Pop
    self._WaitUntilSync()
5209 53c776b5 Iustin Pop
5210 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5211 53c776b5 Iustin Pop
5212 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
5213 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
5214 6906a9d8 Guido Trotter

5215 6906a9d8 Guido Trotter
    """
5216 6906a9d8 Guido Trotter
    target_node = self.target_node
5217 6906a9d8 Guido Trotter
    try:
5218 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
5219 6906a9d8 Guido Trotter
      self._GoStandalone()
5220 6906a9d8 Guido Trotter
      self._GoReconnect(False)
5221 6906a9d8 Guido Trotter
      self._WaitUntilSync()
5222 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
5223 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5224 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
5225 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
5226 3e06e001 Michael Hanselmann
                         str(err))
5227 6906a9d8 Guido Trotter
5228 6906a9d8 Guido Trotter
  def _AbortMigration(self):
5229 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
5230 6906a9d8 Guido Trotter

5231 6906a9d8 Guido Trotter
    """
5232 6906a9d8 Guido Trotter
    instance = self.instance
5233 6906a9d8 Guido Trotter
    target_node = self.target_node
5234 6906a9d8 Guido Trotter
    migration_info = self.migration_info
5235 6906a9d8 Guido Trotter
5236 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
5237 6906a9d8 Guido Trotter
                                                    instance,
5238 6906a9d8 Guido Trotter
                                                    migration_info,
5239 6906a9d8 Guido Trotter
                                                    False)
5240 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
5241 6906a9d8 Guido Trotter
    if abort_msg:
5242 099c52ad Iustin Pop
      logging.error("Aborting migration failed on target node %s: %s",
5243 099c52ad Iustin Pop
                    target_node, abort_msg)
5244 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
5245 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5246 6906a9d8 Guido Trotter
5247 53c776b5 Iustin Pop
  def _ExecMigration(self):
5248 53c776b5 Iustin Pop
    """Migrate an instance.
5249 53c776b5 Iustin Pop

5250 53c776b5 Iustin Pop
    The migrate is done by:
5251 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5252 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5253 53c776b5 Iustin Pop
      - migrate the instance
5254 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5255 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5256 53c776b5 Iustin Pop
      - change disks into single-master mode
5257 53c776b5 Iustin Pop

5258 53c776b5 Iustin Pop
    """
5259 53c776b5 Iustin Pop
    instance = self.instance
5260 53c776b5 Iustin Pop
    target_node = self.target_node
5261 53c776b5 Iustin Pop
    source_node = self.source_node
5262 53c776b5 Iustin Pop
5263 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5264 53c776b5 Iustin Pop
    for dev in instance.disks:
5265 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5266 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5267 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5268 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5269 53c776b5 Iustin Pop
5270 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5271 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5272 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5273 6906a9d8 Guido Trotter
    if msg:
5274 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5275 0959c824 Iustin Pop
                 (source_node, msg))
5276 6906a9d8 Guido Trotter
      logging.error(log_err)
5277 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5278 6906a9d8 Guido Trotter
5279 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5280 6906a9d8 Guido Trotter
5281 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5282 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5283 53c776b5 Iustin Pop
    self._GoStandalone()
5284 53c776b5 Iustin Pop
    self._GoReconnect(True)
5285 53c776b5 Iustin Pop
    self._WaitUntilSync()
5286 53c776b5 Iustin Pop
5287 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5288 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5289 6906a9d8 Guido Trotter
                                           instance,
5290 6906a9d8 Guido Trotter
                                           migration_info,
5291 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5292 6906a9d8 Guido Trotter
5293 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5294 6906a9d8 Guido Trotter
    if msg:
5295 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5296 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5297 78212a5d Iustin Pop
      self.feedback_fn("Pre-migration failed, aborting")
5298 6906a9d8 Guido Trotter
      self._AbortMigration()
5299 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5300 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5301 6906a9d8 Guido Trotter
                               (instance.name, msg))
5302 6906a9d8 Guido Trotter
5303 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5304 53c776b5 Iustin Pop
    time.sleep(10)
5305 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5306 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5307 3e06e001 Michael Hanselmann
                                            self.live)
5308 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5309 53c776b5 Iustin Pop
    if msg:
5310 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5311 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5312 78212a5d Iustin Pop
      self.feedback_fn("Migration failed, aborting")
5313 6906a9d8 Guido Trotter
      self._AbortMigration()
5314 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5315 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5316 53c776b5 Iustin Pop
                               (instance.name, msg))
5317 53c776b5 Iustin Pop
    time.sleep(10)
5318 53c776b5 Iustin Pop
5319 53c776b5 Iustin Pop
    instance.primary_node = target_node
5320 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5321 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, self.feedback_fn)
5322 53c776b5 Iustin Pop
5323 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5324 6906a9d8 Guido Trotter
                                              instance,
5325 6906a9d8 Guido Trotter
                                              migration_info,
5326 6906a9d8 Guido Trotter
                                              True)
5327 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5328 6906a9d8 Guido Trotter
    if msg:
5329 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5330 099c52ad Iustin Pop
                    " %s", msg)
5331 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5332 6906a9d8 Guido Trotter
                               msg)
5333 6906a9d8 Guido Trotter
5334 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5335 53c776b5 Iustin Pop
    self._WaitUntilSync()
5336 53c776b5 Iustin Pop
    self._GoStandalone()
5337 53c776b5 Iustin Pop
    self._GoReconnect(False)
5338 53c776b5 Iustin Pop
    self._WaitUntilSync()
5339 53c776b5 Iustin Pop
5340 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5341 53c776b5 Iustin Pop
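  # Editor's note -- summary sketch of the DRBD state machine driven by the
  # helpers above (an editorial reading aid, not additional behaviour):
  #
  #   _EnsureSecondary(node)     -> blockdev_close on that node
  #   _GoStandalone()            -> drbd_disconnect_net on both nodes
  #   _GoReconnect(multimaster)  -> drbd_attach_net, dual- or single-master
  #   _WaitUntilSync()           -> poll drbd_wait_sync until 100%
  #
  # so a live migration is essentially: go dual-master, sync, migrate the
  # instance, then fall back to single-master with the roles swapped, while
  # _ExecCleanup reuses the same steps to converge on a sane single-master
  # state after a failed or interrupted migration.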
5342 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5343 53c776b5 Iustin Pop
    """Perform the migration.
5344 53c776b5 Iustin Pop

5345 53c776b5 Iustin Pop
    """
5346 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5347 80cb875c Michael Hanselmann
5348 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5349 53c776b5 Iustin Pop
5350 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5351 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5352 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5353 53c776b5 Iustin Pop
    self.nodes_ip = {
5354 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5355 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5356 53c776b5 Iustin Pop
      }
5357 3e06e001 Michael Hanselmann
5358 3e06e001 Michael Hanselmann
    if self.cleanup:
5359 53c776b5 Iustin Pop
      return self._ExecCleanup()
5360 53c776b5 Iustin Pop
    else:
5361 53c776b5 Iustin Pop
      return self._ExecMigration()
5362 53c776b5 Iustin Pop
5363 53c776b5 Iustin Pop
5364 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5365 428958aa Iustin Pop
                    info, force_open):
5366 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5367 a8083063 Iustin Pop

5368 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5369 a8083063 Iustin Pop
  all its children.
5370 a8083063 Iustin Pop

5371 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5372 a8083063 Iustin Pop

5373 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5374 428958aa Iustin Pop
  @param node: the node on which to create the device
5375 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5376 428958aa Iustin Pop
  @param instance: the instance which owns the device
5377 428958aa Iustin Pop
  @type device: L{objects.Disk}
5378 428958aa Iustin Pop
  @param device: the device to create
5379 428958aa Iustin Pop
  @type force_create: boolean
5380 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5381 428958aa Iustin Pop
      will be changed to True whenever we find a device which has
5382 428958aa Iustin Pop
      CreateOnSecondary() attribute
5383 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5384 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5385 428958aa Iustin Pop
  @type force_open: boolean
5386 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5387 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5388 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5389 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5390 428958aa Iustin Pop

5391 a8083063 Iustin Pop
  """
5392 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5393 428958aa Iustin Pop
    force_create = True
5394 796cab27 Iustin Pop
5395 a8083063 Iustin Pop
  if device.children:
5396 a8083063 Iustin Pop
    for child in device.children:
5397 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5398 428958aa Iustin Pop
                      info, force_open)
5399 a8083063 Iustin Pop
5400 428958aa Iustin Pop
  if not force_create:
5401 796cab27 Iustin Pop
    return
5402 796cab27 Iustin Pop
5403 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5404 de12473a Iustin Pop
5405 de12473a Iustin Pop
5406 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5407 de12473a Iustin Pop
  """Create a single block device on a given node.
5408 de12473a Iustin Pop

5409 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5410 de12473a Iustin Pop
  created in advance.
5411 de12473a Iustin Pop

5412 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5413 de12473a Iustin Pop
  @param node: the node on which to create the device
5414 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5415 de12473a Iustin Pop
  @param instance: the instance which owns the device
5416 de12473a Iustin Pop
  @type device: L{objects.Disk}
5417 de12473a Iustin Pop
  @param device: the device to create
5418 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5419 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5420 de12473a Iustin Pop
  @type force_open: boolean
5421 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5422 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5423 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5424 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5425 de12473a Iustin Pop

5426 de12473a Iustin Pop
  """
5427 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5428 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5429 428958aa Iustin Pop
                                       instance.name, force_open, info)
5430 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5431 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5432 a8083063 Iustin Pop
  if device.physical_id is None:
5433 0959c824 Iustin Pop
    device.physical_id = result.payload
5434 a8083063 Iustin Pop
5435 a8083063 Iustin Pop
5436 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5437 923b1523 Iustin Pop
  """Generate a suitable LV name.
5438 923b1523 Iustin Pop

5439 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
5440 923b1523 Iustin Pop

5441 923b1523 Iustin Pop
  """
5442 923b1523 Iustin Pop
  results = []
5443 923b1523 Iustin Pop
  for val in exts:
5444 4fae38c5 Guido Trotter
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
5445 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5446 923b1523 Iustin Pop
  return results
5447 923b1523 Iustin Pop
5448 923b1523 Iustin Pop
5449 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5450 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5451 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5452 a1f445d3 Iustin Pop

5453 a1f445d3 Iustin Pop
  """
5454 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5455 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5456 afa1386e Guido Trotter
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
5457 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5458 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5459 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5460 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5461 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5462 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5463 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5464 f9518d38 Iustin Pop
                                      shared_secret),
5465 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5466 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5467 a1f445d3 Iustin Pop
  return drbd_dev
5468 a1f445d3 Iustin Pop
5469 7c0d6283 Michael Hanselmann
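# Editor's note -- illustrative sketch with assumed values: for a 1024 MB
# disk the helper above returns an objects.Disk tree shaped roughly like
#
#   Disk(dev_type=LD_DRBD8, size=1024,
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(LD_LV, size=1024,
#                       logical_id=(vg, "<uuid>.disk0_data")),
#                  Disk(LD_LV, size=128,
#                       logical_id=(vg, "<uuid>.disk0_meta"))],
#        iv_name="disk/0")
#
# i.e. a DRBD8 device backed by a data LV and a fixed 128 MB metadata LV.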
5470 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5471 a8083063 Iustin Pop
                          instance_name, primary_node,
5472 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5473 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5474 e2a65344 Iustin Pop
                          base_index):
5475 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5476 a8083063 Iustin Pop

5477 a8083063 Iustin Pop
  """
5478 a8083063 Iustin Pop
  #TODO: compute space requirements
5479 a8083063 Iustin Pop
5480 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5481 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5482 08db7c5c Iustin Pop
  disks = []
5483 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5484 08db7c5c Iustin Pop
    pass
5485 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5486 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5487 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5488 923b1523 Iustin Pop
5489 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5490 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5491 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5492 e2a65344 Iustin Pop
      disk_index = idx + base_index
5493 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5494 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5495 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5496 6ec66eae Iustin Pop
                              mode=disk["mode"])
5497 08db7c5c Iustin Pop
      disks.append(disk_dev)
5498 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5499 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5500 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5501 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5502 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5503 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5504 08db7c5c Iustin Pop
5505 e6c1ff2f Iustin Pop
    names = []
5506 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5507 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5508 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5509 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5510 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5511 112050d9 Iustin Pop
      disk_index = idx + base_index
5512 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5513 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5514 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5515 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5516 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5517 08db7c5c Iustin Pop
      disks.append(disk_dev)
5518 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5519 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5520 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5521 0f1a06e3 Manuel Franceschini
5522 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5523 112050d9 Iustin Pop
      disk_index = idx + base_index
5524 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5525 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5526 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5527 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5528 43e99cff Guido Trotter
                                                         disk_index)),
5529 6ec66eae Iustin Pop
                              mode=disk["mode"])
5530 08db7c5c Iustin Pop
      disks.append(disk_dev)
5531 a8083063 Iustin Pop
  else:
5532 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5533 a8083063 Iustin Pop
  return disks
5534 a8083063 Iustin Pop
5535 a8083063 Iustin Pop
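# Editor's note -- illustrative sketch (assumed call, not from the source):
# disk_info as consumed above is a list of dicts carrying at least "size"
# (in MB) and "mode", so a single plain-LVM disk could be generated with
#
#   disks = _GenerateDiskTemplate(lu, constants.DT_PLAIN,
#                                 "inst1.example.com", pnode, [],
#                                 [{"size": 10240, "mode": "rw"}],
#                                 None, None, 0)
#
# which yields one LD_LV objects.Disk with iv_name "disk/0".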
5536 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5537 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5538 3ecf6786 Iustin Pop

5539 3ecf6786 Iustin Pop
  """
5540 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5541 a0c3fea1 Michael Hanselmann
5542 a0c3fea1 Michael Hanselmann
5543 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5544 a8083063 Iustin Pop
  """Create all disks for an instance.
5545 a8083063 Iustin Pop

5546 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5547 a8083063 Iustin Pop

5548 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5549 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5550 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5551 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5552 bd315bfa Iustin Pop
  @type to_skip: list
5553 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5554 621b7678 Iustin Pop
  @type target_node: string
5555 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5556 e4376078 Iustin Pop
  @rtype: None
5557 e4376078 Iustin Pop
  @return: None; errors are signalled by raising an exception
5558 a8083063 Iustin Pop

5559 a8083063 Iustin Pop
  """
5560 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5561 621b7678 Iustin Pop
  if target_node is None:
5562 621b7678 Iustin Pop
    pnode = instance.primary_node
5563 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5564 621b7678 Iustin Pop
  else:
5565 621b7678 Iustin Pop
    pnode = target_node
5566 621b7678 Iustin Pop
    all_nodes = [pnode]
5567 a0c3fea1 Michael Hanselmann
5568 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5569 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5570 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5571 0f1a06e3 Manuel Franceschini
5572 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5573 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5574 0f1a06e3 Manuel Franceschini
5575 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5576 24991749 Iustin Pop
  # LUSetInstanceParams
5577 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5578 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5579 bd315bfa Iustin Pop
      continue
5580 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5581 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5582 a8083063 Iustin Pop
    #HARDCODE
5583 621b7678 Iustin Pop
    for node in all_nodes:
5584 428958aa Iustin Pop
      f_create = node == pnode
5585 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
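# A minimal usage sketch based on the signature above (the node name and the
# skipped index are illustrative):
#
#   _CreateDisks(lu, instance)                # create every disk on its nodes
#   _CreateDisks(lu, instance, to_skip=[0],   # skip disk 0, create the rest
#                target_node="node2.example.com")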
5586 a8083063 Iustin Pop
5587 a8083063 Iustin Pop
5588 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5589 a8083063 Iustin Pop
  """Remove all disks for an instance.
5590 a8083063 Iustin Pop

5591 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5592 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5593 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5594 a8083063 Iustin Pop
  with `_CreateDisks()`).
5595 a8083063 Iustin Pop

5596 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5597 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5598 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5599 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5600 621b7678 Iustin Pop
  @type target_node: string
5601 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5602 e4376078 Iustin Pop
  @rtype: boolean
5603 e4376078 Iustin Pop
  @return: the success of the removal
5604 a8083063 Iustin Pop

5605 a8083063 Iustin Pop
  """
5606 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5607 a8083063 Iustin Pop
5608 e1bc0878 Iustin Pop
  all_result = True
5609 a8083063 Iustin Pop
  for device in instance.disks:
5610 621b7678 Iustin Pop
    if target_node:
5611 621b7678 Iustin Pop
      edata = [(target_node, device)]
5612 621b7678 Iustin Pop
    else:
5613 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5614 621b7678 Iustin Pop
    for node, disk in edata:
5615 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5616 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5617 e1bc0878 Iustin Pop
      if msg:
5618 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5619 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5620 e1bc0878 Iustin Pop
        all_result = False
5621 0f1a06e3 Manuel Franceschini
5622 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5623 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5624 dfc2a24c Guido Trotter
    if target_node:
5625 dfc2a24c Guido Trotter
      tgt = target_node
5626 621b7678 Iustin Pop
    else:
5627 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5628 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5629 621b7678 Iustin Pop
    if result.fail_msg:
5630 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5631 621b7678 Iustin Pop
                    file_storage_dir, instance.primary_node, result.fail_msg)
5632 e1bc0878 Iustin Pop
      all_result = False
5633 0f1a06e3 Manuel Franceschini
5634 e1bc0878 Iustin Pop
  return all_result
5635 a8083063 Iustin Pop
5636 a8083063 Iustin Pop
5637 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5638 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5639 e2fe6369 Iustin Pop

5640 e2fe6369 Iustin Pop
  """
5641 e2fe6369 Iustin Pop
  # Required free disk space as a function of disk template and disk sizes
5642 e2fe6369 Iustin Pop
  req_size_dict = {
5643 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5644 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5645 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5646 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5647 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5648 e2fe6369 Iustin Pop
  }
5649 e2fe6369 Iustin Pop
5650 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5651 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5652 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5653 e2fe6369 Iustin Pop
5654 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
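# Worked example with illustrative sizes, disks = [{"size": 1024},
# {"size": 2048}]:
#
#   _ComputeDiskSize(constants.DT_PLAIN, disks)    ==> 3072
#   _ComputeDiskSize(constants.DT_DRBD8, disks)    ==> (1024 + 128) +
#                                                      (2048 + 128) == 3328
#   _ComputeDiskSize(constants.DT_DISKLESS, disks) ==> None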
5655 e2fe6369 Iustin Pop
5656 e2fe6369 Iustin Pop
5657 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5658 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5659 74409b12 Iustin Pop

5660 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
5661 74409b12 Iustin Pop
  used in both instance create and instance modify.
5662 74409b12 Iustin Pop

5663 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5664 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5665 74409b12 Iustin Pop
  @type nodenames: list
5666 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5667 74409b12 Iustin Pop
  @type hvname: string
5668 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5669 74409b12 Iustin Pop
  @type hvparams: dict
5670 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5671 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5672 74409b12 Iustin Pop

5673 74409b12 Iustin Pop
  """
5674 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5675 74409b12 Iustin Pop
                                                  hvname,
5676 74409b12 Iustin Pop
                                                  hvparams)
5677 74409b12 Iustin Pop
  for node in nodenames:
5678 781de953 Iustin Pop
    info = hvinfo[node]
5679 68c6f21c Iustin Pop
    if info.offline:
5680 68c6f21c Iustin Pop
      continue
5681 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5682 74409b12 Iustin Pop
5683 74409b12 Iustin Pop
5684 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5685 a8083063 Iustin Pop
  """Create an instance.
5686 a8083063 Iustin Pop

5687 a8083063 Iustin Pop
  """
5688 a8083063 Iustin Pop
  HPATH = "instance-add"
5689 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5690 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5691 08db7c5c Iustin Pop
              "mode", "start",
5692 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5693 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5694 7baf741d Guido Trotter
  REQ_BGL = False
5695 7baf741d Guido Trotter
5696 5f23e043 Iustin Pop
  def CheckArguments(self):
5697 5f23e043 Iustin Pop
    """Check arguments.
5698 5f23e043 Iustin Pop

5699 5f23e043 Iustin Pop
    """
5700 5f23e043 Iustin Pop
    # do not require name_check to ease forward/backward compatibility
5701 5f23e043 Iustin Pop
    # for tools
5702 5f23e043 Iustin Pop
    if not hasattr(self.op, "name_check"):
5703 5f23e043 Iustin Pop
      self.op.name_check = True
5704 44caf5a8 Iustin Pop
    # validate/normalize the instance name
5705 44caf5a8 Iustin Pop
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
5706 5f23e043 Iustin Pop
    if self.op.ip_check and not self.op.name_check:
5707 5f23e043 Iustin Pop
      # TODO: make the ip check more flexible and not depend on the name check
5708 5f23e043 Iustin Pop
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
5709 5f23e043 Iustin Pop
                                 errors.ECODE_INVAL)
5710 cb7c0198 Iustin Pop
    if (self.op.disk_template == constants.DT_FILE and
5711 cb7c0198 Iustin Pop
        not constants.ENABLE_FILE_STORAGE):
5712 cb7c0198 Iustin Pop
      raise errors.OpPrereqError("File storage disabled at configure time",
5713 cb7c0198 Iustin Pop
                                 errors.ECODE_INVAL)
5714 5f23e043 Iustin Pop
5715 7baf741d Guido Trotter
  def ExpandNames(self):
5716 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
5717 7baf741d Guido Trotter

5718 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
5719 7baf741d Guido Trotter

5720 7baf741d Guido Trotter
    """
5721 7baf741d Guido Trotter
    self.needed_locks = {}
5722 7baf741d Guido Trotter
5723 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
5724 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5725 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
5726 7baf741d Guido Trotter
        setattr(self.op, attr, None)
5727 7baf741d Guido Trotter
5728 4b2f38dd Iustin Pop
    # cheap checks, mostly for whether valid constants were given
5729 4b2f38dd Iustin Pop
5730 7baf741d Guido Trotter
    # verify creation mode
5731 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
5732 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
5733 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
5734 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
5735 4b2f38dd Iustin Pop
5736 7baf741d Guido Trotter
    # disk template and mirror node verification
5737 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
5738 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name",
5739 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5740 7baf741d Guido Trotter
5741 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
5742 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
5743 4b2f38dd Iustin Pop
5744 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5745 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
5746 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
5747 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
5748 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
5749 5c983ee5 Iustin Pop
                                  ",".join(enabled_hvs)),
5750 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5751 4b2f38dd Iustin Pop
5752 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
5753 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5754 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
5755 8705eb96 Iustin Pop
                                  self.op.hvparams)
5756 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
5757 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
5758 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
5759 7736a5f2 Iustin Pop
    # check that we don't specify global parameters on an instance
5760 7736a5f2 Iustin Pop
    _CheckGlobalHvParams(self.op.hvparams)
5761 6785674e Iustin Pop
5762 338e51e8 Iustin Pop
    # fill and remember the beparams dict
5763 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5764 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
5765 338e51e8 Iustin Pop
                                    self.op.beparams)
5766 338e51e8 Iustin Pop
5767 7baf741d Guido Trotter
    #### instance parameters check
5768 7baf741d Guido Trotter
5769 7baf741d Guido Trotter
    # instance name verification
5770 5f23e043 Iustin Pop
    if self.op.name_check:
5771 5f23e043 Iustin Pop
      hostname1 = utils.GetHostInfo(self.op.instance_name)
5772 5f23e043 Iustin Pop
      self.op.instance_name = instance_name = hostname1.name
5773 5f23e043 Iustin Pop
      # used in CheckPrereq for ip ping check
5774 5f23e043 Iustin Pop
      self.check_ip = hostname1.ip
5775 5f23e043 Iustin Pop
    else:
5776 5f23e043 Iustin Pop
      instance_name = self.op.instance_name
5777 5f23e043 Iustin Pop
      self.check_ip = None
5778 7baf741d Guido Trotter
5779 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
5780 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
5781 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
5782 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5783 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
5784 7baf741d Guido Trotter
5785 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
5786 7baf741d Guido Trotter
5787 08db7c5c Iustin Pop
    # NIC buildup
5788 08db7c5c Iustin Pop
    self.nics = []
5789 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
5790 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
5791 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
5792 9dce4771 Guido Trotter
      if nic_mode is None:
5793 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
5794 9dce4771 Guido Trotter
5795 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
5796 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
5797 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
5798 9dce4771 Guido Trotter
      else:
5799 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
5800 9dce4771 Guido Trotter
5801 08db7c5c Iustin Pop
      # ip validity checks
5802 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
5803 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
5804 08db7c5c Iustin Pop
        nic_ip = None
5805 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
5806 5f23e043 Iustin Pop
        if not self.op.name_check:
5807 5f23e043 Iustin Pop
          raise errors.OpPrereqError("IP address set to auto but name checks"
5808 5f23e043 Iustin Pop
                                     " have been skipped. Aborting.",
5809 5f23e043 Iustin Pop
                                     errors.ECODE_INVAL)
5810 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
5811 08db7c5c Iustin Pop
      else:
5812 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
5813 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
5814 5c983ee5 Iustin Pop
                                     " like a valid IP" % ip,
5815 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
5816 08db7c5c Iustin Pop
        nic_ip = ip
5817 08db7c5c Iustin Pop
5818 b8716596 Michael Hanselmann
      # TODO: check the ip address for uniqueness
5819 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
5820 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
5821 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5822 9dce4771 Guido Trotter
5823 08db7c5c Iustin Pop
      # MAC address verification
5824 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
5825 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5826 82187135 René Nussbaumer
        mac = utils.NormalizeAndValidateMac(mac)
5827 82187135 René Nussbaumer
5828 82187135 René Nussbaumer
        try:
5829 82187135 René Nussbaumer
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
5830 82187135 René Nussbaumer
        except errors.ReservationError:
5831 82187135 René Nussbaumer
          raise errors.OpPrereqError("MAC address %s already in use"
5832 82187135 René Nussbaumer
                                     " in cluster" % mac,
5833 82187135 René Nussbaumer
                                     errors.ECODE_NOTUNIQUE)
5834 87e43988 Iustin Pop
5835 08db7c5c Iustin Pop
      # bridge verification
5836 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
5837 9dce4771 Guido Trotter
      link = nic.get("link", None)
5838 9dce4771 Guido Trotter
      if bridge and link:
5839 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
5840 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
5841 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
5842 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
5843 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5844 9dce4771 Guido Trotter
      elif bridge:
5845 9dce4771 Guido Trotter
        link = bridge
5846 9dce4771 Guido Trotter
5847 9dce4771 Guido Trotter
      nicparams = {}
5848 9dce4771 Guido Trotter
      if nic_mode_req:
5849 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
5850 9dce4771 Guido Trotter
      if link:
5851 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
5852 9dce4771 Guido Trotter
5853 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
5854 9dce4771 Guido Trotter
                                      nicparams)
5855 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
5856 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
5857 08db7c5c Iustin Pop
5858 08db7c5c Iustin Pop
    # disk checks/pre-build
5859 08db7c5c Iustin Pop
    self.disks = []
5860 08db7c5c Iustin Pop
    for disk in self.op.disks:
5861 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
5862 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
5863 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
5864 5c983ee5 Iustin Pop
                                   mode, errors.ECODE_INVAL)
5865 08db7c5c Iustin Pop
      size = disk.get("size", None)
5866 08db7c5c Iustin Pop
      if size is None:
5867 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
5868 08db7c5c Iustin Pop
      try:
5869 08db7c5c Iustin Pop
        size = int(size)
5870 691744c4 Iustin Pop
      except (TypeError, ValueError):
5871 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
5872 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5873 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
5874 08db7c5c Iustin Pop
5875 7baf741d Guido Trotter
    # file storage checks
5876 7baf741d Guido Trotter
    if (self.op.file_driver and
5877 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
5878 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
5879 5c983ee5 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
5880 7baf741d Guido Trotter
5881 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
5882 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
5883 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5884 7baf741d Guido Trotter
5885 7baf741d Guido Trotter
    ### Node/iallocator related checks
5886 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
5887 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
5888 5c983ee5 Iustin Pop
                                 " node must be given",
5889 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
5890 7baf741d Guido Trotter
5891 7baf741d Guido Trotter
    if self.op.iallocator:
5892 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5893 7baf741d Guido Trotter
    else:
5894 cf26a87a Iustin Pop
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
5895 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
5896 7baf741d Guido Trotter
      if self.op.snode is not None:
5897 cf26a87a Iustin Pop
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
5898 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
5899 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
5900 7baf741d Guido Trotter
5901 7baf741d Guido Trotter
    # in case of import lock the source node too
5902 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
5903 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
5904 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
5905 7baf741d Guido Trotter
5906 b9322a9f Guido Trotter
      if src_path is None:
5907 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
5908 b9322a9f Guido Trotter
5909 b9322a9f Guido Trotter
      if src_node is None:
5910 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5911 b9322a9f Guido Trotter
        self.op.src_node = None
5912 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
5913 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
5914 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
5915 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
5916 b9322a9f Guido Trotter
      else:
5917 cf26a87a Iustin Pop
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
5918 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5919 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
5920 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
5921 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
5922 c4feafe8 Iustin Pop
            utils.PathJoin(constants.EXPORT_DIR, src_path)
5923 7baf741d Guido Trotter
5924 f2c05717 Guido Trotter
      # On import force_variant must be True, because if we forced it at
5925 f2c05717 Guido Trotter
      # initial install, our only chance when importing it back is that it
5926 f2c05717 Guido Trotter
      # works again!
5927 f2c05717 Guido Trotter
      self.op.force_variant = True
5928 f2c05717 Guido Trotter
5929 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
5930 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
5931 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
5932 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
5933 f2c05717 Guido Trotter
      self.op.force_variant = getattr(self.op, "force_variant", False)
5934 a8083063 Iustin Pop
5935 538475ca Iustin Pop
  def _RunAllocator(self):
5936 538475ca Iustin Pop
    """Run the allocator based on input opcode.
5937 538475ca Iustin Pop

5938 538475ca Iustin Pop
    """
5939 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
5940 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
5941 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
5942 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
5943 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
5944 d1c2dd75 Iustin Pop
                     tags=[],
5945 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
5946 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
5947 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
5948 08db7c5c Iustin Pop
                     disks=self.disks,
5949 d1c2dd75 Iustin Pop
                     nics=nics,
5950 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
5951 29859cb7 Iustin Pop
                     )
5952 d1c2dd75 Iustin Pop
5953 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
5954 d1c2dd75 Iustin Pop
5955 d1c2dd75 Iustin Pop
    if not ial.success:
5956 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5957 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
5958 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
5959 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
5960 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
5961 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5962 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
5963 680f0a89 Iustin Pop
                                 (self.op.iallocator, len(ial.result),
5964 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
5965 680f0a89 Iustin Pop
    self.op.pnode = ial.result[0]
5966 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
5967 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
5968 680f0a89 Iustin Pop
                 utils.CommaJoin(ial.result))
5969 27579978 Iustin Pop
    if ial.required_nodes == 2:
5970 680f0a89 Iustin Pop
      self.op.snode = ial.result[1]
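    # Illustrative outcome: for a DRBD8 instance the allocator has to return
    # two node names, e.g. ial.result == ["node1.example.com",
    # "node2.example.com"] (made-up names), of which the first becomes the
    # primary and the second the secondary node.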
5971 538475ca Iustin Pop
5972 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5973 a8083063 Iustin Pop
    """Build hooks env.
5974 a8083063 Iustin Pop

5975 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5976 a8083063 Iustin Pop

5977 a8083063 Iustin Pop
    """
5978 a8083063 Iustin Pop
    env = {
5979 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
5980 a8083063 Iustin Pop
      }
5981 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
5982 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
5983 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
5984 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
5985 396e1b78 Michael Hanselmann
5986 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
5987 2c2690c9 Iustin Pop
      name=self.op.instance_name,
5988 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
5989 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
5990 4978db17 Iustin Pop
      status=self.op.start,
5991 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
5992 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
5993 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
5994 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
5995 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
5996 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
5997 67fc3042 Iustin Pop
      bep=self.be_full,
5998 67fc3042 Iustin Pop
      hvp=self.hv_full,
5999 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
6000 396e1b78 Michael Hanselmann
    ))
6001 a8083063 Iustin Pop
6002 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6003 a8083063 Iustin Pop
          self.secondaries)
6004 a8083063 Iustin Pop
    return env, nl, nl
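    # The resulting hooks environment is a flat string dict; illustratively it
    # carries "ADD_MODE" plus the INSTANCE_* keys produced by
    # _BuildInstanceHookEnv (e.g. "INSTANCE_NAME", "INSTANCE_PRIMARY"), and nl
    # lists the nodes (master, primary, secondaries) on which the hooks run.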
6005 a8083063 Iustin Pop
6006 a8083063 Iustin Pop
6007 a8083063 Iustin Pop
  def CheckPrereq(self):
6008 a8083063 Iustin Pop
    """Check prerequisites.
6009 a8083063 Iustin Pop

6010 a8083063 Iustin Pop
    """
6011 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
6012 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
6013 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6014 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
6015 eedc99de Manuel Franceschini
6016 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6017 7baf741d Guido Trotter
      src_node = self.op.src_node
6018 7baf741d Guido Trotter
      src_path = self.op.src_path
6019 a8083063 Iustin Pop
6020 c0cbdc67 Guido Trotter
      if src_node is None:
6021 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6022 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
6023 c0cbdc67 Guido Trotter
        found = False
6024 c0cbdc67 Guido Trotter
        for node in exp_list:
6025 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
6026 1b7bfbb7 Iustin Pop
            continue
6027 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
6028 c0cbdc67 Guido Trotter
            found = True
6029 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
6030 c4feafe8 Iustin Pop
            self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6031 c4feafe8 Iustin Pop
                                                         src_path)
6032 c0cbdc67 Guido Trotter
            break
6033 c0cbdc67 Guido Trotter
        if not found:
6034 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
6035 5c983ee5 Iustin Pop
                                      src_path, errors.ECODE_INVAL)
6036 c0cbdc67 Guido Trotter
6037 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
6038 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
6039 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
6040 a8083063 Iustin Pop
6041 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6042 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
6043 5c983ee5 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config",
6044 5c983ee5 Iustin Pop
                                     errors.ECODE_ENVIRON)
6045 a8083063 Iustin Pop
6046 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
6047 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
6048 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6049 5c983ee5 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION),
6050 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
6051 a8083063 Iustin Pop
6052 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
6053 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
6054 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6055 09acf207 Guido Trotter
      if instance_disks < export_disks:
6056 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
6057 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
6058 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
6059 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6060 a8083063 Iustin Pop
6061 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
6062 09acf207 Guido Trotter
      disk_images = []
6063 09acf207 Guido Trotter
      for idx in range(export_disks):
6064 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
6065 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
6066 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
6067 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
6068 c4feafe8 Iustin Pop
          image = utils.PathJoin(src_path, export_name)
6069 09acf207 Guido Trotter
          disk_images.append(image)
6070 09acf207 Guido Trotter
        else:
6071 09acf207 Guido Trotter
          disk_images.append(False)
6072 09acf207 Guido Trotter
6073 09acf207 Guido Trotter
      self.src_images = disk_images
6074 901a65c1 Iustin Pop
6075 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
6076 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
6077 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
6078 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
6079 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
6080 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
6081 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
6082 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6083 bc89efc3 Guido Trotter
6084 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
6085 901a65c1 Iustin Pop
6086 18c8f361 Iustin Pop
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6087 901a65c1 Iustin Pop
    if self.op.ip_check:
6088 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6089 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6090 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
6091 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
6092 901a65c1 Iustin Pop
6093 295728df Guido Trotter
    #### mac address generation
6094 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
6095 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
6096 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
6097 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
6098 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
6099 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
6100 295728df Guido Trotter
    # creation job will fail.
6101 295728df Guido Trotter
    for nic in self.nics:
6102 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6103 36b66e6e Guido Trotter
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
6104 295728df Guido Trotter
6105 538475ca Iustin Pop
    #### allocator run
6106 538475ca Iustin Pop
6107 538475ca Iustin Pop
    if self.op.iallocator is not None:
6108 538475ca Iustin Pop
      self._RunAllocator()
6109 0f1a06e3 Manuel Franceschini
6110 901a65c1 Iustin Pop
    #### node related checks
6111 901a65c1 Iustin Pop
6112 901a65c1 Iustin Pop
    # check primary node
6113 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6114 7baf741d Guido Trotter
    assert self.pnode is not None, \
6115 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
6116 7527a8a4 Iustin Pop
    if pnode.offline:
6117 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6118 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6119 733a2b6a Iustin Pop
    if pnode.drained:
6120 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6121 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6122 7527a8a4 Iustin Pop
6123 901a65c1 Iustin Pop
    self.secondaries = []
6124 901a65c1 Iustin Pop
6125 901a65c1 Iustin Pop
    # mirror node verification
6126 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6127 7baf741d Guido Trotter
      if self.op.snode is None:
6128 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
6129 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
6130 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
6131 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
6132 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
6133 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
6134 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
6135 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
6136 a8083063 Iustin Pop
6137 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
6138 6785674e Iustin Pop
6139 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
6140 08db7c5c Iustin Pop
                                self.disks)
6141 ed1ebc60 Guido Trotter
6142 8d75db10 Iustin Pop
    # Check lv size requirements
6143 8d75db10 Iustin Pop
    if req_size is not None:
6144 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
6145 72737a7f Iustin Pop
                                         self.op.hypervisor)
6146 8d75db10 Iustin Pop
      for node in nodenames:
6147 781de953 Iustin Pop
        info = nodeinfo[node]
6148 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
6149 070e998b Iustin Pop
        info = info.payload
6150 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
6151 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
6152 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
6153 5c983ee5 Iustin Pop
                                     " node %s" % node, errors.ECODE_ENVIRON)
6154 070e998b Iustin Pop
        if req_size > vg_free:
6155 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
6156 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
6157 5c983ee5 Iustin Pop
                                     (node, vg_free, req_size),
6158 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
6159 ed1ebc60 Guido Trotter
6160 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6161 6785674e Iustin Pop
6162 a8083063 Iustin Pop
    # os verification
6163 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
6164 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
6165 045dd6d9 Iustin Pop
                 (self.op.os_type, pnode.name),
6166 045dd6d9 Iustin Pop
                 prereq=True, ecode=errors.ECODE_INVAL)
6167 f2c05717 Guido Trotter
    if not self.op.force_variant:
6168 f2c05717 Guido Trotter
      _CheckOSVariant(result.payload, self.op.os_type)
6169 a8083063 Iustin Pop
6170 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6171 a8083063 Iustin Pop
6172 49ce1563 Iustin Pop
    # memory check on primary node
6173 49ce1563 Iustin Pop
    if self.op.start:
6174 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
6175 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
6176 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
6177 338e51e8 Iustin Pop
                           self.op.hypervisor)
6178 49ce1563 Iustin Pop
6179 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6180 08896026 Iustin Pop
6181 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6182 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6183 a8083063 Iustin Pop

6184 a8083063 Iustin Pop
    """
6185 a8083063 Iustin Pop
    instance = self.op.instance_name
6186 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6187 a8083063 Iustin Pop
6188 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
6189 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6190 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6191 2a6469d5 Alexander Schreiber
    else:
6192 2a6469d5 Alexander Schreiber
      network_port = None
6193 58acb49d Alexander Schreiber
6194 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
6195 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
6196 31a853d2 Iustin Pop
6197 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
6198 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
6199 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
6200 2c313123 Manuel Franceschini
    else:
6201 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
6202 2c313123 Manuel Franceschini
6203 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
6204 c4feafe8 Iustin Pop
    file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6205 c4feafe8 Iustin Pop
                                      string_file_storage_dir, instance)
6206 0f1a06e3 Manuel Franceschini
6207 0f1a06e3 Manuel Franceschini
6208 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6209 a8083063 Iustin Pop
                                  self.op.disk_template,
6210 a8083063 Iustin Pop
                                  instance, pnode_name,
6211 08db7c5c Iustin Pop
                                  self.secondaries,
6212 08db7c5c Iustin Pop
                                  self.disks,
6213 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6214 e2a65344 Iustin Pop
                                  self.op.file_driver,
6215 e2a65344 Iustin Pop
                                  0)
6216 a8083063 Iustin Pop
6217 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6218 a8083063 Iustin Pop
                            primary_node=pnode_name,
6219 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6220 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6221 4978db17 Iustin Pop
                            admin_up=False,
6222 58acb49d Alexander Schreiber
                            network_port=network_port,
6223 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6224 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6225 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6226 a8083063 Iustin Pop
                            )
6227 a8083063 Iustin Pop
6228 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
6229 796cab27 Iustin Pop
    try:
6230 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
6231 796cab27 Iustin Pop
    except errors.OpExecError:
6232 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
6233 796cab27 Iustin Pop
      try:
6234 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
6235 796cab27 Iustin Pop
      finally:
6236 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
6237 796cab27 Iustin Pop
        raise
6238 a8083063 Iustin Pop
6239 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6240 a8083063 Iustin Pop
6241 0debfb35 Guido Trotter
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6242 0debfb35 Guido Trotter
6243 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6244 7baf741d Guido Trotter
    # added the instance to the config
6245 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6246 e36e96b4 Guido Trotter
    # Unlock all the nodes
6247 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6248 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6249 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6250 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6251 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6252 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6253 9c8971d7 Guido Trotter
    else:
6254 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6255 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6256 a8083063 Iustin Pop
6257 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6258 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6259 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6260 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6261 a8083063 Iustin Pop
      time.sleep(15)
6262 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6263 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6264 a8083063 Iustin Pop
    else:
6265 a8083063 Iustin Pop
      disk_abort = False
6266 a8083063 Iustin Pop
6267 a8083063 Iustin Pop
    if disk_abort:
6268 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6269 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6270 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6271 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6272 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6273 3ecf6786 Iustin Pop
                               " this instance")
6274 a8083063 Iustin Pop
6275 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
6276 a8083063 Iustin Pop
                (instance, pnode_name))
6277 a8083063 Iustin Pop
6278 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
6279 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6280 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
6281 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6282 dd713605 Iustin Pop
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6283 dd713605 Iustin Pop
                                               self.op.debug_level)
6284 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
6285 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
6286 a8083063 Iustin Pop
6287 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6288 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6289 a8083063 Iustin Pop
        src_node = self.op.src_node
6290 09acf207 Guido Trotter
        src_images = self.src_images
6291 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6292 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6293 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6294 09acf207 Guido Trotter
                                                         src_node, src_images,
6295 dd713605 Iustin Pop
                                                         cluster_name,
6296 dd713605 Iustin Pop
                                                         self.op.debug_level)
6297 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6298 944bf548 Iustin Pop
        if msg:
6299 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6300 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6301 a8083063 Iustin Pop
      else:
6302 a8083063 Iustin Pop
        # also checked in the prereq part
6303 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6304 3ecf6786 Iustin Pop
                                     % self.op.mode)
6305 a8083063 Iustin Pop
6306 a8083063 Iustin Pop
    if self.op.start:
6307 4978db17 Iustin Pop
      iobj.admin_up = True
6308 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6309 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6310 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6311 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6312 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6313 a8083063 Iustin Pop
6314 08896026 Iustin Pop
    return list(iobj.all_nodes)
6315 08896026 Iustin Pop
6316 a8083063 Iustin Pop
6317 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6318 a8083063 Iustin Pop
  """Connect to an instance's console.
6319 a8083063 Iustin Pop

6320 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6321 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6322 a8083063 Iustin Pop
  console.
6323 a8083063 Iustin Pop

6324 a8083063 Iustin Pop
  """
6325 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6326 8659b73e Guido Trotter
  REQ_BGL = False
6327 8659b73e Guido Trotter
6328 8659b73e Guido Trotter
  def ExpandNames(self):
6329 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6330 a8083063 Iustin Pop
6331 a8083063 Iustin Pop
  def CheckPrereq(self):
6332 a8083063 Iustin Pop
    """Check prerequisites.
6333 a8083063 Iustin Pop

6334 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6335 a8083063 Iustin Pop

6336 a8083063 Iustin Pop
    """
6337 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6338 8659b73e Guido Trotter
    assert self.instance is not None, \
6339 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6340 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6341 a8083063 Iustin Pop
6342 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6343 a8083063 Iustin Pop
    """Connect to the console of an instance
6344 a8083063 Iustin Pop

6345 a8083063 Iustin Pop
    """
6346 a8083063 Iustin Pop
    instance = self.instance
6347 a8083063 Iustin Pop
    node = instance.primary_node
6348 a8083063 Iustin Pop
6349 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6350 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6351 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6352 a8083063 Iustin Pop
6353 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6354 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6355 a8083063 Iustin Pop
6356 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6357 a8083063 Iustin Pop
6358 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6359 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6360 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6361 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6362 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6363 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6364 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
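    # Illustration only: with the Xen hypervisor console_cmd typically ends up
    # being roughly "xm console <instance name>"; the exact command is
    # whatever the hypervisor abstraction chooses to return.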
6365 b047857b Michael Hanselmann
6366 82122173 Iustin Pop
    # build ssh cmdline
6367 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6368 a8083063 Iustin Pop
6369 a8083063 Iustin Pop
6370 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6371 a8083063 Iustin Pop
  """Replace the disks of an instance.
6372 a8083063 Iustin Pop

6373 a8083063 Iustin Pop
  """
6374 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6375 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6376 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6377 efd990e4 Guido Trotter
  REQ_BGL = False
6378 efd990e4 Guido Trotter
6379 7e9366f7 Iustin Pop
  def CheckArguments(self):
6380 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6381 efd990e4 Guido Trotter
      self.op.remote_node = None
6382 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6383 7e9366f7 Iustin Pop
      self.op.iallocator = None
6384 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6385 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6386 7e9366f7 Iustin Pop
6387 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6388 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6389 7e9366f7 Iustin Pop
6390 7e9366f7 Iustin Pop
  def ExpandNames(self):
6391 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6392 7e9366f7 Iustin Pop
6393 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6394 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6395 2bb5c911 Michael Hanselmann
6396 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6397 cf26a87a Iustin Pop
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6398 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6399 2bb5c911 Michael Hanselmann
6400 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6401 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6402 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6403 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6404 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6405 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6406 2bb5c911 Michael Hanselmann
6407 efd990e4 Guido Trotter
    else:
6408 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6409 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6410 efd990e4 Guido Trotter
6411 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6412 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6413 7ea7bcf6 Iustin Pop
                                   self.op.disks, False, self.op.early_release)
6414 c68174b6 Michael Hanselmann
6415 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6416 2bb5c911 Michael Hanselmann
6417 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6418 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6419 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6420 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6421 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6422 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6423 a8083063 Iustin Pop
6424 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6425 a8083063 Iustin Pop
    """Build hooks env.
6426 a8083063 Iustin Pop

6427 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6428 a8083063 Iustin Pop

6429 a8083063 Iustin Pop
    """
6430 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6431 a8083063 Iustin Pop
    env = {
6432 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6433 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6434 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6435 a8083063 Iustin Pop
      }
6436 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6437 0834c866 Iustin Pop
    nl = [
6438 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6439 2bb5c911 Michael Hanselmann
      instance.primary_node,
6440 0834c866 Iustin Pop
      ]
6441 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6442 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6443 a8083063 Iustin Pop
    return env, nl, nl
6444 a8083063 Iustin Pop
6445 2bb5c911 Michael Hanselmann
6446 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6447 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6448 7ffc5a86 Michael Hanselmann

6449 7ffc5a86 Michael Hanselmann
  """
6450 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6451 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6452 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6453 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6454 7ffc5a86 Michael Hanselmann
6455 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6456 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6457 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6458 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6459 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6460 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6461 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6462 7ffc5a86 Michael Hanselmann
6463 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6464 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6465 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6466 7ffc5a86 Michael Hanselmann
6467 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6468 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6469 7ffc5a86 Michael Hanselmann
6470 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6471 7ffc5a86 Michael Hanselmann
6472 7ffc5a86 Michael Hanselmann
    # Declare node locks
6473 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6474 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6475 7ffc5a86 Michael Hanselmann
6476 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6477 cf26a87a Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6478 7ffc5a86 Michael Hanselmann
6479 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6480 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6481 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6482 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6483 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
6484 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6485 7ffc5a86 Michael Hanselmann
6486 7ffc5a86 Michael Hanselmann
    else:
6487 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6488 7ffc5a86 Michael Hanselmann
6489 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6490 7ffc5a86 Michael Hanselmann
    # node
6491 7ffc5a86 Michael Hanselmann
    names = []
6492 3a012b41 Michael Hanselmann
    tasklets = []
6493 7ffc5a86 Michael Hanselmann
6494 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6495 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6496 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6497 7ffc5a86 Michael Hanselmann
6498 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6499 94a1b377 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [],
6500 7ea7bcf6 Iustin Pop
                                True, self.op.early_release)
6501 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6502 7ffc5a86 Michael Hanselmann
6503 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6504 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6505 7ffc5a86 Michael Hanselmann
6506 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6507 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6508 7ffc5a86 Michael Hanselmann
6509 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6510 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6511 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6512 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6513 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6514 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6515 7ffc5a86 Michael Hanselmann
6516 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6517 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6518 7ffc5a86 Michael Hanselmann

6519 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6520 7ffc5a86 Michael Hanselmann

6521 7ffc5a86 Michael Hanselmann
    """
6522 7ffc5a86 Michael Hanselmann
    env = {
6523 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6524 7ffc5a86 Michael Hanselmann
      }
6525 7ffc5a86 Michael Hanselmann
6526 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6527 7ffc5a86 Michael Hanselmann
6528 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6529 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6530 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6531 7ffc5a86 Michael Hanselmann
6532 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
6533 7ffc5a86 Michael Hanselmann
6534 7ffc5a86 Michael Hanselmann
6535 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6536 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6537 2bb5c911 Michael Hanselmann

6538 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6539 2bb5c911 Michael Hanselmann

6540 2bb5c911 Michael Hanselmann
  """
6541 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6542 7ea7bcf6 Iustin Pop
               disks, delay_iallocator, early_release):
6543 2bb5c911 Michael Hanselmann
    """Initializes this class.
6544 2bb5c911 Michael Hanselmann

6545 2bb5c911 Michael Hanselmann
    """
6546 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
6547 464243a7 Michael Hanselmann
6548 2bb5c911 Michael Hanselmann
    # Parameters
6549 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
6550 2bb5c911 Michael Hanselmann
    self.mode = mode
6551 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
6552 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
6553 2bb5c911 Michael Hanselmann
    self.disks = disks
6554 94a1b377 Michael Hanselmann
    self.delay_iallocator = delay_iallocator
6555 7ea7bcf6 Iustin Pop
    self.early_release = early_release
6556 2bb5c911 Michael Hanselmann
6557 2bb5c911 Michael Hanselmann
    # Runtime data
6558 2bb5c911 Michael Hanselmann
    self.instance = None
6559 2bb5c911 Michael Hanselmann
    self.new_node = None
6560 2bb5c911 Michael Hanselmann
    self.target_node = None
6561 2bb5c911 Michael Hanselmann
    self.other_node = None
6562 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
6563 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
6564 2bb5c911 Michael Hanselmann
6565 2bb5c911 Michael Hanselmann
  @staticmethod
6566 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
6567 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
6568 c68174b6 Michael Hanselmann

6569 c68174b6 Michael Hanselmann
    """
6570 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
6571 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
6572 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
6573 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
6574 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
6575 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
6576 02a00186 Michael Hanselmann
6577 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
6578 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
6579 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
6580 02a00186 Michael Hanselmann
6581 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
6582 02a00186 Michael Hanselmann
      # Not replacing the secondary
6583 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
6584 02a00186 Michael Hanselmann
                                 " only be used when changing the"
6585 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
6586 2bb5c911 Michael Hanselmann
6587 2bb5c911 Michael Hanselmann
  @staticmethod
6588 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6589 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
6590 2bb5c911 Michael Hanselmann

6591 2bb5c911 Michael Hanselmann
    """
6592 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
6593 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
6594 2bb5c911 Michael Hanselmann
                     name=instance_name,
6595 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
6596 2bb5c911 Michael Hanselmann
6597 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
6598 2bb5c911 Michael Hanselmann
6599 2bb5c911 Michael Hanselmann
    if not ial.success:
6600 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6601 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
6602 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6603 2bb5c911 Michael Hanselmann
6604 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6605 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6606 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
6607 d984846d Iustin Pop
                                 (iallocator_name,
6608 680f0a89 Iustin Pop
                                  len(ial.result), ial.required_nodes),
6609 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6610 2bb5c911 Michael Hanselmann
6611 680f0a89 Iustin Pop
    remote_node_name = ial.result[0]
6612 2bb5c911 Michael Hanselmann
6613 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6614 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
6615 2bb5c911 Michael Hanselmann
6616 2bb5c911 Michael Hanselmann
    return remote_node_name
6617 2bb5c911 Michael Hanselmann
6618 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
6619 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6620 2d9005d8 Michael Hanselmann
                                    node_name, True)
6621 942be002 Michael Hanselmann
6622 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
6623 2bb5c911 Michael Hanselmann
    """Check prerequisites.
6624 2bb5c911 Michael Hanselmann

6625 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
6626 2bb5c911 Michael Hanselmann

6627 2bb5c911 Michael Hanselmann
    """
6628 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
6629 e9022531 Iustin Pop
    assert instance is not None, \
6630 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
6631 2bb5c911 Michael Hanselmann
6632 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
6633 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6634 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
6635 a8083063 Iustin Pop
6636 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
6637 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
6638 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
6639 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
6640 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6641 a8083063 Iustin Pop
6642 94a1b377 Michael Hanselmann
    if not self.delay_iallocator:
6643 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
6644 94a1b377 Michael Hanselmann
6645 94a1b377 Michael Hanselmann
  def _CheckPrereq2(self):
6646 94a1b377 Michael Hanselmann
    """Check prerequisites, second part.
6647 94a1b377 Michael Hanselmann

6648 94a1b377 Michael Hanselmann
    This function should always be part of CheckPrereq. It was separated and is
6649 94a1b377 Michael Hanselmann
    now called from Exec because, during node evacuation, the iallocator was
6650 94a1b377 Michael Hanselmann
    only called with an unmodified cluster model, not taking planned changes
6651 94a1b377 Michael Hanselmann
    into account.
6652 94a1b377 Michael Hanselmann

6653 94a1b377 Michael Hanselmann
    """
6654 94a1b377 Michael Hanselmann
    instance = self.instance
6655 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
6656 a9e0c397 Iustin Pop
6657 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
6658 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
6659 2bb5c911 Michael Hanselmann
    else:
6660 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
6661 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
6662 b6e82a65 Iustin Pop
6663 a9e0c397 Iustin Pop
    if remote_node is not None:
6664 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
6665 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
6666 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
6667 a9e0c397 Iustin Pop
    else:
6668 a9e0c397 Iustin Pop
      self.remote_node_info = None
6669 2bb5c911 Michael Hanselmann
6670 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
6671 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
6672 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
6673 2bb5c911 Michael Hanselmann
6674 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
6675 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
6676 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
6677 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6678 7e9366f7 Iustin Pop
6679 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
6680 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
6681 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
6682 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6683 942be002 Michael Hanselmann
6684 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
6685 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
6686 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
6687 942be002 Michael Hanselmann
6688 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
6689 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
6690 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
6691 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
6692 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
6693 942be002 Michael Hanselmann
6694 942be002 Michael Hanselmann
      if faulty_primary:
6695 942be002 Michael Hanselmann
        self.disks = faulty_primary
6696 e9022531 Iustin Pop
        self.target_node = instance.primary_node
6697 942be002 Michael Hanselmann
        self.other_node = secondary_node
6698 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6699 942be002 Michael Hanselmann
      elif faulty_secondary:
6700 942be002 Michael Hanselmann
        self.disks = faulty_secondary
6701 942be002 Michael Hanselmann
        self.target_node = secondary_node
6702 e9022531 Iustin Pop
        self.other_node = instance.primary_node
6703 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6704 942be002 Michael Hanselmann
      else:
6705 942be002 Michael Hanselmann
        self.disks = []
6706 942be002 Michael Hanselmann
        check_nodes = []
6707 942be002 Michael Hanselmann
6708 942be002 Michael Hanselmann
    else:
6709 942be002 Michael Hanselmann
      # Non-automatic modes
6710 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
6711 e9022531 Iustin Pop
        self.target_node = instance.primary_node
6712 942be002 Michael Hanselmann
        self.other_node = secondary_node
6713 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6714 7e9366f7 Iustin Pop
6715 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
6716 942be002 Michael Hanselmann
        self.target_node = secondary_node
6717 e9022531 Iustin Pop
        self.other_node = instance.primary_node
6718 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
6719 a9e0c397 Iustin Pop
6720 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
6721 942be002 Michael Hanselmann
        self.new_node = remote_node
6722 e9022531 Iustin Pop
        self.other_node = instance.primary_node
6723 942be002 Michael Hanselmann
        self.target_node = secondary_node
6724 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
6725 54155f52 Iustin Pop
6726 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
6727 a8083063 Iustin Pop
6728 9af0fa6a Iustin Pop
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
6729 9af0fa6a Iustin Pop
        assert old_node_info is not None
6730 9af0fa6a Iustin Pop
        if old_node_info.offline and not self.early_release:
6731 9af0fa6a Iustin Pop
          # doesn't make sense to delay the release
6732 9af0fa6a Iustin Pop
          self.early_release = True
6733 9af0fa6a Iustin Pop
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
6734 9af0fa6a Iustin Pop
                          " early-release mode", secondary_node)
6735 9af0fa6a Iustin Pop
6736 942be002 Michael Hanselmann
      else:
6737 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
6738 942be002 Michael Hanselmann
                                     self.mode)
6739 942be002 Michael Hanselmann
6740 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
6741 942be002 Michael Hanselmann
      if not self.disks:
6742 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
6743 a9e0c397 Iustin Pop
6744 2bb5c911 Michael Hanselmann
    for node in check_nodes:
6745 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
6746 e4376078 Iustin Pop
6747 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
6748 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
6749 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
6750 e4376078 Iustin Pop
6751 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
6752 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
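    # node_2nd_ip will map node names to their secondary IPs, e.g.
    # {"node1.example.com": "192.0.2.10"} -- the address is illustrative only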
6753 e4376078 Iustin Pop
6754 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
6755 2bb5c911 Michael Hanselmann
      if node_name is not None:
6756 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
6757 e4376078 Iustin Pop
6758 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
6759 a9e0c397 Iustin Pop
6760 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
6761 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
6762 2bb5c911 Michael Hanselmann

6763 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
6764 cff90b79 Iustin Pop

6765 a9e0c397 Iustin Pop
    """
6766 94a1b377 Michael Hanselmann
    if self.delay_iallocator:
6767 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
6768 94a1b377 Michael Hanselmann
6769 942be002 Michael Hanselmann
    if not self.disks:
6770 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
6771 942be002 Michael Hanselmann
      return
6772 942be002 Michael Hanselmann
6773 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
6774 1f864b60 Iustin Pop
                (utils.CommaJoin(self.disks), self.instance.name))
6775 7ffc5a86 Michael Hanselmann
6776 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
6777 2bb5c911 Michael Hanselmann
6778 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
6779 2bb5c911 Michael Hanselmann
    if activate_disks:
6780 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
6781 2bb5c911 Michael Hanselmann
6782 2bb5c911 Michael Hanselmann
    try:
6783 942be002 Michael Hanselmann
      # Should we replace the secondary node?
6784 942be002 Michael Hanselmann
      if self.new_node is not None:
6785 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
6786 2bb5c911 Michael Hanselmann
      else:
6787 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
6788 a4eae71f Michael Hanselmann
6789 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
6790 2bb5c911 Michael Hanselmann
6791 2bb5c911 Michael Hanselmann
    finally:
6792 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
6793 5c983ee5 Iustin Pop
      # down instance
6794 2bb5c911 Michael Hanselmann
      if activate_disks:
6795 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
6796 2bb5c911 Michael Hanselmann
6797 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
6798 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
6799 2bb5c911 Michael Hanselmann
6800 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
6801 cff90b79 Iustin Pop
6802 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
6803 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
6804 cff90b79 Iustin Pop
    if not results:
6805 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
6806 2bb5c911 Michael Hanselmann
6807 2bb5c911 Michael Hanselmann
    for node in nodes:
6808 781de953 Iustin Pop
      res = results[node]
6809 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
6810 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
6811 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
6812 2bb5c911 Michael Hanselmann
                                 (vgname, node))
6813 2bb5c911 Michael Hanselmann
6814 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
6815 2bb5c911 Michael Hanselmann
    # Check disk existence
6816 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6817 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6818 cff90b79 Iustin Pop
        continue
6819 2bb5c911 Michael Hanselmann
6820 2bb5c911 Michael Hanselmann
      for node in nodes:
6821 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
6822 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
6823 2bb5c911 Michael Hanselmann
6824 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
6825 2bb5c911 Michael Hanselmann
6826 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6827 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
6828 2bb5c911 Michael Hanselmann
          if not msg:
6829 2bb5c911 Michael Hanselmann
            msg = "disk not found"
6830 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
6831 23829f6f Iustin Pop
                                   (idx, node, msg))
6832 cff90b79 Iustin Pop
6833 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
6834 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6835 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6836 cff90b79 Iustin Pop
        continue
6837 cff90b79 Iustin Pop
6838 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
6839 2bb5c911 Michael Hanselmann
                      (idx, node_name))
6840 2bb5c911 Michael Hanselmann
6841 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
6842 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
6843 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
6844 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
6845 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
6846 2bb5c911 Michael Hanselmann
6847 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
6848 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
6849 2bb5c911 Michael Hanselmann
    iv_names = {}
6850 2bb5c911 Michael Hanselmann
6851 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
6852 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
6853 a9e0c397 Iustin Pop
        continue
6854 2bb5c911 Michael Hanselmann
6855 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
6856 2bb5c911 Michael Hanselmann
6857 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
6858 2bb5c911 Michael Hanselmann
6859 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
6860 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
6861 2bb5c911 Michael Hanselmann
6862 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
6863 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
6864 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6865 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
6866 2bb5c911 Michael Hanselmann
6867 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
6868 a9e0c397 Iustin Pop
      old_lvs = dev.children
6869 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
6870 2bb5c911 Michael Hanselmann
6871 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
6872 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
6873 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
6874 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
6875 2bb5c911 Michael Hanselmann
6876 2bb5c911 Michael Hanselmann
    return iv_names
6877 2bb5c911 Michael Hanselmann
6878 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
6879 1122eb25 Iustin Pop
    for name, (dev, _, _) in iv_names.iteritems():
6880 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
6881 2bb5c911 Michael Hanselmann
6882 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
6883 2bb5c911 Michael Hanselmann
6884 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
6885 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
6886 2bb5c911 Michael Hanselmann
        if not msg:
6887 2bb5c911 Michael Hanselmann
          msg = "disk not found"
6888 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
6889 2bb5c911 Michael Hanselmann
                                 (name, msg))
6890 2bb5c911 Michael Hanselmann
6891 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
6892 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
6893 2bb5c911 Michael Hanselmann
6894 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
6895 1122eb25 Iustin Pop
    for name, (_, old_lvs, _) in iv_names.iteritems():
6896 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
6897 2bb5c911 Michael Hanselmann
6898 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
6899 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
6900 2bb5c911 Michael Hanselmann
6901 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
6902 2bb5c911 Michael Hanselmann
        if msg:
6903 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
6904 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
6905 2bb5c911 Michael Hanselmann
6906 7ea7bcf6 Iustin Pop
  def _ReleaseNodeLock(self, node_name):
6907 7ea7bcf6 Iustin Pop
    """Releases the lock for a given node."""
6908 7ea7bcf6 Iustin Pop
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
6909 7ea7bcf6 Iustin Pop
6910 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
6911 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
6912 2bb5c911 Michael Hanselmann

6913 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
6914 2bb5c911 Michael Hanselmann

6915 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
6916 2bb5c911 Michael Hanselmann

6917 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
6918 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
6919 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
6920 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
6921 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
6922 2bb5c911 Michael Hanselmann

6923 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
6924 2bb5c911 Michael Hanselmann

6925 2bb5c911 Michael Hanselmann
      1. for each modified disk:
6926 2bb5c911 Michael Hanselmann

6927 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
6928 2bb5c911 Michael Hanselmann

6929 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
6930 2bb5c911 Michael Hanselmann

6931 2bb5c911 Michael Hanselmann
    """
6932 2bb5c911 Michael Hanselmann
    steps_total = 6
6933 2bb5c911 Michael Hanselmann
6934 2bb5c911 Michael Hanselmann
    # Step: check device activation
6935 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
6936 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
6937 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
6938 2bb5c911 Michael Hanselmann
6939 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
6940 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
6941 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
6942 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
6943 2bb5c911 Michael Hanselmann
                                False)
6944 2bb5c911 Michael Hanselmann
6945 2bb5c911 Michael Hanselmann
    # Step: create new storage
6946 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
6947 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
6948 a9e0c397 Iustin Pop
6949 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
6950 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
6951 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
6952 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
6953 2bb5c911 Michael Hanselmann
6954 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
6955 4d4a651d Michael Hanselmann
                                                     old_lvs)
6956 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
6957 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
6958 cff90b79 Iustin Pop
      #dev.children = []
6959 cff90b79 Iustin Pop
      #cfg.Update(instance)
6960 a9e0c397 Iustin Pop
6961 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
6962 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
6963 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
6964 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
6965 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
6966 cff90b79 Iustin Pop
6967 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
6968 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
6969 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
6970 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
6971 2bb5c911 Michael Hanselmann
6972 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
6973 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
6974 cff90b79 Iustin Pop
      for to_ren in old_lvs:
6975 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
6976 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
6977 23829f6f Iustin Pop
          # device exists
6978 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
6979 cff90b79 Iustin Pop
6980 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
6981 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
6982 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
6983 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
6984 2bb5c911 Michael Hanselmann
6985 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
6986 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
6987 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
6988 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
6989 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
6990 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
6991 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
6992 cff90b79 Iustin Pop
6993 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
6994 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
6995 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
6996 a9e0c397 Iustin Pop
6997 cff90b79 Iustin Pop
      for disk in old_lvs:
6998 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
6999 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
7000 a9e0c397 Iustin Pop
7001 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
7002 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7003 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7004 4d4a651d Michael Hanselmann
                                                  new_lvs)
7005 4c4e4e1e Iustin Pop
      msg = result.fail_msg
7006 2cc1da8b Iustin Pop
      if msg:
7007 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
7008 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7009 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
7010 4c4e4e1e Iustin Pop
          if msg2:
7011 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7012 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
7013 2bb5c911 Michael Hanselmann
                                     "volumes"))
7014 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7015 a9e0c397 Iustin Pop
7016 a9e0c397 Iustin Pop
      dev.children = new_lvs
7017 a9e0c397 Iustin Pop
7018 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
7019 a9e0c397 Iustin Pop
7020 7ea7bcf6 Iustin Pop
    cstep = 5
7021 7ea7bcf6 Iustin Pop
    if self.early_release:
7022 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7023 7ea7bcf6 Iustin Pop
      cstep += 1
7024 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7025 d5cd389c Iustin Pop
      # WARNING: we release both node locks here, do not do other RPCs
7026 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7027 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.target_node, self.other_node])
7028 7ea7bcf6 Iustin Pop
7029 2bb5c911 Michael Hanselmann
    # Wait for sync
7030 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7031 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7032 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7033 7ea7bcf6 Iustin Pop
    cstep += 1
7034 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7035 a9e0c397 Iustin Pop
7036 2bb5c911 Michael Hanselmann
    # Check all devices manually
7037 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7038 a9e0c397 Iustin Pop
7039 cff90b79 Iustin Pop
    # Step: remove old storage
7040 7ea7bcf6 Iustin Pop
    if not self.early_release:
7041 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7042 7ea7bcf6 Iustin Pop
      cstep += 1
7043 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7044 a9e0c397 Iustin Pop
7045 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
7046 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
7047 a9e0c397 Iustin Pop

7048 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
7049 a9e0c397 Iustin Pop
      - for all disks of the instance:
7050 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
7051 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
7052 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
7053 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
7054 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
7055 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
7056 a9e0c397 Iustin Pop
          finds a device which is connected to the correct local disks but
7057 a9e0c397 Iustin Pop
          not network enabled
7058 a9e0c397 Iustin Pop
      - wait for sync across all devices
7059 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
7060 a9e0c397 Iustin Pop

7061 a9e0c397 Iustin Pop
    Failures are not very well handled.
7062 0834c866 Iustin Pop

7063 a9e0c397 Iustin Pop
    """
7064 0834c866 Iustin Pop
    steps_total = 6
7065 0834c866 Iustin Pop
7066 0834c866 Iustin Pop
    # Step: check device activation
7067 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7068 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
7069 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
7070 0834c866 Iustin Pop
7071 0834c866 Iustin Pop
    # Step: check other node consistency
7072 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7073 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7074 0834c866 Iustin Pop
7075 0834c866 Iustin Pop
    # Step: create new storage
7076 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7077 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7078 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7079 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
7080 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
7081 a9e0c397 Iustin Pop
      for new_lv in dev.children:
7082 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7083 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7084 a9e0c397 Iustin Pop
7085 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
7086 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
7087 a1578d63 Iustin Pop
    # error and the success paths
7088 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7089 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7090 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
7091 2bb5c911 Michael Hanselmann
                                        self.instance.name)
7092 099c52ad Iustin Pop
    logging.debug("Allocated minors %r", minors)
7093 2bb5c911 Michael Hanselmann
7094 2bb5c911 Michael Hanselmann
    iv_names = {}
7095 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7096 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7097 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
7098 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
7099 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
7100 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
7101 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
7102 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7103 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
7104 a2d59d8b Iustin Pop
        p_minor = o_minor1
7105 ffa1c0dc Iustin Pop
      else:
7106 1122eb25 Iustin Pop
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7107 a2d59d8b Iustin Pop
        p_minor = o_minor2
7108 a2d59d8b Iustin Pop
7109 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7110 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
7111 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7112 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
7113 a2d59d8b Iustin Pop
7114 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
7115 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7116 a2d59d8b Iustin Pop
                    new_net_id)
7117 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7118 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
7119 8a6c7011 Iustin Pop
                              children=dev.children,
7120 8a6c7011 Iustin Pop
                              size=dev.size)
7121 796cab27 Iustin Pop
      try:
7122 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7123 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
7124 82759cb1 Iustin Pop
      except errors.GenericError:
7125 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7126 796cab27 Iustin Pop
        raise
7127 a9e0c397 Iustin Pop
7128 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
7129 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7130 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7131 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
7132 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7133 cacfd1fd Iustin Pop
      if msg:
7134 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7135 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
7136 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
7137 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
7138 a9e0c397 Iustin Pop
7139 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7140 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7141 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
7142 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
7143 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
7144 642445d9 Iustin Pop
7145 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7146 a2d59d8b Iustin Pop
    if msg:
7147 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
7148 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7149 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
7150 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
7151 642445d9 Iustin Pop
7152 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
7153 642445d9 Iustin Pop
    # the instance to point to the new secondary
7154 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
7155 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
7156 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
7157 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7158 2bb5c911 Michael Hanselmann
7159 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
7160 a9e0c397 Iustin Pop
7161 642445d9 Iustin Pop
    # and now perform the drbd attach
7162 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7163 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
7164 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7165 4d4a651d Michael Hanselmann
                                            self.new_node],
7166 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
7167 4d4a651d Michael Hanselmann
                                           self.instance.disks,
7168 4d4a651d Michael Hanselmann
                                           self.instance.name,
7169 a2d59d8b Iustin Pop
                                           False)
7170 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
7171 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
7172 a2d59d8b Iustin Pop
      if msg:
7173 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7174 4d4a651d Michael Hanselmann
                           to_node, msg,
7175 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
7176 2bb5c911 Michael Hanselmann
                                 " status of disks"))
7177 7ea7bcf6 Iustin Pop
    cstep = 5
7178 7ea7bcf6 Iustin Pop
    if self.early_release:
7179 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7180 7ea7bcf6 Iustin Pop
      cstep += 1
7181 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7182 d5cd389c Iustin Pop
      # WARNING: we release all node locks here, do not do other RPCs
7183 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7184 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.instance.primary_node,
7185 d5cd389c Iustin Pop
                             self.target_node,
7186 d5cd389c Iustin Pop
                             self.new_node])
7187 a9e0c397 Iustin Pop
7188 2bb5c911 Michael Hanselmann
    # Wait for sync
7189 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7190 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7191 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7192 7ea7bcf6 Iustin Pop
    cstep += 1
7193 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7194 a9e0c397 Iustin Pop
7195 2bb5c911 Michael Hanselmann
    # Check all devices manually
7196 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7197 22985314 Guido Trotter
7198 2bb5c911 Michael Hanselmann
    # Step: remove old storage
7199 7ea7bcf6 Iustin Pop
    if not self.early_release:
7200 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7201 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7202 a9e0c397 Iustin Pop
7203 a8083063 Iustin Pop
7204 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
7205 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
7206 76aef8fc Michael Hanselmann

7207 76aef8fc Michael Hanselmann
  """
7208 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
7209 76aef8fc Michael Hanselmann
  REQ_BGL = False
7210 76aef8fc Michael Hanselmann
7211 76aef8fc Michael Hanselmann
  def CheckArguments(self):
7212 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7213 76aef8fc Michael Hanselmann
7214 76aef8fc Michael Hanselmann
  def ExpandNames(self):
7215 76aef8fc Michael Hanselmann
    self.needed_locks = {
7216 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
7217 76aef8fc Michael Hanselmann
      }
7218 76aef8fc Michael Hanselmann
7219 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
7220 7e9c6a78 Iustin Pop
    """Ensure faulty disks abort the opcode or at least warn."""
7221 7e9c6a78 Iustin Pop
    try:
7222 7e9c6a78 Iustin Pop
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7223 7e9c6a78 Iustin Pop
                                  node_name, True):
7224 7e9c6a78 Iustin Pop
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7225 7e9c6a78 Iustin Pop
                                   " node '%s'" % (instance.name, node_name),
7226 7e9c6a78 Iustin Pop
                                   errors.ECODE_STATE)
7227 7e9c6a78 Iustin Pop
    except errors.OpPrereqError, err:
7228 7e9c6a78 Iustin Pop
      if self.op.ignore_consistency:
7229 7e9c6a78 Iustin Pop
        self.proc.LogWarning(str(err.args[0]))
7230 7e9c6a78 Iustin Pop
      else:
7231 7e9c6a78 Iustin Pop
        raise
7232 76aef8fc Michael Hanselmann
7233 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
7234 76aef8fc Michael Hanselmann
    """Check prerequisites.
7235 76aef8fc Michael Hanselmann

7236 76aef8fc Michael Hanselmann
    """
7237 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7238 76aef8fc Michael Hanselmann
7239 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7240 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7241 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7242 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7243 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7244 76aef8fc Michael Hanselmann
7245 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7246 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7247 7e9c6a78 Iustin Pop
      if not inst.admin_up:
7248 7e9c6a78 Iustin Pop
        continue
7249 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7250 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7251 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7252 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7253 76aef8fc Michael Hanselmann
7254 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7255 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7256 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7257 76aef8fc Michael Hanselmann
7258 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7259 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7260 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7261 76aef8fc Michael Hanselmann
                                           self.op.name,
7262 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7263 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7264 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7265 76aef8fc Michael Hanselmann
7266 76aef8fc Michael Hanselmann
7267 f7e7689f Iustin Pop
class LUNodeEvacuationStrategy(NoHooksLU):
7268 f7e7689f Iustin Pop
  """Computes the node evacuation strategy.
7269 f7e7689f Iustin Pop

7270 f7e7689f Iustin Pop
  """
7271 f7e7689f Iustin Pop
  _OP_REQP = ["nodes"]
7272 f7e7689f Iustin Pop
  REQ_BGL = False
7273 f7e7689f Iustin Pop
7274 f7e7689f Iustin Pop
  def CheckArguments(self):
7275 f7e7689f Iustin Pop
    if not hasattr(self.op, "remote_node"):
7276 f7e7689f Iustin Pop
      self.op.remote_node = None
7277 f7e7689f Iustin Pop
    if not hasattr(self.op, "iallocator"):
7278 f7e7689f Iustin Pop
      self.op.iallocator = None
7279 f7e7689f Iustin Pop
    if self.op.remote_node is not None and self.op.iallocator is not None:
7280 f7e7689f Iustin Pop
      raise errors.OpPrereqError("Give either the iallocator or the new"
7281 f7e7689f Iustin Pop
                                 " secondary, not both", errors.ECODE_INVAL)
7282 f7e7689f Iustin Pop
7283 f7e7689f Iustin Pop
  def ExpandNames(self):
7284 f7e7689f Iustin Pop
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7285 f7e7689f Iustin Pop
    self.needed_locks = locks = {}
7286 f7e7689f Iustin Pop
    if self.op.remote_node is None:
7287 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7288 f7e7689f Iustin Pop
    else:
7289 f7e7689f Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7290 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7291 f7e7689f Iustin Pop
7292 f7e7689f Iustin Pop
  def CheckPrereq(self):
7293 f7e7689f Iustin Pop
    pass
7294 f7e7689f Iustin Pop
7295 f7e7689f Iustin Pop
  def Exec(self, feedback_fn):
7296 f7e7689f Iustin Pop
    if self.op.remote_node is not None:
7297 f7e7689f Iustin Pop
      instances = []
7298 f7e7689f Iustin Pop
      for node in self.op.nodes:
7299 f7e7689f Iustin Pop
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7300 f7e7689f Iustin Pop
      result = []
7301 f7e7689f Iustin Pop
      for i in instances:
7302 f7e7689f Iustin Pop
        if i.primary_node == self.op.remote_node:
7303 f7e7689f Iustin Pop
          raise errors.OpPrereqError("Node %s is the primary node of"
7304 f7e7689f Iustin Pop
                                     " instance %s, cannot use it as"
7305 f7e7689f Iustin Pop
                                     " secondary" %
7306 f7e7689f Iustin Pop
                                     (self.op.remote_node, i.name),
7307 f7e7689f Iustin Pop
                                     errors.ECODE_INVAL)
7308 f7e7689f Iustin Pop
        result.append([i.name, self.op.remote_node])
7309 f7e7689f Iustin Pop
    else:
7310 f7e7689f Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
7311 f7e7689f Iustin Pop
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7312 f7e7689f Iustin Pop
                       evac_nodes=self.op.nodes)
7313 f7e7689f Iustin Pop
      ial.Run(self.op.iallocator, validate=True)
7314 f7e7689f Iustin Pop
      if not ial.success:
7315 f7e7689f Iustin Pop
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7316 f7e7689f Iustin Pop
                                 errors.ECODE_NORES)
7317 f7e7689f Iustin Pop
      result = ial.result
7318 f7e7689f Iustin Pop
    return result
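    # Illustrative sketch only (names below are made up): with an explicit
    # remote_node the result is built above as [instance_name, remote_node]
    # pairs, e.g.
    #
    #   [["inst1.example.com", "node3.example.com"],
    #    ["inst2.example.com", "node3.example.com"]]
    #
    # With an iallocator, the result is taken verbatim from ial.result of the
    # IALLOCATOR_MODE_MEVAC run.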
7319 f7e7689f Iustin Pop
7320 f7e7689f Iustin Pop
7321 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7322 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7323 8729e0d7 Iustin Pop

7324 8729e0d7 Iustin Pop
  """
7325 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7326 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7327 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7328 31e63dbf Guido Trotter
  REQ_BGL = False
7329 31e63dbf Guido Trotter
7330 31e63dbf Guido Trotter
  def ExpandNames(self):
7331 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7332 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7333 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7334 31e63dbf Guido Trotter
7335 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7336 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7337 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7338 8729e0d7 Iustin Pop
7339 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7340 8729e0d7 Iustin Pop
    """Build hooks env.
7341 8729e0d7 Iustin Pop

7342 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7343 8729e0d7 Iustin Pop

7344 8729e0d7 Iustin Pop
    """
7345 8729e0d7 Iustin Pop
    env = {
7346 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7347 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7348 8729e0d7 Iustin Pop
      }
7349 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7350 abd8e836 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7351 8729e0d7 Iustin Pop
    return env, nl, nl
7352 8729e0d7 Iustin Pop
7353 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7354 8729e0d7 Iustin Pop
    """Check prerequisites.
7355 8729e0d7 Iustin Pop

7356 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7357 8729e0d7 Iustin Pop

7358 8729e0d7 Iustin Pop
    """
7359 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7360 31e63dbf Guido Trotter
    assert instance is not None, \
7361 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7362 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7363 6b12959c Iustin Pop
    for node in nodenames:
7364 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7365 7527a8a4 Iustin Pop
7366 31e63dbf Guido Trotter
7367 8729e0d7 Iustin Pop
    self.instance = instance
7368 8729e0d7 Iustin Pop
7369 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
7370 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7371 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7372 8729e0d7 Iustin Pop
7373 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7374 8729e0d7 Iustin Pop
7375 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
7376 72737a7f Iustin Pop
                                       instance.hypervisor)
7377 8729e0d7 Iustin Pop
    for node in nodenames:
7378 781de953 Iustin Pop
      info = nodeinfo[node]
7379 4c4e4e1e Iustin Pop
      info.Raise("Cannot get current information from node %s" % node)
7380 070e998b Iustin Pop
      vg_free = info.payload.get('vg_free', None)
7381 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
7382 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
7383 5c983ee5 Iustin Pop
                                   " node %s" % node, errors.ECODE_ENVIRON)
7384 781de953 Iustin Pop
      if self.op.amount > vg_free:
7385 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
7386 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
7387 5c983ee5 Iustin Pop
                                   (node, vg_free, self.op.amount),
7388 5c983ee5 Iustin Pop
                                   errors.ECODE_NORES)
7389 8729e0d7 Iustin Pop
7390 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7391 8729e0d7 Iustin Pop
    """Execute disk grow.
7392 8729e0d7 Iustin Pop

7393 8729e0d7 Iustin Pop
    """
7394 8729e0d7 Iustin Pop
    instance = self.instance
7395 ad24e046 Iustin Pop
    disk = self.disk
7396 6b12959c Iustin Pop
    for node in instance.all_nodes:
7397 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7398 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7399 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7400 5bc556dd Michael Hanselmann
7401 5bc556dd Michael Hanselmann
      # TODO: Rewrite code to work properly
7402 5bc556dd Michael Hanselmann
      # DRBD goes into sync mode for a short amount of time after executing the
7403 5bc556dd Michael Hanselmann
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7404 5bc556dd Michael Hanselmann
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7405 5bc556dd Michael Hanselmann
      # time is a work-around.
7406 5bc556dd Michael Hanselmann
      time.sleep(5)
7407 5bc556dd Michael Hanselmann
7408 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
7409 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7410 6605411d Iustin Pop
    if self.op.wait_for_sync:
7411 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7412 6605411d Iustin Pop
      if disk_abort:
7413 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7414 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7415 8729e0d7 Iustin Pop
7416 8729e0d7 Iustin Pop
7417 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7418 a8083063 Iustin Pop
  """Query runtime instance data.
7419 a8083063 Iustin Pop

7420 a8083063 Iustin Pop
  """
7421 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7422 a987fa48 Guido Trotter
  REQ_BGL = False
7423 ae5849b5 Michael Hanselmann
7424 a987fa48 Guido Trotter
  def ExpandNames(self):
7425 a987fa48 Guido Trotter
    self.needed_locks = {}
7426 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7427 a987fa48 Guido Trotter
7428 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7429 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7430 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7431 a987fa48 Guido Trotter
7432 a987fa48 Guido Trotter
    if self.op.instances:
7433 a987fa48 Guido Trotter
      self.wanted_names = []
7434 a987fa48 Guido Trotter
      for name in self.op.instances:
7435 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
7436 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7437 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7438 a987fa48 Guido Trotter
    else:
7439 a987fa48 Guido Trotter
      self.wanted_names = None
7440 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7441 a987fa48 Guido Trotter
7442 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7443 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7444 a987fa48 Guido Trotter
7445 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7446 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7447 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7448 a8083063 Iustin Pop
7449 a8083063 Iustin Pop
  def CheckPrereq(self):
7450 a8083063 Iustin Pop
    """Check prerequisites.
7451 a8083063 Iustin Pop

7452 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7453 a8083063 Iustin Pop

7454 a8083063 Iustin Pop
    """
7455 a987fa48 Guido Trotter
    if self.wanted_names is None:
7456 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7457 a8083063 Iustin Pop
7458 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7459 a987fa48 Guido Trotter
                             in self.wanted_names]
7460 a987fa48 Guido Trotter
    return
7461 a8083063 Iustin Pop
7462 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7463 98825740 Michael Hanselmann
    """Returns the status of a block device
7464 98825740 Michael Hanselmann

7465 98825740 Michael Hanselmann
    """
7466 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7467 98825740 Michael Hanselmann
      return None
7468 98825740 Michael Hanselmann
7469 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7470 98825740 Michael Hanselmann
7471 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7472 98825740 Michael Hanselmann
    if result.offline:
7473 98825740 Michael Hanselmann
      return None
7474 98825740 Michael Hanselmann
7475 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7476 98825740 Michael Hanselmann
7477 98825740 Michael Hanselmann
    status = result.payload
7478 ddfe2228 Michael Hanselmann
    if status is None:
7479 ddfe2228 Michael Hanselmann
      return None
7480 98825740 Michael Hanselmann
7481 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7482 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7483 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
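    # Illustrative sketch only (values below are made up): for a live query on
    # a reachable node the tuple above looks like
    #
    #   ("/dev/drbd0",  # dev_path
    #    147, 0,        # major, minor
    #    100.0, 0,      # sync_percent, estimated_time
    #    False,         # is_degraded
    #    None)          # ldisk_status
    #
    # None is returned for static queries, a missing or offline node, or a
    # device the node could not find.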
7484 98825740 Michael Hanselmann
7485 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7486 a8083063 Iustin Pop
    """Compute block device status.
7487 a8083063 Iustin Pop

7488 a8083063 Iustin Pop
    """
7489 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7490 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7491 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7492 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7493 a8083063 Iustin Pop
      else:
7494 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7495 a8083063 Iustin Pop
7496 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7497 98825740 Michael Hanselmann
                                              instance.name, dev)
7498 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7499 a8083063 Iustin Pop
7500 a8083063 Iustin Pop
    if dev.children:
7501 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7502 a8083063 Iustin Pop
                      for child in dev.children]
7503 a8083063 Iustin Pop
    else:
7504 a8083063 Iustin Pop
      dev_children = []
7505 a8083063 Iustin Pop
7506 a8083063 Iustin Pop
    data = {
7507 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
7508 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
7509 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
7510 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
7511 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
7512 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
7513 a8083063 Iustin Pop
      "children": dev_children,
7514 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
7515 c98162a7 Iustin Pop
      "size": dev.size,
7516 a8083063 Iustin Pop
      }
7517 a8083063 Iustin Pop
7518 a8083063 Iustin Pop
    return data
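    # Illustrative sketch only (values below are made up): each disk is thus
    # described by a dict of the keys built above, with pstatus/sstatus being
    # the _ComputeBlockdevStatus tuples and "children" recursing into the same
    # structure, e.g. for a DRBD-backed disk:
    #
    #   {"iv_name": "disk/0", "dev_type": "drbd8", "size": 10240,
    #    "mode": "rw", "logical_id": (...), "physical_id": (...),
    #    "pstatus": (...), "sstatus": (...),
    #    "children": [{...}, {...}]}   # the backing logical volumes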
7519 a8083063 Iustin Pop
7520 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7521 a8083063 Iustin Pop
    """Gather and return data"""
7522 a8083063 Iustin Pop
    result = {}
7523 338e51e8 Iustin Pop
7524 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
7525 338e51e8 Iustin Pop
7526 a8083063 Iustin Pop
    for instance in self.wanted_instances:
7527 57821cac Iustin Pop
      if not self.op.static:
7528 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7529 57821cac Iustin Pop
                                                  instance.name,
7530 57821cac Iustin Pop
                                                  instance.hypervisor)
7531 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7532 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
7533 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
7534 57821cac Iustin Pop
          remote_state = "up"
7535 57821cac Iustin Pop
        else:
7536 57821cac Iustin Pop
          remote_state = "down"
7537 a8083063 Iustin Pop
      else:
7538 57821cac Iustin Pop
        remote_state = None
7539 0d68c45d Iustin Pop
      if instance.admin_up:
7540 a8083063 Iustin Pop
        config_state = "up"
7541 0d68c45d Iustin Pop
      else:
7542 0d68c45d Iustin Pop
        config_state = "down"
7543 a8083063 Iustin Pop
7544 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
7545 a8083063 Iustin Pop
               for device in instance.disks]
7546 a8083063 Iustin Pop
7547 a8083063 Iustin Pop
      idict = {
7548 a8083063 Iustin Pop
        "name": instance.name,
7549 a8083063 Iustin Pop
        "config_state": config_state,
7550 a8083063 Iustin Pop
        "run_state": remote_state,
7551 a8083063 Iustin Pop
        "pnode": instance.primary_node,
7552 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
7553 a8083063 Iustin Pop
        "os": instance.os,
7554 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
7555 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
7556 a8083063 Iustin Pop
        "disks": disks,
7557 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
7558 24838135 Iustin Pop
        "network_port": instance.network_port,
7559 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
7560 7736a5f2 Iustin Pop
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
7561 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
7562 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
7563 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
7564 90f72445 Iustin Pop
        "mtime": instance.mtime,
7565 90f72445 Iustin Pop
        "ctime": instance.ctime,
7566 033d58b0 Iustin Pop
        "uuid": instance.uuid,
7567 a8083063 Iustin Pop
        }
7568 a8083063 Iustin Pop
7569 a8083063 Iustin Pop
      result[instance.name] = idict
7570 a8083063 Iustin Pop
7571 a8083063 Iustin Pop
    return result
7572 a8083063 Iustin Pop
7573 a8083063 Iustin Pop
7574 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
7575 a8083063 Iustin Pop
  """Modifies an instances's parameters.
7576 a8083063 Iustin Pop

7577 a8083063 Iustin Pop
  """
7578 a8083063 Iustin Pop
  HPATH = "instance-modify"
7579 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7580 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
7581 1a5c7281 Guido Trotter
  REQ_BGL = False
7582 1a5c7281 Guido Trotter
7583 24991749 Iustin Pop
  def CheckArguments(self):
7584 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
7585 24991749 Iustin Pop
      self.op.nics = []
7586 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
7587 24991749 Iustin Pop
      self.op.disks = []
7588 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
7589 24991749 Iustin Pop
      self.op.beparams = {}
7590 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
7591 24991749 Iustin Pop
      self.op.hvparams = {}
7592 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
7593 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
7594 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
7595 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
7596 24991749 Iustin Pop
7597 7736a5f2 Iustin Pop
    if self.op.hvparams:
7598 7736a5f2 Iustin Pop
      _CheckGlobalHvParams(self.op.hvparams)
7599 7736a5f2 Iustin Pop
7600 24991749 Iustin Pop
    # Disk validation
7601 24991749 Iustin Pop
    disk_addremove = 0
7602 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7603 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7604 24991749 Iustin Pop
        disk_addremove += 1
7605 24991749 Iustin Pop
        continue
7606 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7607 24991749 Iustin Pop
        disk_addremove += 1
7608 24991749 Iustin Pop
      else:
7609 24991749 Iustin Pop
        if not isinstance(disk_op, int):
7610 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
7611 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
7612 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
7613 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7614 8b46606c Guido Trotter
7615 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
7616 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
7617 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
7618 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
7619 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7620 24991749 Iustin Pop
        size = disk_dict.get('size', None)
7621 24991749 Iustin Pop
        if size is None:
7622 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
7623 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7624 24991749 Iustin Pop
        try:
7625 24991749 Iustin Pop
          size = int(size)
7626 691744c4 Iustin Pop
        except (TypeError, ValueError), err:
7627 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
7628 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
7629 24991749 Iustin Pop
        disk_dict['size'] = size
7630 24991749 Iustin Pop
      else:
7631 24991749 Iustin Pop
        # modification of disk
7632 24991749 Iustin Pop
        if 'size' in disk_dict:
7633 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
7634 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
7635 24991749 Iustin Pop
7636 24991749 Iustin Pop
    if disk_addremove > 1:
7637 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
7638 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
7639 24991749 Iustin Pop
7640 24991749 Iustin Pop
    # NIC validation
7641 24991749 Iustin Pop
    nic_addremove = 0
7642 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7643 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7644 24991749 Iustin Pop
        nic_addremove += 1
7645 24991749 Iustin Pop
        continue
7646 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7647 24991749 Iustin Pop
        nic_addremove += 1
7648 24991749 Iustin Pop
      else:
7649 24991749 Iustin Pop
        if not isinstance(nic_op, int):
7650 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
7651 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
7652 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
7653 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7654 24991749 Iustin Pop
7655 24991749 Iustin Pop
      # nic_dict should be a dict
7656 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
7657 24991749 Iustin Pop
      if nic_ip is not None:
7658 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
7659 24991749 Iustin Pop
          nic_dict['ip'] = None
7660 24991749 Iustin Pop
        else:
7661 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
7662 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
7663 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
7664 5c44da6a Guido Trotter
7665 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
7666 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
7667 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
7668 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7669 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
7670 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
7671 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
7672 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
7673 cd098c41 Guido Trotter
        nic_dict['link'] = None
7674 cd098c41 Guido Trotter
7675 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
7676 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
7677 5c44da6a Guido Trotter
        if nic_mac is None:
7678 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
7679 5c44da6a Guido Trotter
7680 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7681 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7682 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7683 82187135 René Nussbaumer
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
7684 82187135 René Nussbaumer
7685 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
7686 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
7687 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
7688 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7689 5c44da6a Guido Trotter
7690 24991749 Iustin Pop
    if nic_addremove > 1:
7691 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
7692 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
7693 24991749 Iustin Pop
7694 1a5c7281 Guido Trotter
  def ExpandNames(self):
7695 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
7696 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
7697 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7698 74409b12 Iustin Pop
7699 74409b12 Iustin Pop
  def DeclareLocks(self, level):
7700 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
7701 74409b12 Iustin Pop
      self._LockInstancesNodes()
7702 a8083063 Iustin Pop
7703 a8083063 Iustin Pop
  def BuildHooksEnv(self):
7704 a8083063 Iustin Pop
    """Build hooks env.
7705 a8083063 Iustin Pop

7706 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
7707 a8083063 Iustin Pop

7708 a8083063 Iustin Pop
    """
7709 396e1b78 Michael Hanselmann
    args = dict()
7710 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
7711 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
7712 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
7713 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
7714 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
7715 d8dcf3c9 Guido Trotter
    # information at all.
7716 d8dcf3c9 Guido Trotter
    if self.op.nics:
7717 d8dcf3c9 Guido Trotter
      args['nics'] = []
7718 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
7719 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
7720 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
7721 d8dcf3c9 Guido Trotter
        if idx in nic_override:
7722 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
7723 d8dcf3c9 Guido Trotter
        else:
7724 d8dcf3c9 Guido Trotter
          this_nic_override = {}
7725 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
7726 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
7727 d8dcf3c9 Guido Trotter
        else:
7728 d8dcf3c9 Guido Trotter
          ip = nic.ip
7729 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
7730 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
7731 d8dcf3c9 Guido Trotter
        else:
7732 d8dcf3c9 Guido Trotter
          mac = nic.mac
7733 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
7734 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
7735 62f0dd02 Guido Trotter
        else:
7736 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
7737 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
7738 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
7739 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
7740 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
7741 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
7742 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
7743 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
7744 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
7745 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
7746 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
7747 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
7748 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
7749 d8dcf3c9 Guido Trotter
7750 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
7751 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7752 a8083063 Iustin Pop
    return env, nl, nl
7753 a8083063 Iustin Pop
7754 7e950d31 Iustin Pop
  @staticmethod
7755 7e950d31 Iustin Pop
  def _GetUpdatedParams(old_params, update_dict,
7756 0329617a Guido Trotter
                        default_values, parameter_types):
7757 0329617a Guido Trotter
    """Return the new params dict for the given params.
7758 0329617a Guido Trotter

7759 0329617a Guido Trotter
    @type old_params: dict
7760 f2fd87d7 Iustin Pop
    @param old_params: old parameters
7761 0329617a Guido Trotter
    @type update_dict: dict
7762 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
7763 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
7764 f2fd87d7 Iustin Pop
                        parameter to its default value
7765 0329617a Guido Trotter
    @type default_values: dict
7766 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
7767 0329617a Guido Trotter
    @type parameter_types: dict
7768 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
7769 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
7770 0329617a Guido Trotter
    @rtype: (dict, dict)
7771 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
7772 0329617a Guido Trotter

7773 0329617a Guido Trotter
    """
7774 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
7775 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
7776 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
7777 0329617a Guido Trotter
        try:
7778 0329617a Guido Trotter
          del params_copy[key]
7779 0329617a Guido Trotter
        except KeyError:
7780 0329617a Guido Trotter
          pass
7781 0329617a Guido Trotter
      else:
7782 0329617a Guido Trotter
        params_copy[key] = val
7783 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
7784 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
7785 0329617a Guido Trotter
    return (params_copy, params_filled)
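    # A worked example of the merge above (values below are made up):
    # constants.VALUE_DEFAULT drops the per-instance override, so the filled
    # dict falls back to the supplied default for that key:
    #
    #   old_params     = {constants.BE_MEMORY: 512}
    #   update_dict    = {constants.BE_MEMORY: constants.VALUE_DEFAULT,
    #                     constants.BE_VCPUS: 2}
    #   default_values = {constants.BE_MEMORY: 128, constants.BE_VCPUS: 1,
    #                     constants.BE_AUTO_BALANCE: True}
    #   new, filled = self._GetUpdatedParams(old_params, update_dict,
    #                                        default_values,
    #                                        constants.BES_PARAMETER_TYPES)
    #   # new    == {constants.BE_VCPUS: 2}
    #   # filled == {constants.BE_MEMORY: 128, constants.BE_VCPUS: 2,
    #   #            constants.BE_AUTO_BALANCE: True}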
7786 0329617a Guido Trotter
7787 a8083063 Iustin Pop
  def CheckPrereq(self):
7788 a8083063 Iustin Pop
    """Check prerequisites.
7789 a8083063 Iustin Pop

7790 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
7791 a8083063 Iustin Pop

7792 a8083063 Iustin Pop
    """
7793 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
7794 a8083063 Iustin Pop
7795 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
7796 31a853d2 Iustin Pop
7797 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7798 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
7799 1a5c7281 Guido Trotter
    assert self.instance is not None, \
7800 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7801 6b12959c Iustin Pop
    pnode = instance.primary_node
7802 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
7803 74409b12 Iustin Pop
7804 338e51e8 Iustin Pop
    # hvparams processing
7805 74409b12 Iustin Pop
    if self.op.hvparams:
7806 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
7807 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
7808 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
7809 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
7810 74409b12 Iustin Pop
      # local check
7811 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
7812 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
7813 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
7814 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
7815 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
7816 338e51e8 Iustin Pop
    else:
7817 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
7818 338e51e8 Iustin Pop
7819 338e51e8 Iustin Pop
    # beparams processing
7820 338e51e8 Iustin Pop
    if self.op.beparams:
7821 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
7822 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
7823 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
7824 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
7825 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
7826 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
7827 338e51e8 Iustin Pop
    else:
7828 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
7829 74409b12 Iustin Pop
7830 cfefe007 Guido Trotter
    self.warn = []
7831 647a5d80 Iustin Pop
7832 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
7833 647a5d80 Iustin Pop
      mem_check_list = [pnode]
7834 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7835 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
7836 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
7837 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
7838 72737a7f Iustin Pop
                                                  instance.hypervisor)
7839 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
7840 72737a7f Iustin Pop
                                         instance.hypervisor)
7841 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
7842 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
7843 070e998b Iustin Pop
      if msg:
7844 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
7845 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
7846 070e998b Iustin Pop
                         (pnode, msg))
7847 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
7848 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
7849 070e998b Iustin Pop
                         " free memory information" % pnode)
7850 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
7851 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
7852 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
7853 cfefe007 Guido Trotter
      else:
7854 7ad1af4a Iustin Pop
        if instance_info.payload:
7855 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
7856 cfefe007 Guido Trotter
        else:
7857 cfefe007 Guido Trotter
          # Assume instance not running
7858 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
7859 cfefe007 Guido Trotter
          # and we have no other way to check)
7860 cfefe007 Guido Trotter
          current_mem = 0
7861 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
7862 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
7863 cfefe007 Guido Trotter
        if miss_mem > 0:
7864 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
7865 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
7866 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
7867 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
7868 cfefe007 Guido Trotter
7869 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
7870 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
7871 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
7872 ea33068f Iustin Pop
            continue
7873 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
7874 070e998b Iustin Pop
          if msg:
7875 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
7876 070e998b Iustin Pop
                             (node, msg))
7877 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
7878 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
7879 070e998b Iustin Pop
                             " memory information" % node)
7880 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
7881 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
7882 647a5d80 Iustin Pop
                             " secondary node %s" % node)
7883 5bc84f33 Alexander Schreiber
7884 24991749 Iustin Pop
    # NIC processing
7885 cd098c41 Guido Trotter
    self.nic_pnew = {}
7886 cd098c41 Guido Trotter
    self.nic_pinst = {}
7887 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7888 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7889 24991749 Iustin Pop
        if not instance.nics:
7890 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
7891 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7892 24991749 Iustin Pop
        continue
7893 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
7894 24991749 Iustin Pop
        # an existing nic
7895 21bcb9aa Michael Hanselmann
        if not instance.nics:
7896 21bcb9aa Michael Hanselmann
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
7897 21bcb9aa Michael Hanselmann
                                     " no NICs" % nic_op,
7898 21bcb9aa Michael Hanselmann
                                     errors.ECODE_INVAL)
7899 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
7900 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
7901 24991749 Iustin Pop
                                     " are 0 to %d" %
7902 21bcb9aa Michael Hanselmann
                                     (nic_op, len(instance.nics) - 1),
7903 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7904 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
7905 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
7906 cd098c41 Guido Trotter
      else:
7907 cd098c41 Guido Trotter
        old_nic_params = {}
7908 cd098c41 Guido Trotter
        old_nic_ip = None
7909 cd098c41 Guido Trotter
7910 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
7911 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
7912 cd098c41 Guido Trotter
                                 if key in nic_dict])
7913 cd098c41 Guido Trotter
7914 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
7915 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
7916 cd098c41 Guido Trotter
7917 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
7918 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
7919 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
7920 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
7921 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
7922 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
7923 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
7924 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
7925 cd098c41 Guido Trotter
7926 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
7927 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
7928 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
7929 35c0c8da Iustin Pop
        if msg:
7930 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
7931 24991749 Iustin Pop
          if self.force:
7932 24991749 Iustin Pop
            self.warn.append(msg)
7933 24991749 Iustin Pop
          else:
7934 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
7935 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
7936 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
7937 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
7938 cd098c41 Guido Trotter
        else:
7939 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
7940 cd098c41 Guido Trotter
        if nic_ip is None:
7941 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
7942 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
7943 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
7944 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
7945 5c44da6a Guido Trotter
        if nic_mac is None:
7946 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
7947 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7948 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7949 5c44da6a Guido Trotter
          # otherwise generate the mac
7950 36b66e6e Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
7951 5c44da6a Guido Trotter
        else:
7952 5c44da6a Guido Trotter
          # or validate/reserve the current one
7953 36b66e6e Guido Trotter
          try:
7954 36b66e6e Guido Trotter
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
7955 36b66e6e Guido Trotter
          except errors.ReservationError:
7956 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
7957 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
7958 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
7959 24991749 Iustin Pop
7960 24991749 Iustin Pop
    # DISK processing
7961 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
7962 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
7963 5c983ee5 Iustin Pop
                                 " diskless instances",
7964 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7965 1122eb25 Iustin Pop
    for disk_op, _ in self.op.disks:
7966 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7967 24991749 Iustin Pop
        if len(instance.disks) == 1:
7968 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
7969 5c983ee5 Iustin Pop
                                     " an instance",
7970 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7971 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
7972 24991749 Iustin Pop
        ins_l = ins_l[pnode]
7973 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
7974 aca13712 Iustin Pop
        if msg:
7975 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
7976 5c983ee5 Iustin Pop
                                     (pnode, msg), errors.ECODE_ENVIRON)
7977 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
7978 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
7979 5c983ee5 Iustin Pop
                                     " disks.", errors.ECODE_STATE)
7980 24991749 Iustin Pop
7981 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
7982 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
7983 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
7984 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
7985 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7986 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
7987 24991749 Iustin Pop
        # an existing disk
7988 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
7989 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
7990 24991749 Iustin Pop
                                     " are 0 to %d" %
7991 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
7992 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7993 24991749 Iustin Pop
7994 a8083063 Iustin Pop
    return
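    # A small sketch of the memory check above (numbers below are made up):
    # a live memory increase is refused if the primary node cannot absorb it,
    # using
    #
    #   miss_mem = new_memory - current_instance_memory - node_memory_free
    #   # e.g. 2048 - 512 - 1024 = 512 MiB missing -> OpPrereqError
    #
    # Secondary nodes only generate warnings, since their free memory matters
    # for a future failover rather than for the running instance.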
7995 a8083063 Iustin Pop
7996 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7997 a8083063 Iustin Pop
    """Modifies an instance.
7998 a8083063 Iustin Pop

7999 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
8000 24991749 Iustin Pop

8001 a8083063 Iustin Pop
    """
8002 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
8003 cfefe007 Guido Trotter
    # feedback_fn there.
8004 cfefe007 Guido Trotter
    for warn in self.warn:
8005 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
8006 cfefe007 Guido Trotter
8007 a8083063 Iustin Pop
    result = []
8008 a8083063 Iustin Pop
    instance = self.instance
8009 24991749 Iustin Pop
    # disk changes
8010 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8011 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8012 24991749 Iustin Pop
        # remove the last disk
8013 24991749 Iustin Pop
        device = instance.disks.pop()
8014 24991749 Iustin Pop
        device_idx = len(instance.disks)
8015 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8016 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
8017 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8018 e1bc0878 Iustin Pop
          if msg:
8019 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8020 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
8021 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
8022 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8023 24991749 Iustin Pop
        # add a new disk
8024 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
8025 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
8026 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
8027 24991749 Iustin Pop
        else:
8028 24991749 Iustin Pop
          file_driver = file_path = None
8029 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
8030 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
8031 24991749 Iustin Pop
                                         instance.disk_template,
8032 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
8033 24991749 Iustin Pop
                                         instance.secondary_nodes,
8034 24991749 Iustin Pop
                                         [disk_dict],
8035 24991749 Iustin Pop
                                         file_path,
8036 24991749 Iustin Pop
                                         file_driver,
8037 24991749 Iustin Pop
                                         disk_idx_base)[0]
8038 24991749 Iustin Pop
        instance.disks.append(new_disk)
8039 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
8040 24991749 Iustin Pop
8041 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
8042 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
8043 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
8044 24991749 Iustin Pop
        #HARDCODE
8045 428958aa Iustin Pop
        for node in instance.all_nodes:
8046 428958aa Iustin Pop
          f_create = node == instance.primary_node
8047 796cab27 Iustin Pop
          try:
8048 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
8049 428958aa Iustin Pop
                            f_create, info, f_create)
8050 1492cca7 Iustin Pop
          except errors.OpExecError, err:
8051 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
8052 428958aa Iustin Pop
                            " node %s: %s",
8053 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
8054 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8055 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
8056 24991749 Iustin Pop
      else:
8057 24991749 Iustin Pop
        # change a given disk
8058 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
8059 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8060 24991749 Iustin Pop
    # NIC changes
8061 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8062 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8063 24991749 Iustin Pop
        # remove the last nic
8064 24991749 Iustin Pop
        del instance.nics[-1]
8065 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
8066 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8067 5c44da6a Guido Trotter
        # mac and bridge should be set by now
8068 5c44da6a Guido Trotter
        mac = nic_dict['mac']
8069 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
8070 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
8071 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8072 24991749 Iustin Pop
        instance.nics.append(new_nic)
8073 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
8074 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8075 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
8076 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8077 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8078 cd098c41 Guido Trotter
                       )))
8079 24991749 Iustin Pop
      else:
8080 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
8081 24991749 Iustin Pop
          if key in nic_dict:
8082 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
8083 beabf067 Guido Trotter
        if nic_op in self.nic_pinst:
8084 beabf067 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8085 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
8086 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
8087 24991749 Iustin Pop
8088 24991749 Iustin Pop
    # hvparams changes
8089 74409b12 Iustin Pop
    if self.op.hvparams:
8090 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
8091 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
8092 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
8093 24991749 Iustin Pop
8094 24991749 Iustin Pop
    # beparams changes
8095 338e51e8 Iustin Pop
    if self.op.beparams:
8096 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
8097 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
8098 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
8099 a8083063 Iustin Pop
8100 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
8101 a8083063 Iustin Pop
8102 a8083063 Iustin Pop
    return result
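    # Illustrative sketch only (values below are made up): the returned change
    # list pairs every modified item with its new value, in the formats built
    # above, e.g.
    #
    #   [("disk/1", "add:size=2048,mode=rw"),
    #    ("nic.mac/0", "aa:00:00:4c:13:e2"),
    #    ("be/memory", 1024)]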
8103 a8083063 Iustin Pop
8104 a8083063 Iustin Pop
8105 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
8106 a8083063 Iustin Pop
  """Query the exports list
8107 a8083063 Iustin Pop

8108 a8083063 Iustin Pop
  """
8109 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
8110 21a15682 Guido Trotter
  REQ_BGL = False
8111 21a15682 Guido Trotter
8112 21a15682 Guido Trotter
  def ExpandNames(self):
8113 21a15682 Guido Trotter
    self.needed_locks = {}
8114 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
8115 21a15682 Guido Trotter
    if not self.op.nodes:
8116 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8117 21a15682 Guido Trotter
    else:
8118 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
8119 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
8120 a8083063 Iustin Pop
8121 a8083063 Iustin Pop
  def CheckPrereq(self):
8122 21a15682 Guido Trotter
    """Check prerequisites.
8123 a8083063 Iustin Pop

8124 a8083063 Iustin Pop
    """
8125 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8126 a8083063 Iustin Pop
8127 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8128 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
8129 a8083063 Iustin Pop

8130 e4376078 Iustin Pop
    @rtype: dict
8131 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
8132 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
8133 e4376078 Iustin Pop
        that node.
8134 a8083063 Iustin Pop

8135 a8083063 Iustin Pop
    """
8136 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
8137 b04285f2 Guido Trotter
    result = {}
8138 b04285f2 Guido Trotter
    for node in rpcresult:
8139 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
8140 b04285f2 Guido Trotter
        result[node] = False
8141 b04285f2 Guido Trotter
      else:
8142 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
8143 b04285f2 Guido Trotter
8144 b04285f2 Guido Trotter
    return result
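    # Illustrative sketch only (names below are made up): nodes that failed to
    # answer are flagged with False instead of an export list, e.g.
    #
    #   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
    #    "node2.example.com": False}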
8145 a8083063 Iustin Pop
8146 a8083063 Iustin Pop
8147 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
8148 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
8149 a8083063 Iustin Pop

8150 a8083063 Iustin Pop
  """
8151 a8083063 Iustin Pop
  HPATH = "instance-export"
8152 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8153 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8154 6657590e Guido Trotter
  REQ_BGL = False
8155 6657590e Guido Trotter
8156 17c3f802 Guido Trotter
  def CheckArguments(self):
8157 17c3f802 Guido Trotter
    """Check the arguments.
8158 17c3f802 Guido Trotter

8159 17c3f802 Guido Trotter
    """
8160 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8161 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8162 17c3f802 Guido Trotter
8163 6657590e Guido Trotter
  def ExpandNames(self):
8164 6657590e Guido Trotter
    self._ExpandAndLockInstance()
8165 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
8166 6657590e Guido Trotter
    #
8167 6657590e Guido Trotter
    # Sad but true, for now we have do lock all nodes, as we don't know where
8168 6657590e Guido Trotter
    # the previous export might be, and and in this LU we search for it and
8169 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
8170 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
8171 6657590e Guido Trotter
    #    then one to remove, after
8172 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
8173 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8174 6657590e Guido Trotter
8175 6657590e Guido Trotter
  def DeclareLocks(self, level):
8176 6657590e Guido Trotter
    """Last minute lock declaration."""
8177 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
8178 a8083063 Iustin Pop
8179 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8180 a8083063 Iustin Pop
    """Build hooks env.
8181 a8083063 Iustin Pop

8182 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
8183 a8083063 Iustin Pop

8184 a8083063 Iustin Pop
    """
8185 a8083063 Iustin Pop
    env = {
8186 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
8187 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8188 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8189 a8083063 Iustin Pop
      }
8190 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8191 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8192 a8083063 Iustin Pop
          self.op.target_node]
8193 a8083063 Iustin Pop
    return env, nl, nl
8194 a8083063 Iustin Pop
8195 a8083063 Iustin Pop
  def CheckPrereq(self):
8196 a8083063 Iustin Pop
    """Check prerequisites.
8197 a8083063 Iustin Pop

8198 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
8199 a8083063 Iustin Pop

8200 a8083063 Iustin Pop
    """
8201 6657590e Guido Trotter
    instance_name = self.op.instance_name
8202 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
8203 6657590e Guido Trotter
    assert self.instance is not None, \
8204 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
8205 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
8206 a8083063 Iustin Pop
8207 cf26a87a Iustin Pop
    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8208 cf26a87a Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
8209 cf26a87a Iustin Pop
    assert self.dst_node is not None
8210 a8083063 Iustin Pop
8211 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
8212 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
8213 a8083063 Iustin Pop
8214 b6023d6c Manuel Franceschini
    # instance disk type verification
8215 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
8216 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
8217 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
8218 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
8219 b6023d6c Manuel Franceschini
8220 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8221 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
8222 a8083063 Iustin Pop

8223 a8083063 Iustin Pop
    """
8224 a8083063 Iustin Pop
    instance = self.instance
8225 a8083063 Iustin Pop
    dst_node = self.dst_node
8226 a8083063 Iustin Pop
    src_node = instance.primary_node
8227 37972df0 Michael Hanselmann
8228 a8083063 Iustin Pop
    if self.op.shutdown:
8229 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
8230 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
8231 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
8232 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
8233 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
8234 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
8235 a8083063 Iustin Pop
8236 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
8237 a8083063 Iustin Pop
8238 a8083063 Iustin Pop
    snap_disks = []
8239 a8083063 Iustin Pop
8240 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
8241 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
8242 998c712c Iustin Pop
    for disk in instance.disks:
8243 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
8244 998c712c Iustin Pop
8245 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
8246 3e53a60b Michael Hanselmann
8247 3e53a60b Michael Hanselmann
    if activate_disks:
8248 3e53a60b Michael Hanselmann
      # Activate the instance disks if we're exporting a stopped instance
8249 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
8250 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
8251 3e53a60b Michael Hanselmann
8252 a8083063 Iustin Pop
    try:
8253 3e53a60b Michael Hanselmann
      # per-disk results
8254 3e53a60b Michael Hanselmann
      dresults = []
8255 3e53a60b Michael Hanselmann
      try:
8256 3e53a60b Michael Hanselmann
        for idx, disk in enumerate(instance.disks):
8257 3e53a60b Michael Hanselmann
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
8258 3e53a60b Michael Hanselmann
                      (idx, src_node))
8259 3e53a60b Michael Hanselmann
8260 3e53a60b Michael Hanselmann
          # result.payload will be a snapshot of an lvm leaf of the one we
8261 3e53a60b Michael Hanselmann
          # passed
8262 3e53a60b Michael Hanselmann
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
8263 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8264 3e53a60b Michael Hanselmann
          if msg:
8265 3e53a60b Michael Hanselmann
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
8266 3e53a60b Michael Hanselmann
                            idx, src_node, msg)
8267 3e53a60b Michael Hanselmann
            snap_disks.append(False)
8268 3e53a60b Michael Hanselmann
          else:
8269 3e53a60b Michael Hanselmann
            disk_id = (vgname, result.payload)
8270 3e53a60b Michael Hanselmann
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
8271 3e53a60b Michael Hanselmann
                                   logical_id=disk_id, physical_id=disk_id,
8272 3e53a60b Michael Hanselmann
                                   iv_name=disk.iv_name)
8273 3e53a60b Michael Hanselmann
            snap_disks.append(new_dev)
8274 37972df0 Michael Hanselmann
8275 3e53a60b Michael Hanselmann
      finally:
8276 3e53a60b Michael Hanselmann
        if self.op.shutdown and instance.admin_up:
8277 3e53a60b Michael Hanselmann
          feedback_fn("Starting instance %s" % instance.name)
8278 3e53a60b Michael Hanselmann
          result = self.rpc.call_instance_start(src_node, instance, None, None)
8279 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8280 3e53a60b Michael Hanselmann
          if msg:
8281 3e53a60b Michael Hanselmann
            _ShutdownInstanceDisks(self, instance)
8282 3e53a60b Michael Hanselmann
            raise errors.OpExecError("Could not start instance: %s" % msg)
8283 3e53a60b Michael Hanselmann
8284 3e53a60b Michael Hanselmann
      # TODO: check for size
8285 3e53a60b Michael Hanselmann
8286 3e53a60b Michael Hanselmann
      cluster_name = self.cfg.GetClusterName()
8287 3e53a60b Michael Hanselmann
      for idx, dev in enumerate(snap_disks):
8288 3e53a60b Michael Hanselmann
        feedback_fn("Exporting snapshot %s from %s to %s" %
8289 3e53a60b Michael Hanselmann
                    (idx, src_node, dst_node.name))
8290 3e53a60b Michael Hanselmann
        if dev:
8291 4a0e011f Iustin Pop
          # FIXME: pass debug from opcode to backend
8292 3e53a60b Michael Hanselmann
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
8293 4a0e011f Iustin Pop
                                                 instance, cluster_name,
8294 dd713605 Iustin Pop
                                                 idx, self.op.debug_level)
8295 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8296 3e53a60b Michael Hanselmann
          if msg:
8297 3e53a60b Michael Hanselmann
            self.LogWarning("Could not export disk/%s from node %s to"
8298 3e53a60b Michael Hanselmann
                            " node %s: %s", idx, src_node, dst_node.name, msg)
8299 3e53a60b Michael Hanselmann
            dresults.append(False)
8300 3e53a60b Michael Hanselmann
          else:
8301 3e53a60b Michael Hanselmann
            dresults.append(True)
8302 3e53a60b Michael Hanselmann
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
8303 3e53a60b Michael Hanselmann
          if msg:
8304 3e53a60b Michael Hanselmann
            self.LogWarning("Could not remove snapshot for disk/%d from node"
8305 3e53a60b Michael Hanselmann
                            " %s: %s", idx, src_node, msg)
8306 19d7f90a Guido Trotter
        else:
8307 084f05a5 Iustin Pop
          dresults.append(False)
8308 a8083063 Iustin Pop
8309 3e53a60b Michael Hanselmann
      feedback_fn("Finalizing export on %s" % dst_node.name)
8310 3e53a60b Michael Hanselmann
      result = self.rpc.call_finalize_export(dst_node.name, instance,
8311 3e53a60b Michael Hanselmann
                                             snap_disks)
8312 3e53a60b Michael Hanselmann
      fin_resu = True
8313 3e53a60b Michael Hanselmann
      msg = result.fail_msg
8314 3e53a60b Michael Hanselmann
      if msg:
8315 3e53a60b Michael Hanselmann
        self.LogWarning("Could not finalize export for instance %s"
8316 3e53a60b Michael Hanselmann
                        " on node %s: %s", instance.name, dst_node.name, msg)
8317 3e53a60b Michael Hanselmann
        fin_resu = False
8318 3e53a60b Michael Hanselmann
8319 3e53a60b Michael Hanselmann
    finally:
8320 3e53a60b Michael Hanselmann
      if activate_disks:
8321 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
8322 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
8323 a8083063 Iustin Pop
8324 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
8325 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
8326 a8083063 Iustin Pop
8327 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal;
8328 a8083063 Iustin Pop
    # if we proceeded, the backup would be removed because OpQueryExports
8329 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
8330 35fbcd11 Iustin Pop
    iname = instance.name
8331 a8083063 Iustin Pop
    if nodelist:
8332 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8333 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
8334 a8083063 Iustin Pop
      for node in exportlist:
8335 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
8336 781de953 Iustin Pop
          continue
8337 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
8338 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8339 35fbcd11 Iustin Pop
          if msg:
8340 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
8341 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
8342 084f05a5 Iustin Pop
    return fin_resu, dresults
8343 5c947f38 Iustin Pop
8344 5c947f38 Iustin Pop
8345 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
8346 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
8347 9ac99fda Guido Trotter

8348 9ac99fda Guido Trotter
  """
8349 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
8350 3656b3af Guido Trotter
  REQ_BGL = False
8351 3656b3af Guido Trotter
8352 3656b3af Guido Trotter
  def ExpandNames(self):
8353 3656b3af Guido Trotter
    self.needed_locks = {}
8354 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
8355 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
8356 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
8357 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8358 9ac99fda Guido Trotter
8359 9ac99fda Guido Trotter
  def CheckPrereq(self):
8360 9ac99fda Guido Trotter
    """Check prerequisites.
8361 9ac99fda Guido Trotter
    """
8362 9ac99fda Guido Trotter
    pass
8363 9ac99fda Guido Trotter
8364 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
8365 9ac99fda Guido Trotter
    """Remove any export.
8366 9ac99fda Guido Trotter

8367 9ac99fda Guido Trotter
    """
8368 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
8369 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
8370 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
8371 9ac99fda Guido Trotter
    fqdn_warn = False
8372 9ac99fda Guido Trotter
    if not instance_name:
8373 9ac99fda Guido Trotter
      fqdn_warn = True
8374 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
8375 9ac99fda Guido Trotter
8376 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
8377 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
8378 9ac99fda Guido Trotter
    found = False
8379 9ac99fda Guido Trotter
    for node in exportlist:
8380 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
8381 1b7bfbb7 Iustin Pop
      if msg:
8382 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
8383 781de953 Iustin Pop
        continue
8384 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
8385 9ac99fda Guido Trotter
        found = True
8386 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
8387 4c4e4e1e Iustin Pop
        msg = result.fail_msg
8388 35fbcd11 Iustin Pop
        if msg:
8389 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
8390 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
8391 9ac99fda Guido Trotter
8392 9ac99fda Guido Trotter
    if fqdn_warn and not found:
8393 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
8394 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
8395 9ac99fda Guido Trotter
                  " Domain Name.")
8396 9ac99fda Guido Trotter
8397 9ac99fda Guido Trotter
8398 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
8399 5c947f38 Iustin Pop
  """Generic tags LU.
8400 5c947f38 Iustin Pop

8401 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
8402 5c947f38 Iustin Pop

8403 5c947f38 Iustin Pop
  """
8404 5c947f38 Iustin Pop
8405 8646adce Guido Trotter
  def ExpandNames(self):
8406 8646adce Guido Trotter
    self.needed_locks = {}
8407 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
8408 cf26a87a Iustin Pop
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
8409 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
8410 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
8411 cf26a87a Iustin Pop
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
8412 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
8413 8646adce Guido Trotter
8414 8646adce Guido Trotter
  def CheckPrereq(self):
8415 8646adce Guido Trotter
    """Check prerequisites.
8416 8646adce Guido Trotter

8417 8646adce Guido Trotter
    """
8418 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
8419 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
8420 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
8421 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
8422 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
8423 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
8424 5c947f38 Iustin Pop
    else:
8425 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
8426 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
8427 5c947f38 Iustin Pop
8428 5c947f38 Iustin Pop
8429 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
8430 5c947f38 Iustin Pop
  """Returns the tags of a given object.
8431 5c947f38 Iustin Pop

8432 5c947f38 Iustin Pop
  """
8433 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
8434 8646adce Guido Trotter
  REQ_BGL = False
8435 5c947f38 Iustin Pop
8436 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8437 5c947f38 Iustin Pop
    """Returns the tag list.
8438 5c947f38 Iustin Pop

8439 5c947f38 Iustin Pop
    """
8440 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
8441 5c947f38 Iustin Pop
8442 5c947f38 Iustin Pop
8443 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
8444 73415719 Iustin Pop
  """Searches the tags for a given pattern.
8445 73415719 Iustin Pop

8446 73415719 Iustin Pop
  """
8447 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
8448 8646adce Guido Trotter
  REQ_BGL = False
8449 8646adce Guido Trotter
8450 8646adce Guido Trotter
  def ExpandNames(self):
8451 8646adce Guido Trotter
    self.needed_locks = {}
8452 73415719 Iustin Pop
8453 73415719 Iustin Pop
  def CheckPrereq(self):
8454 73415719 Iustin Pop
    """Check prerequisites.
8455 73415719 Iustin Pop

8456 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
8457 73415719 Iustin Pop

8458 73415719 Iustin Pop
    """
8459 73415719 Iustin Pop
    try:
8460 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
8461 73415719 Iustin Pop
    except re.error, err:
8462 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
8463 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
8464 73415719 Iustin Pop
8465 73415719 Iustin Pop
  def Exec(self, feedback_fn):
8466 73415719 Iustin Pop
    """Returns the tag list.
8467 73415719 Iustin Pop

8468 73415719 Iustin Pop
    """
8469 73415719 Iustin Pop
    cfg = self.cfg
8470 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
8471 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
8472 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
8473 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
8474 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
8475 73415719 Iustin Pop
    results = []
8476 73415719 Iustin Pop
    for path, target in tgts:
8477 73415719 Iustin Pop
      for tag in target.GetTags():
8478 73415719 Iustin Pop
        if self.re.search(tag):
8479 73415719 Iustin Pop
          results.append((path, tag))
8480 73415719 Iustin Pop
    return results
8481 73415719 Iustin Pop
8482 73415719 Iustin Pop
8483 f27302fa Iustin Pop
class LUAddTags(TagsLU):
8484 5c947f38 Iustin Pop
  """Sets a tag on a given object.
8485 5c947f38 Iustin Pop

8486 5c947f38 Iustin Pop
  """
8487 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8488 8646adce Guido Trotter
  REQ_BGL = False
8489 5c947f38 Iustin Pop
8490 5c947f38 Iustin Pop
  def CheckPrereq(self):
8491 5c947f38 Iustin Pop
    """Check prerequisites.
8492 5c947f38 Iustin Pop

8493 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
8494 5c947f38 Iustin Pop

8495 5c947f38 Iustin Pop
    """
8496 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8497 f27302fa Iustin Pop
    for tag in self.op.tags:
8498 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8499 5c947f38 Iustin Pop
8500 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8501 5c947f38 Iustin Pop
    """Sets the tag.
8502 5c947f38 Iustin Pop

8503 5c947f38 Iustin Pop
    """
8504 5c947f38 Iustin Pop
    try:
8505 f27302fa Iustin Pop
      for tag in self.op.tags:
8506 f27302fa Iustin Pop
        self.target.AddTag(tag)
8507 5c947f38 Iustin Pop
    except errors.TagError, err:
8508 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8509 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8510 5c947f38 Iustin Pop
8511 5c947f38 Iustin Pop
8512 f27302fa Iustin Pop
class LUDelTags(TagsLU):
8513 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
8514 5c947f38 Iustin Pop

8515 5c947f38 Iustin Pop
  """
8516 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8517 8646adce Guido Trotter
  REQ_BGL = False
8518 5c947f38 Iustin Pop
8519 5c947f38 Iustin Pop
  def CheckPrereq(self):
8520 5c947f38 Iustin Pop
    """Check prerequisites.
8521 5c947f38 Iustin Pop

8522 5c947f38 Iustin Pop
    This checks that we have the given tag.
8523 5c947f38 Iustin Pop

8524 5c947f38 Iustin Pop
    """
8525 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8526 f27302fa Iustin Pop
    for tag in self.op.tags:
8527 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8528 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
8529 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
8530 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
8531 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
8532 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
8533 f27302fa Iustin Pop
      diff_names.sort()
8534 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
8535 5c983ee5 Iustin Pop
                                 (",".join(diff_names)), errors.ECODE_NOENT)
8536 5c947f38 Iustin Pop
8537 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8538 5c947f38 Iustin Pop
    """Remove the tag from the object.
8539 5c947f38 Iustin Pop

8540 5c947f38 Iustin Pop
    """
8541 f27302fa Iustin Pop
    for tag in self.op.tags:
8542 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
8543 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8544 06009e27 Iustin Pop
8545 0eed6e61 Guido Trotter
8546 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
8547 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
8548 06009e27 Iustin Pop

8549 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
8550 06009e27 Iustin Pop
  time.
8551 06009e27 Iustin Pop

8552 06009e27 Iustin Pop
  """
8553 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
8554 fbe9022f Guido Trotter
  REQ_BGL = False
8555 06009e27 Iustin Pop
8556 fbe9022f Guido Trotter
  def ExpandNames(self):
8557 fbe9022f Guido Trotter
    """Expand names and set required locks.
8558 06009e27 Iustin Pop

8559 fbe9022f Guido Trotter
    This expands the node list, if any.
8560 06009e27 Iustin Pop

8561 06009e27 Iustin Pop
    """
8562 fbe9022f Guido Trotter
    self.needed_locks = {}
8563 06009e27 Iustin Pop
    if self.op.on_nodes:
8564 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
8565 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
8566 fbe9022f Guido Trotter
      # more information.
8567 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
8568 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
8569 fbe9022f Guido Trotter
8570 fbe9022f Guido Trotter
  def CheckPrereq(self):
8571 fbe9022f Guido Trotter
    """Check prerequisites.
8572 fbe9022f Guido Trotter

8573 fbe9022f Guido Trotter
    """
8574 06009e27 Iustin Pop
8575 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
8576 06009e27 Iustin Pop
    """Do the actual sleep.
8577 06009e27 Iustin Pop

8578 06009e27 Iustin Pop
    """
8579 06009e27 Iustin Pop
    if self.op.on_master:
8580 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
8581 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
8582 06009e27 Iustin Pop
    if self.op.on_nodes:
8583 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
8584 06009e27 Iustin Pop
      for node, node_result in result.items():
8585 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
8586 d61df03e Iustin Pop
8587 d61df03e Iustin Pop
8588 d1c2dd75 Iustin Pop
class IAllocator(object):
8589 d1c2dd75 Iustin Pop
  """IAllocator framework.
8590 d61df03e Iustin Pop

8591 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
8592 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
8593 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
8594 d1c2dd75 Iustin Pop
    - four buffer attributes (in_text, out_text, in_data, out_data), that
8595 d1c2dd75 Iustin Pop
      represent the input (to the external script) in text and data
8596 d1c2dd75 Iustin Pop
      structure format, and the output from it, again in two formats
8597 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, result) for
8598 d1c2dd75 Iustin Pop
      easy usage
8599 d61df03e Iustin Pop

8600 d61df03e Iustin Pop
  """
8601 7260cfbe Iustin Pop
  # pylint: disable-msg=R0902
8602 7260cfbe Iustin Pop
  # lots of instance attributes
8603 29859cb7 Iustin Pop
  _ALLO_KEYS = [
8604 8d3f86a0 Iustin Pop
    "name", "mem_size", "disks", "disk_template",
8605 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
8606 d1c2dd75 Iustin Pop
    ]
8607 29859cb7 Iustin Pop
  _RELO_KEYS = [
8608 8d3f86a0 Iustin Pop
    "name", "relocate_from",
8609 29859cb7 Iustin Pop
    ]
8610 7f60a422 Iustin Pop
  _EVAC_KEYS = [
8611 7f60a422 Iustin Pop
    "evac_nodes",
8612 7f60a422 Iustin Pop
    ]
8613 d1c2dd75 Iustin Pop
8614 8d3f86a0 Iustin Pop
  def __init__(self, cfg, rpc, mode, **kwargs):
8615 923ddac0 Michael Hanselmann
    self.cfg = cfg
8616 923ddac0 Michael Hanselmann
    self.rpc = rpc
8617 d1c2dd75 Iustin Pop
    # init buffer variables
8618 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
8619 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
8620 29859cb7 Iustin Pop
    self.mode = mode
8621 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
8622 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
8623 a0add446 Iustin Pop
    self.hypervisor = None
8624 29859cb7 Iustin Pop
    self.relocate_from = None
8625 8d3f86a0 Iustin Pop
    self.name = None
8626 7f60a422 Iustin Pop
    self.evac_nodes = None
8627 27579978 Iustin Pop
    # computed fields
8628 27579978 Iustin Pop
    self.required_nodes = None
8629 d1c2dd75 Iustin Pop
    # init result fields
8630 680f0a89 Iustin Pop
    self.success = self.info = self.result = None
8631 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8632 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
8633 9757cc90 Iustin Pop
      fn = self._AddNewInstance
8634 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8635 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
8636 9757cc90 Iustin Pop
      fn = self._AddRelocateInstance
8637 7f60a422 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
8638 7f60a422 Iustin Pop
      keyset = self._EVAC_KEYS
8639 7f60a422 Iustin Pop
      fn = self._AddEvacuateNodes
8640 29859cb7 Iustin Pop
    else:
8641 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
8642 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
8643 d1c2dd75 Iustin Pop
    for key in kwargs:
8644 29859cb7 Iustin Pop
      if key not in keyset:
8645 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
8646 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8647 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
8648 7f60a422 Iustin Pop
8649 29859cb7 Iustin Pop
    for key in keyset:
8650 d1c2dd75 Iustin Pop
      if key not in kwargs:
8651 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
8652 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
8653 9757cc90 Iustin Pop
    self._BuildInputData(fn)
8654 d1c2dd75 Iustin Pop
8655 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
8656 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
8657 d1c2dd75 Iustin Pop

8658 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
8659 d1c2dd75 Iustin Pop

8660 d1c2dd75 Iustin Pop
    """
8661 923ddac0 Michael Hanselmann
    cfg = self.cfg
8662 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
8663 d1c2dd75 Iustin Pop
    # cluster data
8664 d1c2dd75 Iustin Pop
    data = {
8665 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
8666 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
8667 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
8668 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
8669 d1c2dd75 Iustin Pop
      # we don't have job IDs
8670 d61df03e Iustin Pop
      }
8671 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
8672 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
8673 6286519f Iustin Pop
8674 d1c2dd75 Iustin Pop
    # node data
8675 d1c2dd75 Iustin Pop
    node_results = {}
8676 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
8677 8cc7e742 Guido Trotter
8678 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
8679 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
8680 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
8681 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
8682 7f60a422 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
8683 7f60a422 Iustin Pop
      hypervisor_name = cluster_info.enabled_hypervisors[0]
8684 8cc7e742 Guido Trotter
8685 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
8686 923ddac0 Michael Hanselmann
                                        hypervisor_name)
8687 923ddac0 Michael Hanselmann
    node_iinfo = \
8688 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
8689 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
8690 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
8691 1325da74 Iustin Pop
      # first fill in static (config-based) values
8692 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
8693 d1c2dd75 Iustin Pop
      pnr = {
8694 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
8695 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
8696 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
8697 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
8698 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
8699 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
8700 d1c2dd75 Iustin Pop
        }
8701 1325da74 Iustin Pop
8702 0d853843 Iustin Pop
      if not (ninfo.offline or ninfo.drained):
8703 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
8704 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
8705 4c4e4e1e Iustin Pop
                                nname)
8706 070e998b Iustin Pop
        remote_info = nresult.payload
8707 b142ef15 Iustin Pop
8708 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
8709 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
8710 1325da74 Iustin Pop
          if attr not in remote_info:
8711 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
8712 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
8713 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
8714 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
8715 070e998b Iustin Pop
                                     " for '%s': %s" %
8716 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
8717 1325da74 Iustin Pop
        # compute memory used by primary instances
8718 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
8719 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
8720 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
8721 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
8722 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
8723 1325da74 Iustin Pop
              i_used_mem = 0
8724 1325da74 Iustin Pop
            else:
8725 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
8726 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
8727 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
8728 1325da74 Iustin Pop
8729 1325da74 Iustin Pop
            if iinfo.admin_up:
8730 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
8731 1325da74 Iustin Pop
8732 1325da74 Iustin Pop
        # compute memory used by instances
8733 1325da74 Iustin Pop
        pnr_dyn = {
8734 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
8735 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
8736 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
8737 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
8738 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
8739 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
8740 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
8741 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
8742 1325da74 Iustin Pop
          }
8743 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
8744 1325da74 Iustin Pop
8745 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
8746 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
8747 d1c2dd75 Iustin Pop
8748 d1c2dd75 Iustin Pop
    # instance data
8749 d1c2dd75 Iustin Pop
    instance_data = {}
8750 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
8751 a9fe7e8f Guido Trotter
      nic_data = []
8752 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
8753 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
8754 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
8755 a9fe7e8f Guido Trotter
            nic.nicparams)
8756 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
8757 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
8758 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
8759 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
8760 a9fe7e8f Guido Trotter
                   }
8761 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
8762 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
8763 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
8764 d1c2dd75 Iustin Pop
      pir = {
8765 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
8766 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
8767 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
8768 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
8769 d1c2dd75 Iustin Pop
        "os": iinfo.os,
8770 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
8771 d1c2dd75 Iustin Pop
        "nics": nic_data,
8772 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
8773 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
8774 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
8775 d1c2dd75 Iustin Pop
        }
8776 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
8777 88ae4f85 Iustin Pop
                                                 pir["disks"])
8778 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
8779 d61df03e Iustin Pop
8780 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
8781 d61df03e Iustin Pop
8782 d1c2dd75 Iustin Pop
    self.in_data = data
8783 d61df03e Iustin Pop
8784 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
8785 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
8786 d61df03e Iustin Pop

8787 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
8788 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
8789 d61df03e Iustin Pop

8790 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
8791 d1c2dd75 Iustin Pop
    done.
8792 d61df03e Iustin Pop

8793 d1c2dd75 Iustin Pop
    """
8794 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
8795 d1c2dd75 Iustin Pop
8796 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
8797 27579978 Iustin Pop
      self.required_nodes = 2
8798 27579978 Iustin Pop
    else:
8799 27579978 Iustin Pop
      self.required_nodes = 1
8800 d1c2dd75 Iustin Pop
    request = {
8801 d1c2dd75 Iustin Pop
      "name": self.name,
8802 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
8803 d1c2dd75 Iustin Pop
      "tags": self.tags,
8804 d1c2dd75 Iustin Pop
      "os": self.os,
8805 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
8806 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
8807 d1c2dd75 Iustin Pop
      "disks": self.disks,
8808 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
8809 d1c2dd75 Iustin Pop
      "nics": self.nics,
8810 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
8811 d1c2dd75 Iustin Pop
      }
8812 9757cc90 Iustin Pop
    return request
8813 298fe380 Iustin Pop
8814 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
8815 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
8816 298fe380 Iustin Pop

8817 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
8818 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
8819 d61df03e Iustin Pop

8820 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
8821 d1c2dd75 Iustin Pop
    done.
8822 d61df03e Iustin Pop

8823 d1c2dd75 Iustin Pop
    """
8824 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
8825 27579978 Iustin Pop
    if instance is None:
8826 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
8827 27579978 Iustin Pop
                                   " IAllocator" % self.name)
8828 27579978 Iustin Pop
8829 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
8830 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
8831 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8832 27579978 Iustin Pop
8833 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
8834 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
8835 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
8836 2a139bb0 Iustin Pop
8837 27579978 Iustin Pop
    self.required_nodes = 1
8838 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
8839 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
8840 27579978 Iustin Pop
8841 d1c2dd75 Iustin Pop
    request = {
8842 d1c2dd75 Iustin Pop
      "name": self.name,
8843 27579978 Iustin Pop
      "disk_space_total": disk_space,
8844 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
8845 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
8846 d1c2dd75 Iustin Pop
      }
8847 9757cc90 Iustin Pop
    return request
8848 d61df03e Iustin Pop
8849 7f60a422 Iustin Pop
  def _AddEvacuateNodes(self):
8850 7f60a422 Iustin Pop
    """Add evacuate nodes data to allocator structure.
8851 7f60a422 Iustin Pop

8852 7f60a422 Iustin Pop
    """
8853 7f60a422 Iustin Pop
    request = {
8854 7f60a422 Iustin Pop
      "evac_nodes": self.evac_nodes
8855 7f60a422 Iustin Pop
      }
8856 7f60a422 Iustin Pop
    return request
8857 7f60a422 Iustin Pop
8858 9757cc90 Iustin Pop
  def _BuildInputData(self, fn):
8859 d1c2dd75 Iustin Pop
    """Build input data structures.
8860 d61df03e Iustin Pop

8861 d1c2dd75 Iustin Pop
    """
8862 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
8863 d61df03e Iustin Pop
8864 9757cc90 Iustin Pop
    request = fn()
8865 9757cc90 Iustin Pop
    request["type"] = self.mode
8866 9757cc90 Iustin Pop
    self.in_data["request"] = request
8867 d61df03e Iustin Pop
8868 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
8869 d61df03e Iustin Pop
8870 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
8871 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
8872 298fe380 Iustin Pop

8873 d1c2dd75 Iustin Pop
    """
8874 72737a7f Iustin Pop
    if call_fn is None:
8875 923ddac0 Michael Hanselmann
      call_fn = self.rpc.call_iallocator_runner
8876 298fe380 Iustin Pop
8877 923ddac0 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
8878 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
8879 8d528b7c Iustin Pop
8880 87f5c298 Iustin Pop
    self.out_text = result.payload
8881 d1c2dd75 Iustin Pop
    if validate:
8882 d1c2dd75 Iustin Pop
      self._ValidateResult()
8883 298fe380 Iustin Pop
8884 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
8885 d1c2dd75 Iustin Pop
    """Process the allocator results.
8886 538475ca Iustin Pop

8887 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
8888 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
8889 538475ca Iustin Pop

8890 d1c2dd75 Iustin Pop
    """
8891 d1c2dd75 Iustin Pop
    try:
8892 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
8893 d1c2dd75 Iustin Pop
    except Exception, err:
8894 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
8895 d1c2dd75 Iustin Pop
8896 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
8897 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
8898 538475ca Iustin Pop
8899 680f0a89 Iustin Pop
    # TODO: remove backwards compatibility in later versions
8900 680f0a89 Iustin Pop
    if "nodes" in rdict and "result" not in rdict:
8901 680f0a89 Iustin Pop
      rdict["result"] = rdict["nodes"]
8902 680f0a89 Iustin Pop
      del rdict["nodes"]
8903 680f0a89 Iustin Pop
8904 680f0a89 Iustin Pop
    for key in "success", "info", "result":
8905 d1c2dd75 Iustin Pop
      if key not in rdict:
8906 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
8907 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
8908 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
8909 538475ca Iustin Pop
8910 680f0a89 Iustin Pop
    if not isinstance(rdict["result"], list):
8911 680f0a89 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
8912 d1c2dd75 Iustin Pop
                               " is not a list")
8913 d1c2dd75 Iustin Pop
    self.out_data = rdict
8914 538475ca Iustin Pop
8915 538475ca Iustin Pop
8916 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
8917 d61df03e Iustin Pop
  """Run allocator tests.
8918 d61df03e Iustin Pop

8919 d61df03e Iustin Pop
  This LU runs the allocator tests
8920 d61df03e Iustin Pop

8921 d61df03e Iustin Pop
  """
8922 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
8923 d61df03e Iustin Pop
8924 d61df03e Iustin Pop
  def CheckPrereq(self):
8925 d61df03e Iustin Pop
    """Check prerequisites.
8926 d61df03e Iustin Pop

8927 d61df03e Iustin Pop
    This checks the opcode parameters depending on the test direction and mode.
8928 d61df03e Iustin Pop

8929 d61df03e Iustin Pop
    """
8930 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
8931 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
8932 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
8933 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
8934 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
8935 5c983ee5 Iustin Pop
                                     attr, errors.ECODE_INVAL)
8936 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
8937 d61df03e Iustin Pop
      if iname is not None:
8938 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
8939 5c983ee5 Iustin Pop
                                   iname, errors.ECODE_EXISTS)
8940 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
8941 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'",
8942 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8943 d61df03e Iustin Pop
      for row in self.op.nics:
8944 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
8945 d61df03e Iustin Pop
            "mac" not in row or
8946 d61df03e Iustin Pop
            "ip" not in row or
8947 d61df03e Iustin Pop
            "bridge" not in row):
8948 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
8949 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
8950 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
8951 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'",
8952 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8953 d61df03e Iustin Pop
      for row in self.op.disks:
8954 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
8955 d61df03e Iustin Pop
            "size" not in row or
8956 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
8957 d61df03e Iustin Pop
            "mode" not in row or
8958 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
8959 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
8960 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
8961 8901997e Iustin Pop
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
8962 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
8963 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
8964 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
8965 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
8966 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8967 cf26a87a Iustin Pop
      fname = _ExpandInstanceName(self.cfg, self.op.name)
8968 d61df03e Iustin Pop
      self.op.name = fname
8969 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
8970 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
8971 823a72bc Iustin Pop
      if not hasattr(self.op, "evac_nodes"):
8972 823a72bc Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
8973 823a72bc Iustin Pop
                                   " opcode input", errors.ECODE_INVAL)
8974 d61df03e Iustin Pop
    else:
8975 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
8976 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
8977 d61df03e Iustin Pop
8978 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
8979 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
8980 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing allocator name",
8981 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
8982 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
8983 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
8984 5c983ee5 Iustin Pop
                                 self.op.direction, errors.ECODE_INVAL)
8985 d61df03e Iustin Pop
8986 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
8987 d61df03e Iustin Pop
    """Run the allocator test.
8988 d61df03e Iustin Pop

8989 d61df03e Iustin Pop
    """
8990 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
8991 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
8992 29859cb7 Iustin Pop
                       mode=self.op.mode,
8993 29859cb7 Iustin Pop
                       name=self.op.name,
8994 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
8995 29859cb7 Iustin Pop
                       disks=self.op.disks,
8996 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
8997 29859cb7 Iustin Pop
                       os=self.op.os,
8998 29859cb7 Iustin Pop
                       tags=self.op.tags,
8999 29859cb7 Iustin Pop
                       nics=self.op.nics,
9000 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
9001 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
9002 29859cb7 Iustin Pop
                       )
9003 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
9004 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
9005 29859cb7 Iustin Pop
                       mode=self.op.mode,
9006 29859cb7 Iustin Pop
                       name=self.op.name,
9007 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
9008 29859cb7 Iustin Pop
                       )
9009 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
9010 823a72bc Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
9011 823a72bc Iustin Pop
                       mode=self.op.mode,
9012 823a72bc Iustin Pop
                       evac_nodes=self.op.evac_nodes)
9013 823a72bc Iustin Pop
    else:
9014 823a72bc Iustin Pop
      raise errors.ProgrammerError("Uncatched mode %s in"
9015 823a72bc Iustin Pop
                                   " LUTestAllocator.Exec", self.op.mode)
9016 d61df03e Iustin Pop
9017 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
9018 d1c2dd75 Iustin Pop
      result = ial.in_text
9019 298fe380 Iustin Pop
    else:
9020 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
9021 d1c2dd75 Iustin Pop
      result = ial.out_text
9022 298fe380 Iustin Pop
    return result