lib/cmdlib.py (revision 5b349fd1)

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks only after having
    acquired the ones at the previous levels. This function is called just
    before acquiring locks at a particular level, but after acquiring the ones
    at lower levels, and permits such calculations. It can be used to modify
    self.needed_locks, and by default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_' as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # could-be-a-function warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


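# Illustrative sketch, not part of the original module: a minimal LogicalUnit
# subclass following the rules from the class docstring above.  The hook path,
# the opcode attribute ("instance_name") and the behaviour are made up purely
# for demonstration.
class _ExampleNoopInstanceLU(LogicalUnit):
  """Example LU that locks a single instance and reports what it would do.

  """
  HPATH = "instance-noop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # expand the (possibly short) name and declare the instance-level lock
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    env = {"OP_TARGET": self.op.instance_name}
    return env, [self.cfg.GetMasterNode()], []

  def CheckPrereq(self):
    # nothing to verify for this no-op example
    pass

  def Exec(self, feedback_fn):
    feedback_fn("Doing nothing for %s" % self.op.instance_name)
    return self.op.instance_name

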
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


424 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
425 83120a01 Michael Hanselmann

426 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
427 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
428 e4376078 Iustin Pop
  @type nodes: list
429 e4376078 Iustin Pop
  @param nodes: list of node names or None for all nodes
430 e4376078 Iustin Pop
  @rtype: list
431 e4376078 Iustin Pop
  @return: the list of nodes, sorted
432 083a91c9 Iustin Pop
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
433 83120a01 Michael Hanselmann

434 83120a01 Michael Hanselmann
  """
435 3312b702 Iustin Pop
  if not isinstance(nodes, list):
436 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
437 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
438 dcb93971 Michael Hanselmann
439 ea47808a Guido Trotter
  if not nodes:
440 ea47808a Guido Trotter
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
441 ea47808a Guido Trotter
      " non-empty list of nodes whose name is to be expanded.")
442 dcb93971 Michael Hanselmann
443 61dabca4 Iustin Pop
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
444 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
445 3312b702 Iustin Pop
446 3312b702 Iustin Pop
447 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
448 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
449 3312b702 Iustin Pop

450 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
451 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
452 e4376078 Iustin Pop
  @type instances: list
453 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
454 e4376078 Iustin Pop
  @rtype: list
455 e4376078 Iustin Pop
  @return: the list of instances, sorted
456 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
457 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
458 3312b702 Iustin Pop

459 3312b702 Iustin Pop
  """
460 3312b702 Iustin Pop
  if not isinstance(instances, list):
461 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'",
462 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
463 3312b702 Iustin Pop
464 3312b702 Iustin Pop
  if instances:
465 cf26a87a Iustin Pop
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
466 3312b702 Iustin Pop
  else:
467 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
468 a7f5dc98 Iustin Pop
  return wanted
469 dcb93971 Michael Hanselmann
470 dcb93971 Michael Hanselmann
471 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
472 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
473 83120a01 Michael Hanselmann

474 a2d2e1a7 Iustin Pop
  @type static: L{utils.FieldSet}
475 31bf511f Iustin Pop
  @param static: static fields set
476 a2d2e1a7 Iustin Pop
  @type dynamic: L{utils.FieldSet}
477 31bf511f Iustin Pop
  @param dynamic: dynamic fields set
478 83120a01 Michael Hanselmann

479 83120a01 Michael Hanselmann
  """
480 a2d2e1a7 Iustin Pop
  f = utils.FieldSet()
481 31bf511f Iustin Pop
  f.Extend(static)
482 31bf511f Iustin Pop
  f.Extend(dynamic)
483 dcb93971 Michael Hanselmann
484 31bf511f Iustin Pop
  delta = f.NonMatching(selected)
485 31bf511f Iustin Pop
  if delta:
486 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
487 5c983ee5 Iustin Pop
                               % ",".join(delta), errors.ECODE_INVAL)
488 dcb93971 Michael Hanselmann
489 dcb93971 Michael Hanselmann
490 a5961235 Iustin Pop
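# Illustrative sketch, not part of the original module: how a query LU
# typically validates user-requested output fields; the field names below are
# made up.
def _ExampleCheckQueryFields(selected):
  """Validate a hypothetical set of query fields against the known ones."""
  _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
                     dynamic=utils.FieldSet("dfree", "dtotal"),
                     selected=selected)

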
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


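# Illustrative usage sketch, not part of the original module: an LU that must
# not touch a running instance would typically call, from its CheckPrereq,
#
#   _CheckInstanceDown(self, instance, "cannot reinstall")
#
# which raises OpPrereqError with errors.ECODE_STATE both when the instance is
# marked admin_up and when the hypervisor on the primary node still reports it
# as running.

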
def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


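# Illustrative usage sketch, not part of the original module: resolving a
# short name through the wrappers above (the names are made up),
#
#   node_name = _ExpandNodeName(self.cfg, "node1")
#
# returns the resolved full name if the configuration knows the node and
# raises OpPrereqError with errors.ECODE_NOENT otherwise.

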
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


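# Illustrative sketch, not part of the original module: what a call to
# _BuildInstanceHookEnv looks like for a hypothetical single-NIC, single-disk
# instance.  Every value below is made up for demonstration only.
def _ExampleInstanceHookEnv():
  """Return a sample hook environment, for documentation purposes only."""
  return _BuildInstanceHookEnv("instance1.example.com", "node1.example.com",
                               ["node2.example.com"], "debian-image", True,
                               128, 1,
                               [("198.51.100.10", "aa:00:00:00:00:01",
                                 constants.NIC_MODE_BRIDGED, "xen-br0")],
                               constants.DT_DRBD8, [(1024, "rw")],
                               {}, {}, constants.HT_XEN_PVM)

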
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


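# Worked example for _DecideSelfPromotion above (values made up): with
# candidate_pool_size = 10 and GetMasterCandidateStats reporting
# mc_now = mc_should = 3, the adjusted target becomes min(3 + 1, 10) = 4,
# and since 3 < 4 the function returns True, i.e. the new node should
# promote itself to master candidate.

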
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


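# Illustrative example, not part of the original module (names made up): for
# an OS object whose supported_variants contains "lenny", the user-supplied
# name "debootstrap+lenny" passes _CheckOSVariant, a bare "debootstrap" is
# rejected because the "+variant" suffix is mandatory, and "debootstrap+sid"
# is rejected as an unsupported variant.

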
def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


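# Illustrative usage sketch, not part of the original module: an LU that needs
# to know what taking a node offline would affect could call, e.g.,
#
#   sec_instances = _GetNodeSecondaryInstances(self.cfg, node_name)
#
# which returns the objects.Instance entries that list node_name among their
# secondary_nodes.

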
def _GetStorageTypeArgs(cfg, storage_type):
868 efb8da02 Michael Hanselmann
  """Returns the arguments for a storage type.
869 efb8da02 Michael Hanselmann

870 efb8da02 Michael Hanselmann
  """
871 efb8da02 Michael Hanselmann
  # Special case for file storage
872 efb8da02 Michael Hanselmann
  if storage_type == constants.ST_FILE:
873 a4d138b7 Michael Hanselmann
    # storage.FileStorage wants a list of storage directories
874 a4d138b7 Michael Hanselmann
    return [[cfg.GetFileStorageDir()]]
875 efb8da02 Michael Hanselmann
876 efb8da02 Michael Hanselmann
  return []
877 efb8da02 Michael Hanselmann
878 efb8da02 Michael Hanselmann
879 2d9005d8 Michael Hanselmann
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
880 2d9005d8 Michael Hanselmann
  faulty = []
881 2d9005d8 Michael Hanselmann
882 2d9005d8 Michael Hanselmann
  for dev in instance.disks:
883 2d9005d8 Michael Hanselmann
    cfg.SetDiskID(dev, node_name)
884 2d9005d8 Michael Hanselmann
885 2d9005d8 Michael Hanselmann
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
886 2d9005d8 Michael Hanselmann
  result.Raise("Failed to get disk status from node %s" % node_name,
887 045dd6d9 Iustin Pop
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
888 2d9005d8 Michael Hanselmann
889 2d9005d8 Michael Hanselmann
  for idx, bdev_status in enumerate(result.payload):
890 2d9005d8 Michael Hanselmann
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
891 2d9005d8 Michael Hanselmann
      faulty.append(idx)
892 2d9005d8 Michael Hanselmann
893 2d9005d8 Michael Hanselmann
  return faulty
894 2d9005d8 Michael Hanselmann
895 2d9005d8 Michael Hanselmann
896 b98bf262 Michael Hanselmann
def _FormatTimestamp(secs):
897 b98bf262 Michael Hanselmann
  """Formats a Unix timestamp with the local timezone.
898 b98bf262 Michael Hanselmann

899 b98bf262 Michael Hanselmann
  """
900 b98bf262 Michael Hanselmann
  return time.strftime("%F %T %Z", time.gmtime(secs))
901 b98bf262 Michael Hanselmann
902 b98bf262 Michael Hanselmann
903 b5f5fae9 Luca Bigliardi
class LUPostInitCluster(LogicalUnit):
904 b5f5fae9 Luca Bigliardi
  """Logical unit for running hooks after cluster initialization.
905 b5f5fae9 Luca Bigliardi

906 b5f5fae9 Luca Bigliardi
  """
907 b5f5fae9 Luca Bigliardi
  HPATH = "cluster-init"
908 b5f5fae9 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
909 b5f5fae9 Luca Bigliardi
  _OP_REQP = []
910 b5f5fae9 Luca Bigliardi
911 b5f5fae9 Luca Bigliardi
  def BuildHooksEnv(self):
912 b5f5fae9 Luca Bigliardi
    """Build hooks env.
913 b5f5fae9 Luca Bigliardi

914 b5f5fae9 Luca Bigliardi
    """
915 b5f5fae9 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
916 b5f5fae9 Luca Bigliardi
    mn = self.cfg.GetMasterNode()
917 b5f5fae9 Luca Bigliardi
    return env, [], [mn]
918 b5f5fae9 Luca Bigliardi
919 b5f5fae9 Luca Bigliardi
  def CheckPrereq(self):
920 b5f5fae9 Luca Bigliardi
    """No prerequisites to check.
921 b5f5fae9 Luca Bigliardi

922 b5f5fae9 Luca Bigliardi
    """
923 b5f5fae9 Luca Bigliardi
    return True
924 b5f5fae9 Luca Bigliardi
925 b5f5fae9 Luca Bigliardi
  def Exec(self, feedback_fn):
926 b5f5fae9 Luca Bigliardi
    """Nothing to do.
927 b5f5fae9 Luca Bigliardi

928 b5f5fae9 Luca Bigliardi
    """
929 b5f5fae9 Luca Bigliardi
    return True
930 b5f5fae9 Luca Bigliardi
931 b5f5fae9 Luca Bigliardi
932 b2c750a4 Luca Bigliardi
class LUDestroyCluster(LogicalUnit):
933 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
934 a8083063 Iustin Pop

935 a8083063 Iustin Pop
  """
936 b2c750a4 Luca Bigliardi
  HPATH = "cluster-destroy"
937 b2c750a4 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
938 a8083063 Iustin Pop
  _OP_REQP = []
939 a8083063 Iustin Pop
940 b2c750a4 Luca Bigliardi
  def BuildHooksEnv(self):
941 b2c750a4 Luca Bigliardi
    """Build hooks env.
942 b2c750a4 Luca Bigliardi

943 b2c750a4 Luca Bigliardi
    """
944 b2c750a4 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
945 b2c750a4 Luca Bigliardi
    return env, [], []
946 b2c750a4 Luca Bigliardi
947 a8083063 Iustin Pop
  def CheckPrereq(self):
948 a8083063 Iustin Pop
    """Check prerequisites.
949 a8083063 Iustin Pop

950 a8083063 Iustin Pop
    This checks whether the cluster is empty.
951 a8083063 Iustin Pop

952 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
953 a8083063 Iustin Pop

954 a8083063 Iustin Pop
    """
955 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
956 a8083063 Iustin Pop
957 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
958 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
959 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
960 5c983ee5 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1),
961 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
962 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
963 db915bd1 Michael Hanselmann
    if instancelist:
964 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
965 5c983ee5 Iustin Pop
                                 " this cluster." % len(instancelist),
966 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
969 a8083063 Iustin Pop
    """Destroys the cluster.
970 a8083063 Iustin Pop

971 a8083063 Iustin Pop
    """
972 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
973 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
974 3141ad3b Luca Bigliardi
975 3141ad3b Luca Bigliardi
    # Run post hooks on master node before it's removed
976 3141ad3b Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
977 3141ad3b Luca Bigliardi
    try:
978 3141ad3b Luca Bigliardi
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
979 3141ad3b Luca Bigliardi
    except:
980 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
981 3141ad3b Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % master)
982 3141ad3b Luca Bigliardi
983 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
984 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
985 b989b9d9 Ken Wehr
986 b989b9d9 Ken Wehr
    if modify_ssh_setup:
987 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
988 b989b9d9 Ken Wehr
      utils.CreateBackup(priv_key)
989 b989b9d9 Ken Wehr
      utils.CreateBackup(pub_key)
990 b989b9d9 Ken Wehr
991 140aa4a8 Iustin Pop
    return master
992 a8083063 Iustin Pop
993 a8083063 Iustin Pop
994 b98bf262 Michael Hanselmann
def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
995 b98bf262 Michael Hanselmann
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
996 b98bf262 Michael Hanselmann
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
997 b98bf262 Michael Hanselmann
  """Verifies certificate details for LUVerifyCluster.
998 b98bf262 Michael Hanselmann

999 b98bf262 Michael Hanselmann
  """
1000 b98bf262 Michael Hanselmann
  if expired:
1001 b98bf262 Michael Hanselmann
    msg = "Certificate %s is expired" % filename
1002 b98bf262 Michael Hanselmann
1003 b98bf262 Michael Hanselmann
    if not_before is not None and not_after is not None:
1004 b98bf262 Michael Hanselmann
      msg += (" (valid from %s to %s)" %
1005 b98bf262 Michael Hanselmann
              (_FormatTimestamp(not_before),
1006 b98bf262 Michael Hanselmann
               _FormatTimestamp(not_after)))
1007 b98bf262 Michael Hanselmann
    elif not_before is not None:
1008 b98bf262 Michael Hanselmann
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
1009 b98bf262 Michael Hanselmann
    elif not_after is not None:
1010 b98bf262 Michael Hanselmann
      msg += " (valid until %s)" % _FormatTimestamp(not_after)
1011 b98bf262 Michael Hanselmann
1012 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR, msg)
1013 b98bf262 Michael Hanselmann
1014 b98bf262 Michael Hanselmann
  elif not_before is not None and not_before > now:
1015 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_WARNING,
1016 b98bf262 Michael Hanselmann
            "Certificate %s not yet valid (valid from %s)" %
1017 b98bf262 Michael Hanselmann
            (filename, _FormatTimestamp(not_before)))
1018 b98bf262 Michael Hanselmann
1019 b98bf262 Michael Hanselmann
  elif not_after is not None:
1020 b98bf262 Michael Hanselmann
    remaining_days = int((not_after - now) / (24 * 3600))
1021 b98bf262 Michael Hanselmann
1022 b98bf262 Michael Hanselmann
    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
1023 b98bf262 Michael Hanselmann
1024 b98bf262 Michael Hanselmann
    if remaining_days <= error_days:
1025 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_ERROR, msg)
1026 b98bf262 Michael Hanselmann
1027 b98bf262 Michael Hanselmann
    if remaining_days <= warn_days:
1028 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_WARNING, msg)
1029 b98bf262 Michael Hanselmann
1030 b98bf262 Michael Hanselmann
  return (None, None)
1031 b98bf262 Michael Hanselmann
1032 b98bf262 Michael Hanselmann
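# Illustrative sketch, not part of the original module: a hypothetical helper
# (its name and the 30/7 day thresholds are assumptions chosen for this
# example) showing how the verdict of _VerifyCertificateInner degrades from
# no finding, to a warning, to an error as the expiration date approaches.
def _ExampleCertificateStates(now=1234567890.0):
  """Returns verdicts for a healthy, an expiring and an expired certificate.

  """
  day = 24 * 3600
  def check(fname, expired, not_after):
    return _VerifyCertificateInner(fname, expired, None, not_after, now,
                                   warn_days=30, error_days=7)
  healthy = check("a.pem", False, now + 90 * day)   # -> (None, None)
  expiring = check("b.pem", False, now + 14 * day)  # -> ETYPE_WARNING
  expired = check("c.pem", True, now - day)         # -> ETYPE_ERROR
  return (healthy, expiring, expired)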
1033 b98bf262 Michael Hanselmann
def _VerifyCertificate(filename):
1034 b98bf262 Michael Hanselmann
  """Verifies a certificate for LUVerifyCluster.
1035 b98bf262 Michael Hanselmann

1036 b98bf262 Michael Hanselmann
  @type filename: string
1037 b98bf262 Michael Hanselmann
  @param filename: Path to PEM file
1038 b98bf262 Michael Hanselmann

1039 b98bf262 Michael Hanselmann
  """
1040 b98bf262 Michael Hanselmann
  try:
1041 b98bf262 Michael Hanselmann
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1042 b98bf262 Michael Hanselmann
                                           utils.ReadFile(filename))
1043 b98bf262 Michael Hanselmann
  except Exception, err: # pylint: disable-msg=W0703
1044 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR,
1045 b98bf262 Michael Hanselmann
            "Failed to load X509 certificate %s: %s" % (filename, err))
1046 b98bf262 Michael Hanselmann
1047 b98bf262 Michael Hanselmann
  # Depending on the pyOpenSSL version, this can just return (None, None)
1048 b98bf262 Michael Hanselmann
  (not_before, not_after) = utils.GetX509CertValidity(cert)
1049 b98bf262 Michael Hanselmann
1050 b98bf262 Michael Hanselmann
  return _VerifyCertificateInner(filename, cert.has_expired(),
1051 b98bf262 Michael Hanselmann
                                 not_before, not_after, time.time())
1052 b98bf262 Michael Hanselmann
1053 b98bf262 Michael Hanselmann
1054 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
1055 a8083063 Iustin Pop
  """Verifies the cluster status.
1056 a8083063 Iustin Pop

1057 a8083063 Iustin Pop
  """
1058 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
1059 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
1060 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
1061 d4b9d97f Guido Trotter
  REQ_BGL = False
1062 d4b9d97f Guido Trotter
1063 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
1064 7c874ee1 Iustin Pop
  TNODE = "node"
1065 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
1066 7c874ee1 Iustin Pop
1067 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1068 b98bf262 Michael Hanselmann
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1069 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1070 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1071 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1072 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1074 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1075 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
1076 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1077 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1078 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
1079 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
1080 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
1081 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
1082 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1083 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1084 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
1085 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
1086 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
1087 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
1088 313b2dd4 Michael Hanselmann
  ENODETIME = (TNODE, "ENODETIME")
1089 7c874ee1 Iustin Pop
1090 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
1091 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
1092 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
1093 a0c9776a Iustin Pop
1094 02c521e4 Iustin Pop
  class NodeImage(object):
1095 02c521e4 Iustin Pop
    """A class representing the logical and physical status of a node.
1096 02c521e4 Iustin Pop

1097 02c521e4 Iustin Pop
    @ivar volumes: a structure as returned from
1098 3a488770 Iustin Pop
        L{ganeti.backend.GetVolumeList} (runtime)
1099 02c521e4 Iustin Pop
    @ivar instances: a list of running instances (runtime)
1100 02c521e4 Iustin Pop
    @ivar pinst: list of configured primary instances (config)
1101 02c521e4 Iustin Pop
    @ivar sinst: list of configured secondary instances (config)
1102 02c521e4 Iustin Pop
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1103 02c521e4 Iustin Pop
        instances for which this node is secondary (config)
1104 02c521e4 Iustin Pop
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1105 02c521e4 Iustin Pop
    @ivar dfree: free disk, as reported by the node (runtime)
1106 02c521e4 Iustin Pop
    @ivar offline: the offline status (config)
1107 02c521e4 Iustin Pop
    @type rpc_fail: boolean
1108 02c521e4 Iustin Pop
    @ivar rpc_fail: whether the RPC verify call failed (overall,
1109 02c521e4 Iustin Pop
        not whether the individual keys were correct) (runtime)
1110 02c521e4 Iustin Pop
    @type lvm_fail: boolean
1111 02c521e4 Iustin Pop
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1112 02c521e4 Iustin Pop
    @type hyp_fail: boolean
1113 02c521e4 Iustin Pop
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1114 02c521e4 Iustin Pop
    @type ghost: boolean
1115 02c521e4 Iustin Pop
    @ivar ghost: whether this is a known node or not (config)
1116 02c521e4 Iustin Pop

1117 02c521e4 Iustin Pop
    """
1118 02c521e4 Iustin Pop
    def __init__(self, offline=False):
1119 02c521e4 Iustin Pop
      self.volumes = {}
1120 02c521e4 Iustin Pop
      self.instances = []
1121 02c521e4 Iustin Pop
      self.pinst = []
1122 02c521e4 Iustin Pop
      self.sinst = []
1123 02c521e4 Iustin Pop
      self.sbp = {}
1124 02c521e4 Iustin Pop
      self.mfree = 0
1125 02c521e4 Iustin Pop
      self.dfree = 0
1126 02c521e4 Iustin Pop
      self.offline = offline
1127 02c521e4 Iustin Pop
      self.rpc_fail = False
1128 02c521e4 Iustin Pop
      self.lvm_fail = False
1129 02c521e4 Iustin Pop
      self.hyp_fail = False
1130 02c521e4 Iustin Pop
      self.ghost = False
1131 02c521e4 Iustin Pop
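  # Illustrative note, not part of the original file: Exec below creates one
  # NodeImage per node and fills in the config-derived fields before the RPC
  # results arrive, roughly (node and instance names are made up):
  #
  #   nimg = self.NodeImage(offline=node.offline)
  #   nimg.pinst.append("inst1.example.com")      # primary on this node
  #   nimg.sbp.setdefault("node1.example.com", []).append("inst2.example.com")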
1132 d4b9d97f Guido Trotter
  def ExpandNames(self):
1133 d4b9d97f Guido Trotter
    self.needed_locks = {
1134 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1135 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1136 d4b9d97f Guido Trotter
    }
1137 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1138 a8083063 Iustin Pop
1139 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
1140 7c874ee1 Iustin Pop
    """Format an error message.
1141 7c874ee1 Iustin Pop

1142 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
1143 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
1144 7c874ee1 Iustin Pop

1145 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
1146 7c874ee1 Iustin Pop

1147 7c874ee1 Iustin Pop
    """
1148 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1149 7c874ee1 Iustin Pop
    itype, etxt = ecode
1150 7c874ee1 Iustin Pop
    # first complete the msg
1151 7c874ee1 Iustin Pop
    if args:
1152 7c874ee1 Iustin Pop
      msg = msg % args
1153 7c874ee1 Iustin Pop
    # then format the whole message
1154 7c874ee1 Iustin Pop
    if self.op.error_codes:
1155 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1156 7c874ee1 Iustin Pop
    else:
1157 7c874ee1 Iustin Pop
      if item:
1158 7c874ee1 Iustin Pop
        item = " " + item
1159 7c874ee1 Iustin Pop
      else:
1160 7c874ee1 Iustin Pop
        item = ""
1161 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1162 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
1163 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
1164 7c874ee1 Iustin Pop
1165 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
1166 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
1167 a0c9776a Iustin Pop

1168 a0c9776a Iustin Pop
    """
1169 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
1170 a0c9776a Iustin Pop
    if cond:
1171 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
1172 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
1173 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1174 a0c9776a Iustin Pop
      self.bad = self.bad or cond
1175 a0c9776a Iustin Pop
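  # Illustrative note, not part of the original file: for the same finding,
  # _Error above produces either a machine-parseable or a human-oriented line,
  # depending on the error_codes opcode parameter, e.g. (node name and message
  # are made up):
  #
  #   error_codes=True:    - ERROR:ENODESSH:node:node2:ssh communication failed
  #   error_codes=False:   - ERROR: node node2: ssh communication failed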
1176 02c521e4 Iustin Pop
  def _VerifyNode(self, ninfo, nresult):
1177 a8083063 Iustin Pop
    """Run multiple tests against a node.
1178 a8083063 Iustin Pop

1179 112f18a5 Iustin Pop
    Test list:
1180 e4376078 Iustin Pop

1181 a8083063 Iustin Pop
      - compares ganeti version
1182 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
1183 a8083063 Iustin Pop
      - checks config file checksum
1184 a8083063 Iustin Pop
      - checks ssh to other nodes
1185 a8083063 Iustin Pop

1186 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1187 02c521e4 Iustin Pop
    @param ninfo: the node to check
1188 02c521e4 Iustin Pop
    @param nresult: the results from the node
1189 02c521e4 Iustin Pop
    @rtype: boolean
1190 02c521e4 Iustin Pop
    @return: whether overall this call was successful (and we can expect
1191 02c521e4 Iustin Pop
         reasonable values in the response)
1192 098c0958 Michael Hanselmann

1193 a8083063 Iustin Pop
    """
1194 02c521e4 Iustin Pop
    node = ninfo.name
1195 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1196 25361b9a Iustin Pop
1197 02c521e4 Iustin Pop
    # main result, nresult should be a non-empty dict
1198 02c521e4 Iustin Pop
    test = not nresult or not isinstance(nresult, dict)
1199 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1200 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1201 a0c9776a Iustin Pop
    if test:
1202 02c521e4 Iustin Pop
      return False
1203 25361b9a Iustin Pop
1204 a8083063 Iustin Pop
    # compares ganeti version
1205 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1206 02c521e4 Iustin Pop
    remote_version = nresult.get("version", None)
1207 a0c9776a Iustin Pop
    test = not (remote_version and
1208 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1209 a0c9776a Iustin Pop
                len(remote_version) == 2)
1210 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1211 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1212 a0c9776a Iustin Pop
    if test:
1213 02c521e4 Iustin Pop
      return False
1214 a0c9776a Iustin Pop
1215 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1216 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1217 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1218 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1219 a0c9776a Iustin Pop
    if test:
1220 02c521e4 Iustin Pop
      return False
1221 a8083063 Iustin Pop
1222 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1223 a8083063 Iustin Pop
1224 e9ce0a64 Iustin Pop
    # full package version
1225 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1226 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1227 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1228 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1229 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1230 e9ce0a64 Iustin Pop
1231 02c521e4 Iustin Pop
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1232 02c521e4 Iustin Pop
    if isinstance(hyp_result, dict):
1233 02c521e4 Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1234 02c521e4 Iustin Pop
        test = hv_result is not None
1235 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1236 02c521e4 Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1237 a8083063 Iustin Pop
1238 a8083063 Iustin Pop
1239 02c521e4 Iustin Pop
    test = nresult.get(constants.NV_NODESETUP,
1240 02c521e4 Iustin Pop
                           ["Missing NODESETUP results"])
1241 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1242 02c521e4 Iustin Pop
             "; ".join(test))
1243 02c521e4 Iustin Pop
1244 02c521e4 Iustin Pop
    return True
1245 02c521e4 Iustin Pop
1246 02c521e4 Iustin Pop
  def _VerifyNodeTime(self, ninfo, nresult,
1247 02c521e4 Iustin Pop
                      nvinfo_starttime, nvinfo_endtime):
1248 02c521e4 Iustin Pop
    """Check the node time.
1249 02c521e4 Iustin Pop

1250 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1251 02c521e4 Iustin Pop
    @param ninfo: the node to check
1252 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1253 02c521e4 Iustin Pop
    @param nvinfo_starttime: the start time of the RPC call
1254 02c521e4 Iustin Pop
    @param nvinfo_endtime: the end time of the RPC call
1255 02c521e4 Iustin Pop

1256 02c521e4 Iustin Pop
    """
1257 02c521e4 Iustin Pop
    node = ninfo.name
1258 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1259 02c521e4 Iustin Pop
1260 02c521e4 Iustin Pop
    ntime = nresult.get(constants.NV_TIME, None)
1261 02c521e4 Iustin Pop
    try:
1262 02c521e4 Iustin Pop
      ntime_merged = utils.MergeTime(ntime)
1263 02c521e4 Iustin Pop
    except (ValueError, TypeError):
1264 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1265 02c521e4 Iustin Pop
      return
1266 02c521e4 Iustin Pop
1267 02c521e4 Iustin Pop
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1268 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1269 02c521e4 Iustin Pop
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1270 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1271 02c521e4 Iustin Pop
    else:
1272 02c521e4 Iustin Pop
      ntime_diff = None
1273 02c521e4 Iustin Pop
1274 02c521e4 Iustin Pop
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1275 02c521e4 Iustin Pop
             "Node time diverges by at least %s from master node time",
1276 02c521e4 Iustin Pop
             ntime_diff)
1277 02c521e4 Iustin Pop
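  # Illustrative numeric sketch, not part of the original file, assuming a
  # clock-skew allowance of 150 seconds: for an RPC window of [1000.0, 1002.0]
  # and a node reporting 1200.0, the node time exceeds 1002.0 + 150, so the
  # check above reports a divergence of abs(1200.0 - 1002.0) = "198.0s".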
1278 02c521e4 Iustin Pop
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1279 02c521e4 Iustin Pop
    """Check the node time.
1280 02c521e4 Iustin Pop

1281 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1282 02c521e4 Iustin Pop
    @param ninfo: the node to check
1283 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1284 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1285 02c521e4 Iustin Pop

1286 02c521e4 Iustin Pop
    """
1287 02c521e4 Iustin Pop
    if vg_name is None:
1288 02c521e4 Iustin Pop
      return
1289 02c521e4 Iustin Pop
1290 02c521e4 Iustin Pop
    node = ninfo.name
1291 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1292 02c521e4 Iustin Pop
1293 02c521e4 Iustin Pop
    # checks vg existence and size > 20G
1294 02c521e4 Iustin Pop
    vglist = nresult.get(constants.NV_VGLIST, None)
1295 02c521e4 Iustin Pop
    test = not vglist
1296 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1297 02c521e4 Iustin Pop
    if not test:
1298 02c521e4 Iustin Pop
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1299 02c521e4 Iustin Pop
                                            constants.MIN_VG_SIZE)
1300 02c521e4 Iustin Pop
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1301 02c521e4 Iustin Pop
1302 02c521e4 Iustin Pop
    # check pv names
1303 02c521e4 Iustin Pop
    pvlist = nresult.get(constants.NV_PVLIST, None)
1304 02c521e4 Iustin Pop
    test = pvlist is None
1305 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1306 a0c9776a Iustin Pop
    if not test:
1307 02c521e4 Iustin Pop
      # check that ':' is not present in PV names, since it's a
1308 02c521e4 Iustin Pop
      # special character for lvcreate (denotes the range of PEs to
1309 02c521e4 Iustin Pop
      # use on the PV)
1310 02c521e4 Iustin Pop
      for _, pvname, owner_vg in pvlist:
1311 02c521e4 Iustin Pop
        test = ":" in pvname
1312 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1313 02c521e4 Iustin Pop
                 " '%s' of VG '%s'", pvname, owner_vg)
1314 02c521e4 Iustin Pop
1315 02c521e4 Iustin Pop
  def _VerifyNodeNetwork(self, ninfo, nresult):
1316 02c521e4 Iustin Pop
    """Check the node time.
1317 02c521e4 Iustin Pop

1318 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1319 02c521e4 Iustin Pop
    @param ninfo: the node to check
1320 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1321 02c521e4 Iustin Pop

1322 02c521e4 Iustin Pop
    """
1323 02c521e4 Iustin Pop
    node = ninfo.name
1324 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1325 02c521e4 Iustin Pop
1326 02c521e4 Iustin Pop
    test = constants.NV_NODELIST not in nresult
1327 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1328 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1329 a0c9776a Iustin Pop
    if not test:
1330 02c521e4 Iustin Pop
      if nresult[constants.NV_NODELIST]:
1331 02c521e4 Iustin Pop
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1332 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1333 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1334 25361b9a Iustin Pop
1335 02c521e4 Iustin Pop
    test = constants.NV_NODENETTEST not in nresult
1336 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1337 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1338 a0c9776a Iustin Pop
    if not test:
1339 02c521e4 Iustin Pop
      if nresult[constants.NV_NODENETTEST]:
1340 02c521e4 Iustin Pop
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1341 7c874ee1 Iustin Pop
        for anode in nlist:
1342 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1343 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1344 02c521e4 Iustin Pop
                   anode, nresult[constants.NV_NODENETTEST][anode])
1345 a8083063 Iustin Pop
1346 02c521e4 Iustin Pop
  def _VerifyInstance(self, instance, instanceconfig, node_image):
1347 a8083063 Iustin Pop
    """Verify an instance.
1348 a8083063 Iustin Pop

1349 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1350 a8083063 Iustin Pop
    available on the instance's node.
1351 a8083063 Iustin Pop

1352 a8083063 Iustin Pop
    """
1353 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1354 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1355 a8083063 Iustin Pop
1356 a8083063 Iustin Pop
    node_vol_should = {}
1357 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1358 a8083063 Iustin Pop
1359 a8083063 Iustin Pop
    for node in node_vol_should:
1360 02c521e4 Iustin Pop
      n_img = node_image[node]
1361 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1362 02c521e4 Iustin Pop
        # ignore missing volumes on offline or broken nodes
1363 0a66c968 Iustin Pop
        continue
1364 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1365 02c521e4 Iustin Pop
        test = volume not in n_img.volumes
1366 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1367 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1368 a8083063 Iustin Pop
1369 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1370 02c521e4 Iustin Pop
      pri_img = node_image[node_current]
1371 02c521e4 Iustin Pop
      test = instance not in pri_img.instances and not pri_img.offline
1372 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1373 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1374 a0c9776a Iustin Pop
               node_current)
1375 a8083063 Iustin Pop
1376 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1377 a8083063 Iustin Pop
      if node != node_current:
1378 02c521e4 Iustin Pop
        test = instance in n_img.instances
1379 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1380 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1381 a8083063 Iustin Pop
1382 02c521e4 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
1383 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1384 a8083063 Iustin Pop

1385 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1386 a8083063 Iustin Pop
    reported as unknown.
1387 a8083063 Iustin Pop

1388 a8083063 Iustin Pop
    """
1389 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1390 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1391 02c521e4 Iustin Pop
        # skip non-healthy nodes
1392 02c521e4 Iustin Pop
        continue
1393 02c521e4 Iustin Pop
      for volume in n_img.volumes:
1394 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1395 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1396 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1397 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1398 a8083063 Iustin Pop
1399 02c521e4 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_image):
1400 a8083063 Iustin Pop
    """Verify the list of running instances.
1401 a8083063 Iustin Pop

1402 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1403 a8083063 Iustin Pop

1404 a8083063 Iustin Pop
    """
1405 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1406 02c521e4 Iustin Pop
      for o_inst in n_img.instances:
1407 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1408 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1409 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1410 a8083063 Iustin Pop
1411 02c521e4 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1412 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1413 2b3b6ddd Guido Trotter

1414 02c521e4 Iustin Pop
    Check that if one single node dies we can still start all the
1415 02c521e4 Iustin Pop
    instances it was primary for.
1416 2b3b6ddd Guido Trotter

1417 2b3b6ddd Guido Trotter
    """
1418 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1419 02c521e4 Iustin Pop
      # This code checks that every node which is now listed as a
1420 02c521e4 Iustin Pop
      # secondary has enough memory to host all the instances it would
1421 02c521e4 Iustin Pop
      # have to take over, should a single other node in the cluster fail.
1422 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1423 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1424 02c521e4 Iustin Pop
      # WARNING: we currently take into account down instances as well
1425 02c521e4 Iustin Pop
      # as up ones, considering that even if they're down someone
1426 02c521e4 Iustin Pop
      # might want to start them even in the event of a node failure.
1427 02c521e4 Iustin Pop
      for prinode, instances in n_img.sbp.items():
1428 2b3b6ddd Guido Trotter
        needed_mem = 0
1429 2b3b6ddd Guido Trotter
        for instance in instances:
1430 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1431 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1432 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1433 02c521e4 Iustin Pop
        test = n_img.mfree < needed_mem
1434 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1435 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1436 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1437 2b3b6ddd Guido Trotter
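  # Illustrative numeric sketch, not part of the original file: if node2 is
  # secondary for two auto-balanced instances whose primary is node1, needing
  # 1024 and 2048 MiB of memory, and node2 reports mfree=2048, then
  # needed_mem=3072 > 2048 and the check above flags node2 as not N+1 safe
  # with respect to node1 failing.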
1438 02c521e4 Iustin Pop
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1439 02c521e4 Iustin Pop
                       master_files):
1440 02c521e4 Iustin Pop
    """Verifies and computes the node required file checksums.
1441 02c521e4 Iustin Pop

1442 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1443 02c521e4 Iustin Pop
    @param ninfo: the node to check
1444 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1445 02c521e4 Iustin Pop
    @param file_list: required list of files
1446 02c521e4 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
1447 02c521e4 Iustin Pop
    @param master_files: list of files that only masters should have
1448 02c521e4 Iustin Pop

1449 02c521e4 Iustin Pop
    """
1450 02c521e4 Iustin Pop
    node = ninfo.name
1451 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1452 02c521e4 Iustin Pop
1453 02c521e4 Iustin Pop
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1454 02c521e4 Iustin Pop
    test = not isinstance(remote_cksum, dict)
1455 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1456 02c521e4 Iustin Pop
             "node hasn't returned file checksum data")
1457 02c521e4 Iustin Pop
    if test:
1458 02c521e4 Iustin Pop
      return
1459 02c521e4 Iustin Pop
1460 02c521e4 Iustin Pop
    for file_name in file_list:
1461 02c521e4 Iustin Pop
      node_is_mc = ninfo.master_candidate
1462 02c521e4 Iustin Pop
      must_have = (file_name not in master_files) or node_is_mc
1463 02c521e4 Iustin Pop
      # missing
1464 02c521e4 Iustin Pop
      test1 = file_name not in remote_cksum
1465 02c521e4 Iustin Pop
      # invalid checksum
1466 02c521e4 Iustin Pop
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1467 02c521e4 Iustin Pop
      # existing and good
1468 02c521e4 Iustin Pop
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1469 02c521e4 Iustin Pop
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1470 02c521e4 Iustin Pop
               "file '%s' missing", file_name)
1471 02c521e4 Iustin Pop
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1472 02c521e4 Iustin Pop
               "file '%s' has wrong checksum", file_name)
1473 02c521e4 Iustin Pop
      # not candidate and this is not a must-have file
1474 02c521e4 Iustin Pop
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1475 02c521e4 Iustin Pop
               "file '%s' should not exist on non master"
1476 02c521e4 Iustin Pop
               " candidates (and the file is outdated)", file_name)
1477 02c521e4 Iustin Pop
      # all good, except non-master/non-must have combination
1478 02c521e4 Iustin Pop
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1479 02c521e4 Iustin Pop
               "file '%s' should not exist"
1480 02c521e4 Iustin Pop
               " on non master candidates", file_name)
1481 02c521e4 Iustin Pop
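  # Illustrative summary, not part of the original file, of the per-file
  # outcomes checked above ("required": the node is a master candidate or the
  # file is not restricted to master candidates):
  #
  #   missing        + required      -> "file ... missing"
  #   wrong checksum + required      -> "file ... has wrong checksum"
  #   wrong checksum + not required  -> outdated copy on a non-candidate
  #   good copy      + not required  -> should not exist on non-candidates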
1482 02c521e4 Iustin Pop
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
1483 02c521e4 Iustin Pop
    """Verifies and the node DRBD status.
1484 02c521e4 Iustin Pop

1485 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1486 02c521e4 Iustin Pop
    @param ninfo: the node to check
1487 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1488 02c521e4 Iustin Pop
    @param instanceinfo: the dict of instances
1489 02c521e4 Iustin Pop
    @param drbd_map: the DRBD map as returned by
1490 02c521e4 Iustin Pop
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1491 02c521e4 Iustin Pop

1492 02c521e4 Iustin Pop
    """
1493 02c521e4 Iustin Pop
    node = ninfo.name
1494 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1495 02c521e4 Iustin Pop
1496 02c521e4 Iustin Pop
    # compute the DRBD minors
1497 02c521e4 Iustin Pop
    node_drbd = {}
1498 02c521e4 Iustin Pop
    for minor, instance in drbd_map[node].items():
1499 02c521e4 Iustin Pop
      test = instance not in instanceinfo
1500 02c521e4 Iustin Pop
      _ErrorIf(test, self.ECLUSTERCFG, None,
1501 02c521e4 Iustin Pop
               "ghost instance '%s' in temporary DRBD map", instance)
1502 02c521e4 Iustin Pop
        # ghost instance should not be running, but otherwise we
1503 02c521e4 Iustin Pop
        # don't give double warnings (both ghost instance and
1504 02c521e4 Iustin Pop
        # unallocated minor in use)
1505 02c521e4 Iustin Pop
      if test:
1506 02c521e4 Iustin Pop
        node_drbd[minor] = (instance, False)
1507 02c521e4 Iustin Pop
      else:
1508 02c521e4 Iustin Pop
        instance = instanceinfo[instance]
1509 02c521e4 Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
1510 02c521e4 Iustin Pop
1511 02c521e4 Iustin Pop
    # and now check them
1512 02c521e4 Iustin Pop
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1513 02c521e4 Iustin Pop
    test = not isinstance(used_minors, (tuple, list))
1514 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEDRBD, node,
1515 02c521e4 Iustin Pop
             "cannot parse drbd status file: %s", str(used_minors))
1516 02c521e4 Iustin Pop
    if test:
1517 02c521e4 Iustin Pop
      # we cannot check drbd status
1518 02c521e4 Iustin Pop
      return
1519 02c521e4 Iustin Pop
1520 02c521e4 Iustin Pop
    for minor, (iname, must_exist) in node_drbd.items():
1521 02c521e4 Iustin Pop
      test = minor not in used_minors and must_exist
1522 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1523 02c521e4 Iustin Pop
               "drbd minor %d of instance %s is not active", minor, iname)
1524 02c521e4 Iustin Pop
    for minor in used_minors:
1525 02c521e4 Iustin Pop
      test = minor not in node_drbd
1526 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1527 02c521e4 Iustin Pop
               "unallocated drbd minor %d is in use", minor)
1528 02c521e4 Iustin Pop
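  # Illustrative sketch, not part of the original file (node and instance
  # names are made up): the drbd_map argument above is a dict of dicts, e.g.
  #   {"node1.example.com": {0: "inst1.example.com", 1: "inst2.example.com"}}
  # so node_drbd ends up mapping each minor to (instance_name, must_exist).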
1529 02c521e4 Iustin Pop
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1530 02c521e4 Iustin Pop
    """Verifies and updates the node volume data.
1531 02c521e4 Iustin Pop

1532 02c521e4 Iustin Pop
    This function will update a L{NodeImage}'s internal structures
1533 02c521e4 Iustin Pop
    with data from the remote call.
1534 02c521e4 Iustin Pop

1535 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1536 02c521e4 Iustin Pop
    @param ninfo: the node to check
1537 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1538 02c521e4 Iustin Pop
    @param nimg: the node image object
1539 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1540 02c521e4 Iustin Pop

1541 02c521e4 Iustin Pop
    """
1542 02c521e4 Iustin Pop
    node = ninfo.name
1543 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1544 02c521e4 Iustin Pop
1545 02c521e4 Iustin Pop
    nimg.lvm_fail = True
1546 02c521e4 Iustin Pop
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1547 02c521e4 Iustin Pop
    if vg_name is None:
1548 02c521e4 Iustin Pop
      pass
1549 02c521e4 Iustin Pop
    elif isinstance(lvdata, basestring):
1550 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1551 02c521e4 Iustin Pop
               utils.SafeEncode(lvdata))
1552 02c521e4 Iustin Pop
    elif not isinstance(lvdata, dict):
1553 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1554 02c521e4 Iustin Pop
    else:
1555 02c521e4 Iustin Pop
      nimg.volumes = lvdata
1556 02c521e4 Iustin Pop
      nimg.lvm_fail = False
1557 02c521e4 Iustin Pop
1558 02c521e4 Iustin Pop
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1559 02c521e4 Iustin Pop
    """Verifies and updates the node instance list.
1560 02c521e4 Iustin Pop

1561 02c521e4 Iustin Pop
    If the listing was successful, then updates this node's instance
1562 02c521e4 Iustin Pop
    list. Otherwise, it marks the RPC call as failed for the instance
1563 02c521e4 Iustin Pop
    list key.
1564 02c521e4 Iustin Pop

1565 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1566 02c521e4 Iustin Pop
    @param ninfo: the node to check
1567 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1568 02c521e4 Iustin Pop
    @param nimg: the node image object
1569 02c521e4 Iustin Pop

1570 02c521e4 Iustin Pop
    """
1571 02c521e4 Iustin Pop
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1572 02c521e4 Iustin Pop
    test = not isinstance(idata, list)
1573 02c521e4 Iustin Pop
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1574 02c521e4 Iustin Pop
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1575 02c521e4 Iustin Pop
    if test:
1576 02c521e4 Iustin Pop
      nimg.hyp_fail = True
1577 02c521e4 Iustin Pop
    else:
1578 02c521e4 Iustin Pop
      nimg.instances = idata
1579 02c521e4 Iustin Pop
1580 02c521e4 Iustin Pop
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1581 02c521e4 Iustin Pop
    """Verifies and computes a node information map
1582 02c521e4 Iustin Pop

1583 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1584 02c521e4 Iustin Pop
    @param ninfo: the node to check
1585 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1586 02c521e4 Iustin Pop
    @param nimg: the node image object
1587 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1588 02c521e4 Iustin Pop

1589 02c521e4 Iustin Pop
    """
1590 02c521e4 Iustin Pop
    node = ninfo.name
1591 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1592 02c521e4 Iustin Pop
1593 02c521e4 Iustin Pop
    # try to read free memory (from the hypervisor)
1594 02c521e4 Iustin Pop
    hv_info = nresult.get(constants.NV_HVINFO, None)
1595 02c521e4 Iustin Pop
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1596 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1597 02c521e4 Iustin Pop
    if not test:
1598 02c521e4 Iustin Pop
      try:
1599 02c521e4 Iustin Pop
        nimg.mfree = int(hv_info["memory_free"])
1600 02c521e4 Iustin Pop
      except (ValueError, TypeError):
1601 02c521e4 Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1602 02c521e4 Iustin Pop
                 "node returned invalid nodeinfo, check hypervisor")
1603 02c521e4 Iustin Pop
1604 02c521e4 Iustin Pop
    # FIXME: devise a free space model for file based instances as well
1605 02c521e4 Iustin Pop
    if vg_name is not None:
1606 02c521e4 Iustin Pop
      test = (constants.NV_VGLIST not in nresult or
1607 02c521e4 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST])
1608 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODELVM, node,
1609 02c521e4 Iustin Pop
               "node didn't return data for the volume group '%s'"
1610 02c521e4 Iustin Pop
               " - it is either missing or broken", vg_name)
1611 02c521e4 Iustin Pop
      if not test:
1612 02c521e4 Iustin Pop
        try:
1613 02c521e4 Iustin Pop
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1614 02c521e4 Iustin Pop
        except (ValueError, TypeError):
1615 02c521e4 Iustin Pop
          _ErrorIf(True, self.ENODERPC, node,
1616 02c521e4 Iustin Pop
                   "node returned invalid LVM info, check LVM status")
1617 02c521e4 Iustin Pop
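  # Illustrative sketch, not part of the original file (values are made up):
  # the slice of the node_verify payload consumed by the helper above looks
  # roughly like
  #   {constants.NV_HVINFO: {"memory_free": 2048},
  #    constants.NV_VGLIST: {"xenvg": 102400}}
  # yielding nimg.mfree = 2048 and nimg.dfree = 102400 for vg_name "xenvg".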
1618 a8083063 Iustin Pop
  def CheckPrereq(self):
1619 a8083063 Iustin Pop
    """Check prerequisites.
1620 a8083063 Iustin Pop

1621 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1622 e54c4c5e Guido Trotter
    all its members are valid.
1623 a8083063 Iustin Pop

1624 a8083063 Iustin Pop
    """
1625 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1626 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1627 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1628 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1629 a8083063 Iustin Pop
1630 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1631 d8fff41c Guido Trotter
    """Build hooks env.
1632 d8fff41c Guido Trotter

1633 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks are run only in the post phase; if they fail, their
1634 d8fff41c Guido Trotter
    output is logged in the verify output and the verification fails.
1635 d8fff41c Guido Trotter

1636 d8fff41c Guido Trotter
    """
1637 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1638 35e994e9 Iustin Pop
    env = {
1639 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1640 35e994e9 Iustin Pop
      }
1641 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1642 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1643 35e994e9 Iustin Pop
1644 d8fff41c Guido Trotter
    return env, [], all_nodes
1645 d8fff41c Guido Trotter
1646 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1647 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1648 a8083063 Iustin Pop

1649 a8083063 Iustin Pop
    """
1650 a0c9776a Iustin Pop
    self.bad = False
1651 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1652 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1653 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1654 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1655 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1656 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1657 a8083063 Iustin Pop
1658 b98bf262 Michael Hanselmann
    # Check the cluster certificates
1659 b98bf262 Michael Hanselmann
    for cert_filename in constants.ALL_CERT_FILES:
1660 b98bf262 Michael Hanselmann
      (errcode, msg) = _VerifyCertificate(cert_filename)
1661 b98bf262 Michael Hanselmann
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1662 b98bf262 Michael Hanselmann
1663 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1664 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1665 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1666 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1667 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1668 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1669 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1670 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1671 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1672 02c521e4 Iustin Pop
    n_offline = 0 # Count of offline nodes
1673 02c521e4 Iustin Pop
    n_drained = 0 # Count of nodes being drained
1674 02c521e4 Iustin Pop
    node_vol_should = {}
1675 a8083063 Iustin Pop
1676 a8083063 Iustin Pop
    # FIXME: verify OS list
1677 a8083063 Iustin Pop
    # do local checksums
1678 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1679 112f18a5 Iustin Pop
1680 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1681 d3100055 Michael Hanselmann
    file_names.extend(constants.ALL_CERT_FILES)
1682 112f18a5 Iustin Pop
    file_names.extend(master_files)
1683 112f18a5 Iustin Pop
1684 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1685 a8083063 Iustin Pop
1686 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1687 a8083063 Iustin Pop
    node_verify_param = {
1688 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1689 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1690 82e37788 Iustin Pop
                              if not node.offline],
1691 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1692 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1693 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1694 82e37788 Iustin Pop
                                 if not node.offline],
1695 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1696 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1697 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1698 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1699 313b2dd4 Michael Hanselmann
      constants.NV_TIME: None,
1700 a8083063 Iustin Pop
      }
1701 313b2dd4 Michael Hanselmann
1702 cc9e1230 Guido Trotter
    if vg_name is not None:
1703 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1704 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1705 d091393e Iustin Pop
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1706 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1707 313b2dd4 Michael Hanselmann
1708 02c521e4 Iustin Pop
    # Build our expected cluster state
1709 02c521e4 Iustin Pop
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
1710 02c521e4 Iustin Pop
                      for node in nodeinfo)
1711 02c521e4 Iustin Pop
1712 02c521e4 Iustin Pop
    for instance in instancelist:
1713 02c521e4 Iustin Pop
      inst_config = instanceinfo[instance]
1714 02c521e4 Iustin Pop
1715 02c521e4 Iustin Pop
      for nname in inst_config.all_nodes:
1716 02c521e4 Iustin Pop
        if nname not in node_image:
1717 02c521e4 Iustin Pop
          # ghost node
1718 02c521e4 Iustin Pop
          gnode = self.NodeImage()
1719 02c521e4 Iustin Pop
          gnode.ghost = True
1720 02c521e4 Iustin Pop
          node_image[nname] = gnode
1721 02c521e4 Iustin Pop
1722 02c521e4 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1723 02c521e4 Iustin Pop
1724 02c521e4 Iustin Pop
      pnode = inst_config.primary_node
1725 02c521e4 Iustin Pop
      node_image[pnode].pinst.append(instance)
1726 02c521e4 Iustin Pop
1727 02c521e4 Iustin Pop
      for snode in inst_config.secondary_nodes:
1728 02c521e4 Iustin Pop
        nimg = node_image[snode]
1729 02c521e4 Iustin Pop
        nimg.sinst.append(instance)
1730 02c521e4 Iustin Pop
        if pnode not in nimg.sbp:
1731 02c521e4 Iustin Pop
          nimg.sbp[pnode] = []
1732 02c521e4 Iustin Pop
        nimg.sbp[pnode].append(instance)
1733 02c521e4 Iustin Pop
1734 02c521e4 Iustin Pop
    # At this point, we have the in-memory data structures complete,
1735 02c521e4 Iustin Pop
    # except for the runtime information, which we'll gather next
1736 02c521e4 Iustin Pop
1737 313b2dd4 Michael Hanselmann
    # Due to the way our RPC system works, exact response times cannot be
1738 313b2dd4 Michael Hanselmann
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1739 313b2dd4 Michael Hanselmann
    # time before and after executing the request, we can at least have a time
1740 313b2dd4 Michael Hanselmann
    # window.
1741 313b2dd4 Michael Hanselmann
    nvinfo_starttime = time.time()
1742 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1743 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1744 313b2dd4 Michael Hanselmann
    nvinfo_endtime = time.time()
1745 a8083063 Iustin Pop
1746 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1747 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1748 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1749 6d2e83d5 Iustin Pop
1750 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1751 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1752 112f18a5 Iustin Pop
      node = node_i.name
1753 02c521e4 Iustin Pop
      nimg = node_image[node]
1754 25361b9a Iustin Pop
1755 0a66c968 Iustin Pop
      if node_i.offline:
1756 7c874ee1 Iustin Pop
        if verbose:
1757 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1758 02c521e4 Iustin Pop
        n_offline += 1
1759 0a66c968 Iustin Pop
        continue
1760 0a66c968 Iustin Pop
1761 112f18a5 Iustin Pop
      if node == master_node:
1762 25361b9a Iustin Pop
        ntype = "master"
1763 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1764 25361b9a Iustin Pop
        ntype = "master candidate"
1765 22f0f71d Iustin Pop
      elif node_i.drained:
1766 22f0f71d Iustin Pop
        ntype = "drained"
1767 02c521e4 Iustin Pop
        n_drained += 1
1768 112f18a5 Iustin Pop
      else:
1769 25361b9a Iustin Pop
        ntype = "regular"
1770 7c874ee1 Iustin Pop
      if verbose:
1771 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1772 25361b9a Iustin Pop
1773 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1774 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1775 6f68a739 Iustin Pop
      if msg:
1776 02c521e4 Iustin Pop
        nimg.rpc_fail = True
1777 25361b9a Iustin Pop
        continue
1778 25361b9a Iustin Pop
1779 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1780 a8083063 Iustin Pop
1781 02c521e4 Iustin Pop
      nimg.call_ok = self._VerifyNode(node_i, nresult)
1782 02c521e4 Iustin Pop
      self._VerifyNodeNetwork(node_i, nresult)
1783 02c521e4 Iustin Pop
      self._VerifyNodeLVM(node_i, nresult, vg_name)
1784 02c521e4 Iustin Pop
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
1785 02c521e4 Iustin Pop
                            master_files)
1786 02c521e4 Iustin Pop
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
1787 02c521e4 Iustin Pop
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
1788 a8083063 Iustin Pop
1789 02c521e4 Iustin Pop
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
1790 02c521e4 Iustin Pop
      self._UpdateNodeInstances(node_i, nresult, nimg)
1791 02c521e4 Iustin Pop
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
1792 a8083063 Iustin Pop
1793 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1794 a8083063 Iustin Pop
    for instance in instancelist:
1795 7c874ee1 Iustin Pop
      if verbose:
1796 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1797 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1798 02c521e4 Iustin Pop
      self._VerifyInstance(instance, inst_config, node_image)
1799 832261fd Iustin Pop
      inst_nodes_offline = []
1800 a8083063 Iustin Pop
1801 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1802 02c521e4 Iustin Pop
      pnode_img = node_image[pnode]
1803 02c521e4 Iustin Pop
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1804 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1805 a0c9776a Iustin Pop
               " primary node failed", instance)
1806 93e4c50b Guido Trotter
1807 02c521e4 Iustin Pop
      if pnode_img.offline:
1808 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1809 832261fd Iustin Pop
1810 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1811 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1812 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1813 93e4c50b Guido Trotter
      # supported either.
1814 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1815 02c521e4 Iustin Pop
      if not inst_config.secondary_nodes:
1816 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1817 02c521e4 Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
1818 02c521e4 Iustin Pop
               instance, "instance has multiple secondary nodes: %s",
1819 02c521e4 Iustin Pop
               utils.CommaJoin(inst_config.secondary_nodes),
1820 02c521e4 Iustin Pop
               code=self.ETYPE_WARNING)
1821 93e4c50b Guido Trotter
1822 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1823 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1824 3924700f Iustin Pop
1825 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1826 02c521e4 Iustin Pop
        s_img = node_image[snode]
1827 02c521e4 Iustin Pop
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
1828 02c521e4 Iustin Pop
                 "instance %s, connection to secondary node failed", instance)
1829 02c521e4 Iustin Pop
1830 02c521e4 Iustin Pop
        if s_img.offline:
1831 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1832 832261fd Iustin Pop
1833 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1834 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1835 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1836 1f864b60 Iustin Pop
               utils.CommaJoin(inst_nodes_offline))
1837 02c521e4 Iustin Pop
      # ... or ghost nodes
1838 02c521e4 Iustin Pop
      for node in inst_config.all_nodes:
1839 02c521e4 Iustin Pop
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
1840 02c521e4 Iustin Pop
                 "instance lives on ghost node %s", node)
1841 93e4c50b Guido Trotter
1842 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1843 02c521e4 Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_image)
1844 a8083063 Iustin Pop
1845 02c521e4 Iustin Pop
    feedback_fn("* Verifying oprhan instances")
1846 02c521e4 Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_image)
1847 a8083063 Iustin Pop
1848 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1849 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1850 02c521e4 Iustin Pop
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
1851 2b3b6ddd Guido Trotter
1852 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1853 2b3b6ddd Guido Trotter
    if i_non_redundant:
1854 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1855 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1856 2b3b6ddd Guido Trotter
1857 3924700f Iustin Pop
    if i_non_a_balanced:
1858 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1859 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1860 3924700f Iustin Pop
1861 0a66c968 Iustin Pop
    if n_offline:
1862 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
1863 0a66c968 Iustin Pop
1864 22f0f71d Iustin Pop
    if n_drained:
1865 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
1866 22f0f71d Iustin Pop
1867 a0c9776a Iustin Pop
    return not self.bad
1868 a8083063 Iustin Pop
1869 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1870 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1871 e4376078 Iustin Pop

1872 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1873 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1874 d8fff41c Guido Trotter

1875 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1876 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1877 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1878 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
1879 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1880 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1881 e4376078 Iustin Pop
        and hook results
1882 d8fff41c Guido Trotter

1883 d8fff41c Guido Trotter
    """
1884 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1885 38206f3c Iustin Pop
    # their results
1886 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1887 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1888 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1889 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1890 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1891 7c874ee1 Iustin Pop
1892 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1893 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1894 7c874ee1 Iustin Pop
        msg = res.fail_msg
1895 a0c9776a Iustin Pop
        test = msg and not res.offline
1896 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1897 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1898 dd9e9f9c Michael Hanselmann
        if res.offline or msg:
1899 dd9e9f9c Michael Hanselmann
          # No need to investigate payload if node is offline or gave an error.
1900 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1901 a0c9776a Iustin Pop
          # overrides self.bad
1902 7c874ee1 Iustin Pop
          lu_result = 1
1903 7c874ee1 Iustin Pop
          continue
1904 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1905 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1906 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1907 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1908 a0c9776a Iustin Pop
          if test:
1909 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1910 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1911 6d7b472a Iustin Pop
            lu_result = 0
1912 d8fff41c Guido Trotter
1913 d8fff41c Guido Trotter
      return lu_result
1914 d8fff41c Guido Trotter
1915 a8083063 Iustin Pop
1916 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1917 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1918 2c95a8d4 Iustin Pop

1919 2c95a8d4 Iustin Pop
  """
1920 2c95a8d4 Iustin Pop
  _OP_REQP = []
1921 d4b9d97f Guido Trotter
  REQ_BGL = False
1922 d4b9d97f Guido Trotter
1923 d4b9d97f Guido Trotter
  def ExpandNames(self):
1924 d4b9d97f Guido Trotter
    self.needed_locks = {
1925 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1926 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1927 d4b9d97f Guido Trotter
    }
1928 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1929 2c95a8d4 Iustin Pop
1930 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1931 2c95a8d4 Iustin Pop
    """Check prerequisites.
1932 2c95a8d4 Iustin Pop

1933 2c95a8d4 Iustin Pop
    This has no prerequisites.
1934 2c95a8d4 Iustin Pop

1935 2c95a8d4 Iustin Pop
    """
1936 2c95a8d4 Iustin Pop
    pass
1937 2c95a8d4 Iustin Pop
1938 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1939 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1940 2c95a8d4 Iustin Pop

1941 29d376ec Iustin Pop
    @rtype: tuple of three items
1942 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1943 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1944 29d376ec Iustin Pop
        missing volumes)
1945 29d376ec Iustin Pop

1946 2c95a8d4 Iustin Pop
    """
1947 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
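    # note: the three names alias the members of the "result" tuple, so
    # filling them in below also fills in the value that is returned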
1948 2c95a8d4 Iustin Pop
1949 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1950 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1951 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1952 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1953 2c95a8d4 Iustin Pop
1954 2c95a8d4 Iustin Pop
    nv_dict = {}
1955 2c95a8d4 Iustin Pop
    for inst in instances:
1956 2c95a8d4 Iustin Pop
      inst_lvs = {}
1957 0d68c45d Iustin Pop
      if (not inst.admin_up or
1958 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1959 2c95a8d4 Iustin Pop
        continue
1960 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1961 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
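      # (e.g. a hypothetical {"inst1": {"nodeA": ["lv1", "lv2"]}} becomes
      #  {("nodeA", "lv1"): inst1, ("nodeA", "lv2"): inst1}, so the LV
      #  listings gathered below can be matched with a single dict lookup)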
1962 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1963 2c95a8d4 Iustin Pop
        for vol in vol_list:
1964 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1965 2c95a8d4 Iustin Pop
1966 2c95a8d4 Iustin Pop
    if not nv_dict:
1967 2c95a8d4 Iustin Pop
      return result
1968 2c95a8d4 Iustin Pop
1969 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1970 2c95a8d4 Iustin Pop
1971 2c95a8d4 Iustin Pop
    for node in nodes:
1972 2c95a8d4 Iustin Pop
      # node_volume
1973 29d376ec Iustin Pop
      node_res = node_lvs[node]
1974 29d376ec Iustin Pop
      if node_res.offline:
1975 ea9ddc07 Iustin Pop
        continue
1976 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1977 29d376ec Iustin Pop
      if msg:
1978 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1979 29d376ec Iustin Pop
        res_nodes[node] = msg
1980 2c95a8d4 Iustin Pop
        continue
1981 2c95a8d4 Iustin Pop
1982 29d376ec Iustin Pop
      lvs = node_res.payload
1983 1122eb25 Iustin Pop
      for lv_name, (_, _, lv_online) in lvs.items():
1984 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1985 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1986 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1987 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1988 2c95a8d4 Iustin Pop
1989 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1990 b63ed789 Iustin Pop
    # data better
1991 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1992 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1993 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1994 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1995 b63ed789 Iustin Pop
1996 2c95a8d4 Iustin Pop
    return result
1997 2c95a8d4 Iustin Pop
1998 2c95a8d4 Iustin Pop
1999 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
2000 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
2001 60975797 Iustin Pop

2002 60975797 Iustin Pop
  """
2003 60975797 Iustin Pop
  _OP_REQP = ["instances"]
2004 60975797 Iustin Pop
  REQ_BGL = False
2005 60975797 Iustin Pop
2006 60975797 Iustin Pop
  def ExpandNames(self):
2007 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
2008 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
2009 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2010 60975797 Iustin Pop
2011 60975797 Iustin Pop
    if self.op.instances:
2012 60975797 Iustin Pop
      self.wanted_names = []
2013 60975797 Iustin Pop
      for name in self.op.instances:
2014 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
2015 60975797 Iustin Pop
        self.wanted_names.append(full_name)
2016 60975797 Iustin Pop
      self.needed_locks = {
2017 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
2018 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
2019 60975797 Iustin Pop
        }
2020 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2021 60975797 Iustin Pop
    else:
2022 60975797 Iustin Pop
      self.wanted_names = None
2023 60975797 Iustin Pop
      self.needed_locks = {
2024 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
2025 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2026 60975797 Iustin Pop
        }
2027 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2028 60975797 Iustin Pop
2029 60975797 Iustin Pop
  def DeclareLocks(self, level):
2030 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2031 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
2032 60975797 Iustin Pop
2033 60975797 Iustin Pop
  def CheckPrereq(self):
2034 60975797 Iustin Pop
    """Check prerequisites.
2035 60975797 Iustin Pop

2036 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
2037 60975797 Iustin Pop

2038 60975797 Iustin Pop
    """
2039 60975797 Iustin Pop
    if self.wanted_names is None:
2040 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2041 60975797 Iustin Pop
2042 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2043 60975797 Iustin Pop
                             in self.wanted_names]
2044 60975797 Iustin Pop
2045 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
2046 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
2047 b775c337 Iustin Pop

2048 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
2049 b775c337 Iustin Pop
    children have smaller disk size.
2050 b775c337 Iustin Pop

2051 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
2052 b775c337 Iustin Pop

2053 b775c337 Iustin Pop
    """
2054 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
2055 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
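      # children[0] is the data device; the metadata child is left alone
      # on purpose (see the comment on the recursive call below)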
2056 b775c337 Iustin Pop
      fchild = disk.children[0]
2057 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
2058 b775c337 Iustin Pop
      if mismatch:
2059 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2060 b775c337 Iustin Pop
                     fchild.size, disk.size)
2061 b775c337 Iustin Pop
        fchild.size = disk.size
2062 b775c337 Iustin Pop
2063 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
2064 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
2065 b775c337 Iustin Pop
    else:
2066 b775c337 Iustin Pop
      return False
2067 b775c337 Iustin Pop
2068 60975797 Iustin Pop
  def Exec(self, feedback_fn):
2069 60975797 Iustin Pop
    """Verify the size of cluster disks.
2070 60975797 Iustin Pop

2071 60975797 Iustin Pop
    """
2072 60975797 Iustin Pop
    # TODO: check child disks too
2073 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
2074 60975797 Iustin Pop
    per_node_disks = {}
2075 60975797 Iustin Pop
    for instance in self.wanted_instances:
2076 60975797 Iustin Pop
      pnode = instance.primary_node
2077 60975797 Iustin Pop
      if pnode not in per_node_disks:
2078 60975797 Iustin Pop
        per_node_disks[pnode] = []
2079 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
2080 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
2081 60975797 Iustin Pop
2082 60975797 Iustin Pop
    changed = []
2083 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
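      # dskl holds (instance, idx, disk) tuples; the disk objects are
      # copied, presumably so that SetDiskID below does not modify the
      # objects held in the configuration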
2084 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
2085 4d9e6835 Iustin Pop
      for dsk in newl:
2086 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
2087 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
2088 3cebe102 Michael Hanselmann
      if result.fail_msg:
2089 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
2090 60975797 Iustin Pop
                        " %s, ignoring", node)
2091 60975797 Iustin Pop
        continue
2092 60975797 Iustin Pop
      if len(result.data) != len(dskl):
2093 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
2094 60975797 Iustin Pop
                        node)
2095 60975797 Iustin Pop
        continue
2096 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.data):
2097 60975797 Iustin Pop
        if size is None:
2098 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
2099 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
2100 60975797 Iustin Pop
          continue
2101 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
2102 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
2103 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
2104 60975797 Iustin Pop
          continue
2105 60975797 Iustin Pop
        size = size >> 20
2106 60975797 Iustin Pop
        if size != disk.size:
2107 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2108 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
2109 60975797 Iustin Pop
                       instance.name, disk.size, size)
2110 60975797 Iustin Pop
          disk.size = size
2111 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2112 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
2113 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
2114 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2115 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
2116 60975797 Iustin Pop
    return changed
2117 60975797 Iustin Pop
2118 60975797 Iustin Pop
2119 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
2120 07bd8a51 Iustin Pop
  """Rename the cluster.
2121 07bd8a51 Iustin Pop

2122 07bd8a51 Iustin Pop
  """
2123 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
2124 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
2125 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
2126 07bd8a51 Iustin Pop
2127 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
2128 07bd8a51 Iustin Pop
    """Build hooks env.
2129 07bd8a51 Iustin Pop

2130 07bd8a51 Iustin Pop
    """
2131 07bd8a51 Iustin Pop
    env = {
2132 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2133 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
2134 07bd8a51 Iustin Pop
      }
2135 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2136 47a72f18 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2137 47a72f18 Iustin Pop
    return env, [mn], all_nodes
2138 07bd8a51 Iustin Pop
2139 07bd8a51 Iustin Pop
  def CheckPrereq(self):
2140 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
2141 07bd8a51 Iustin Pop

2142 07bd8a51 Iustin Pop
    """
2143 104f4ca1 Iustin Pop
    hostname = utils.GetHostInfo(self.op.name)
2144 07bd8a51 Iustin Pop
2145 bcf043c9 Iustin Pop
    new_name = hostname.name
2146 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
2147 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
2148 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
2149 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
2150 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2151 5c983ee5 Iustin Pop
                                 " cluster has changed",
2152 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2153 07bd8a51 Iustin Pop
    if new_ip != old_ip:
2154 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2155 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2156 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
2157 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
2158 07bd8a51 Iustin Pop
2159 07bd8a51 Iustin Pop
    self.op.name = new_name
2160 07bd8a51 Iustin Pop
2161 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
2162 07bd8a51 Iustin Pop
    """Rename the cluster.
2163 07bd8a51 Iustin Pop

2164 07bd8a51 Iustin Pop
    """
2165 07bd8a51 Iustin Pop
    clustername = self.op.name
2166 07bd8a51 Iustin Pop
    ip = self.ip
2167 07bd8a51 Iustin Pop
2168 07bd8a51 Iustin Pop
    # shutdown the master IP
2169 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
2170 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
2171 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
2172 07bd8a51 Iustin Pop
2173 07bd8a51 Iustin Pop
    try:
2174 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2175 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
2176 55cf7d83 Iustin Pop
      cluster.master_ip = ip
2177 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
2178 ec85e3d5 Iustin Pop
2179 ec85e3d5 Iustin Pop
      # update the known hosts file
2180 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2181 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
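      # the file was just (re)written locally on the master, so it only
      # needs to be pushed to the remaining nodes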
2182 ec85e3d5 Iustin Pop
      try:
2183 ec85e3d5 Iustin Pop
        node_list.remove(master)
2184 ec85e3d5 Iustin Pop
      except ValueError:
2185 ec85e3d5 Iustin Pop
        pass
2186 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
2187 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
2188 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2189 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2190 6f7d4e75 Iustin Pop
        if msg:
2191 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2192 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2193 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
2194 ec85e3d5 Iustin Pop
2195 07bd8a51 Iustin Pop
    finally:
2196 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
2197 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2198 b726aff0 Iustin Pop
      if msg:
2199 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
2200 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
2201 07bd8a51 Iustin Pop
2202 07bd8a51 Iustin Pop
2203 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
2204 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
2205 8084f9f6 Manuel Franceschini

2206 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
2207 e4376078 Iustin Pop
  @param disk: the disk to check
2208 5bbd3f7f Michael Hanselmann
  @rtype: boolean
2209 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
2210 8084f9f6 Manuel Franceschini

2211 8084f9f6 Manuel Franceschini
  """
2212 8084f9f6 Manuel Franceschini
  if disk.children:
2213 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
2214 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
2215 8084f9f6 Manuel Franceschini
        return True
2216 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
2217 8084f9f6 Manuel Franceschini
2218 8084f9f6 Manuel Franceschini
2219 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
2220 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
2221 8084f9f6 Manuel Franceschini

2222 8084f9f6 Manuel Franceschini
  """
2223 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
2224 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
2225 8084f9f6 Manuel Franceschini
  _OP_REQP = []
2226 c53279cf Guido Trotter
  REQ_BGL = False
2227 c53279cf Guido Trotter
2228 3994f455 Iustin Pop
  def CheckArguments(self):
2229 4b7735f9 Iustin Pop
    """Check parameters
2230 4b7735f9 Iustin Pop

2231 4b7735f9 Iustin Pop
    """
2232 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
2233 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
2234 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2235 4b7735f9 Iustin Pop
      try:
2236 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
2237 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
2238 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
2239 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
2240 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
2241 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
2242 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2243 4b7735f9 Iustin Pop
2244 c53279cf Guido Trotter
  def ExpandNames(self):
2245 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
2246 c53279cf Guido Trotter
    # all nodes to be modified.
2247 c53279cf Guido Trotter
    self.needed_locks = {
2248 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
2249 c53279cf Guido Trotter
    }
2250 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2251 8084f9f6 Manuel Franceschini
2252 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
2253 8084f9f6 Manuel Franceschini
    """Build hooks env.
2254 8084f9f6 Manuel Franceschini

2255 8084f9f6 Manuel Franceschini
    """
2256 8084f9f6 Manuel Franceschini
    env = {
2257 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2258 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
2259 8084f9f6 Manuel Franceschini
      }
2260 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2261 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
2262 8084f9f6 Manuel Franceschini
2263 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
2264 8084f9f6 Manuel Franceschini
    """Check prerequisites.
2265 8084f9f6 Manuel Franceschini

2266 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
2267 5f83e263 Iustin Pop
    if the given volume group is valid.
2268 8084f9f6 Manuel Franceschini

2269 8084f9f6 Manuel Franceschini
    """
2270 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
2271 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
2272 8084f9f6 Manuel Franceschini
      for inst in instances:
2273 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
2274 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
2275 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
2276 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
2277 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
2278 8084f9f6 Manuel Franceschini
2279 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2280 779c15bb Iustin Pop
2281 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
2282 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
2283 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
2284 8084f9f6 Manuel Franceschini
      for node in node_list:
2285 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
2286 e480923b Iustin Pop
        if msg:
2287 781de953 Iustin Pop
          # ignoring down node
2288 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
2289 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
2290 781de953 Iustin Pop
          continue
2291 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2292 781de953 Iustin Pop
                                              self.op.vg_name,
2293 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
2294 8084f9f6 Manuel Franceschini
        if vgstatus:
2295 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
2296 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2297 8084f9f6 Manuel Franceschini
2298 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
2299 5af3da74 Guido Trotter
    # validate params changes
2300 779c15bb Iustin Pop
    if self.op.beparams:
2301 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2302 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
2303 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2304 779c15bb Iustin Pop
2305 5af3da74 Guido Trotter
    if self.op.nicparams:
2306 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2307 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
2308 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2309 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2310 90b704a1 Guido Trotter
      nic_errors = []
2311 90b704a1 Guido Trotter
2312 90b704a1 Guido Trotter
      # check all instances for consistency
2313 90b704a1 Guido Trotter
      for instance in self.cfg.GetAllInstancesInfo().values():
2314 90b704a1 Guido Trotter
        for nic_idx, nic in enumerate(instance.nics):
2315 90b704a1 Guido Trotter
          params_copy = copy.deepcopy(nic.nicparams)
2316 90b704a1 Guido Trotter
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2317 90b704a1 Guido Trotter
2318 90b704a1 Guido Trotter
          # check parameter syntax
2319 90b704a1 Guido Trotter
          try:
2320 90b704a1 Guido Trotter
            objects.NIC.CheckParameterSyntax(params_filled)
2321 90b704a1 Guido Trotter
          except errors.ConfigurationError, err:
2322 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: %s" %
2323 90b704a1 Guido Trotter
                              (instance.name, nic_idx, err))
2324 90b704a1 Guido Trotter
2325 90b704a1 Guido Trotter
          # if we're moving instances to routed, check that they have an ip
2326 90b704a1 Guido Trotter
          target_mode = params_filled[constants.NIC_MODE]
2327 90b704a1 Guido Trotter
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2328 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2329 90b704a1 Guido Trotter
                              (instance.name, nic_idx))
2330 90b704a1 Guido Trotter
      if nic_errors:
2331 90b704a1 Guido Trotter
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2332 90b704a1 Guido Trotter
                                   "\n".join(nic_errors))
2333 5af3da74 Guido Trotter
2334 779c15bb Iustin Pop
    # hypervisor list/parameters
2335 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
2336 779c15bb Iustin Pop
    if self.op.hvparams:
2337 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
2338 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2339 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2340 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
2341 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
2342 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
2343 779c15bb Iustin Pop
        else:
2344 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
2345 779c15bb Iustin Pop
2346 17463d22 Renรฉ Nussbaumer
    # os hypervisor parameters
2347 17463d22 Renรฉ Nussbaumer
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2348 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2349 17463d22 Renรฉ Nussbaumer
      if not isinstance(self.op.os_hvp, dict):
2350 17463d22 Renรฉ Nussbaumer
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2351 17463d22 Renรฉ Nussbaumer
                                   errors.ECODE_INVAL)
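      # self.op.os_hvp is a nested dict, {os_name: {hv_name: {param: value}}};
      # merge it entry by entry on top of the current cluster-level os_hvp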
2352 17463d22 Renรฉ Nussbaumer
      for os_name, hvs in self.op.os_hvp.items():
2353 17463d22 Renรฉ Nussbaumer
        if not isinstance(hvs, dict):
2354 17463d22 Renรฉ Nussbaumer
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2355 17463d22 Renรฉ Nussbaumer
                                      " input"), errors.ECODE_INVAL)
2356 17463d22 Renรฉ Nussbaumer
        if os_name not in self.new_os_hvp:
2357 17463d22 Renรฉ Nussbaumer
          self.new_os_hvp[os_name] = hvs
2358 17463d22 Renรฉ Nussbaumer
        else:
2359 17463d22 Renรฉ Nussbaumer
          for hv_name, hv_dict in hvs.items():
2360 17463d22 Renรฉ Nussbaumer
            if hv_name not in self.new_os_hvp[os_name]:
2361 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name] = hv_dict
2362 17463d22 Renรฉ Nussbaumer
            else:
2363 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2364 17463d22 Renรฉ Nussbaumer
2365 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2366 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
2367 b119bccb Guido Trotter
      if not self.hv_list:
2368 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2369 5c983ee5 Iustin Pop
                                   " least one member",
2370 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2371 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2372 b119bccb Guido Trotter
      if invalid_hvs:
2373 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2374 ab3e6da8 Iustin Pop
                                   " entries: %s" %
2375 ab3e6da8 Iustin Pop
                                   utils.CommaJoin(invalid_hvs),
2376 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2377 779c15bb Iustin Pop
    else:
2378 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
2379 779c15bb Iustin Pop
2380 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2381 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
2382 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
2383 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2384 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
2385 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
2386 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
2387 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2388 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2389 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
2390 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
2391 779c15bb Iustin Pop
2392 cced4c39 Iustin Pop
    if self.op.os_hvp:
2393 cced4c39 Iustin Pop
      # no need to check any newly-enabled hypervisors, since the
2394 cced4c39 Iustin Pop
      # defaults have already been checked in the above code-block
2395 cced4c39 Iustin Pop
      for os_name, os_hvp in self.new_os_hvp.items():
2396 cced4c39 Iustin Pop
        for hv_name, hv_params in os_hvp.items():
2397 cced4c39 Iustin Pop
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2398 cced4c39 Iustin Pop
          # we need to fill in the new os_hvp on top of the actual hv_p
2399 cced4c39 Iustin Pop
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2400 cced4c39 Iustin Pop
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2401 cced4c39 Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2402 cced4c39 Iustin Pop
          hv_class.CheckParameterSyntax(new_osp)
2403 cced4c39 Iustin Pop
          _CheckHVParams(self, node_list, hv_name, new_osp)
2404 cced4c39 Iustin Pop
2405 cced4c39 Iustin Pop
2406 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
2407 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
2408 8084f9f6 Manuel Franceschini

2409 8084f9f6 Manuel Franceschini
    """
2410 779c15bb Iustin Pop
    if self.op.vg_name is not None:
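      # an empty string for vg_name means "disable LVM storage", which is
      # stored in the configuration as None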
2411 b2482333 Guido Trotter
      new_volume = self.op.vg_name
2412 b2482333 Guido Trotter
      if not new_volume:
2413 b2482333 Guido Trotter
        new_volume = None
2414 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
2415 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
2416 779c15bb Iustin Pop
      else:
2417 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
2418 779c15bb Iustin Pop
                    " state, not changing")
2419 779c15bb Iustin Pop
    if self.op.hvparams:
2420 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2421 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2422 17463d22 Renรฉ Nussbaumer
      self.cluster.os_hvp = self.new_os_hvp
2423 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2424 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2425 779c15bb Iustin Pop
    if self.op.beparams:
2426 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2427 5af3da74 Guido Trotter
    if self.op.nicparams:
2428 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2429 5af3da74 Guido Trotter
2430 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2431 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2432 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2433 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2434 4b7735f9 Iustin Pop
2435 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2436 8084f9f6 Manuel Franceschini
2437 8084f9f6 Manuel Franceschini
2438 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2439 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2440 28eddce5 Guido Trotter

2441 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2442 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2443 28eddce5 Guido Trotter
  makes sure those are copied.
2444 28eddce5 Guido Trotter

2445 28eddce5 Guido Trotter
  @param lu: calling logical unit
2446 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2447 28eddce5 Guido Trotter

2448 28eddce5 Guido Trotter
  """
2449 28eddce5 Guido Trotter
  # 1. Gather target nodes
2450 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2451 6819dc49 Iustin Pop
  dist_nodes = lu.cfg.GetOnlineNodeList()
2452 28eddce5 Guido Trotter
  if additional_nodes is not None:
2453 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2454 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
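    # the files are read from (and already present on) the master itself,
    # so there is no point in uploading them back to it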
2455 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2456 a4eae71f Michael Hanselmann
2457 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2458 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2459 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2460 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2461 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2462 6b7d5878 Michael Hanselmann
                    constants.CONFD_HMAC_KEY,
2463 28eddce5 Guido Trotter
                   ])
2464 e1b8653f Guido Trotter
2465 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2466 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2467 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2468 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2469 e1b8653f Guido Trotter
2470 28eddce5 Guido Trotter
  # 3. Perform the files upload
2471 28eddce5 Guido Trotter
  for fname in dist_files:
2472 28eddce5 Guido Trotter
    if os.path.exists(fname):
2473 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2474 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2475 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2476 6f7d4e75 Iustin Pop
        if msg:
2477 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2478 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2479 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2480 28eddce5 Guido Trotter
2481 28eddce5 Guido Trotter
2482 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2483 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2484 afee0879 Iustin Pop

2485 afee0879 Iustin Pop
  This is a very simple LU.
2486 afee0879 Iustin Pop

2487 afee0879 Iustin Pop
  """
2488 afee0879 Iustin Pop
  _OP_REQP = []
2489 afee0879 Iustin Pop
  REQ_BGL = False
2490 afee0879 Iustin Pop
2491 afee0879 Iustin Pop
  def ExpandNames(self):
2492 afee0879 Iustin Pop
    self.needed_locks = {
2493 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2494 afee0879 Iustin Pop
    }
2495 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2496 afee0879 Iustin Pop
2497 afee0879 Iustin Pop
  def CheckPrereq(self):
2498 afee0879 Iustin Pop
    """Check prerequisites.
2499 afee0879 Iustin Pop

2500 afee0879 Iustin Pop
    """
2501 afee0879 Iustin Pop
2502 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2503 afee0879 Iustin Pop
    """Redistribute the configuration.
2504 afee0879 Iustin Pop

2505 afee0879 Iustin Pop
    """
2506 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2507 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2508 afee0879 Iustin Pop
2509 afee0879 Iustin Pop
2510 b6c07b79 Michael Hanselmann
def _WaitForSync(lu, instance, oneshot=False):
2511 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2512 a8083063 Iustin Pop

2513 a8083063 Iustin Pop
  """
2514 a8083063 Iustin Pop
  if not instance.disks:
2515 a8083063 Iustin Pop
    return True
2516 a8083063 Iustin Pop
2517 a8083063 Iustin Pop
  if not oneshot:
2518 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2519 a8083063 Iustin Pop
2520 a8083063 Iustin Pop
  node = instance.primary_node
2521 a8083063 Iustin Pop
2522 a8083063 Iustin Pop
  for dev in instance.disks:
2523 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2524 a8083063 Iustin Pop
2525 6bcb1446 Michael Hanselmann
  # TODO: Convert to utils.Retry
2526 6bcb1446 Michael Hanselmann
2527 a8083063 Iustin Pop
  retries = 0
2528 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2529 a8083063 Iustin Pop
  while True:
2530 a8083063 Iustin Pop
    max_time = 0
2531 a8083063 Iustin Pop
    done = True
2532 a8083063 Iustin Pop
    cumul_degraded = False
2533 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2534 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2535 3efa9051 Iustin Pop
    if msg:
2536 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2537 a8083063 Iustin Pop
      retries += 1
2538 a8083063 Iustin Pop
      if retries >= 10:
2539 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2540 3ecf6786 Iustin Pop
                                 " aborting." % node)
2541 a8083063 Iustin Pop
      time.sleep(6)
2542 a8083063 Iustin Pop
      continue
2543 3efa9051 Iustin Pop
    rstats = rstats.payload
2544 a8083063 Iustin Pop
    retries = 0
2545 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2546 a8083063 Iustin Pop
      if mstat is None:
2547 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2548 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
2549 a8083063 Iustin Pop
        continue
2550 36145b12 Michael Hanselmann
2551 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2552 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2553 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2554 a8083063 Iustin Pop
        done = False
2555 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2556 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2557 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2558 a8083063 Iustin Pop
        else:
2559 a8083063 Iustin Pop
          rem_time = "no time estimate"
2560 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2561 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2562 4d4a651d Michael Hanselmann
                         rem_time))
2563 fbafd7a8 Iustin Pop
2564 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2565 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2566 fbafd7a8 Iustin Pop
    # we force restart of the loop
2567 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2568 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2569 fbafd7a8 Iustin Pop
      degr_retries -= 1
2570 fbafd7a8 Iustin Pop
      time.sleep(1)
2571 fbafd7a8 Iustin Pop
      continue
2572 fbafd7a8 Iustin Pop
2573 a8083063 Iustin Pop
    if done or oneshot:
2574 a8083063 Iustin Pop
      break
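    # sleep for at most one minute between polls, or less if the largest
    # estimated remaining time is shorter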
2575 a8083063 Iustin Pop
2576 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2577 a8083063 Iustin Pop
2578 a8083063 Iustin Pop
  if done:
2579 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2580 a8083063 Iustin Pop
  return not cumul_degraded
2581 a8083063 Iustin Pop
2582 a8083063 Iustin Pop
2583 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2584 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2585 a8083063 Iustin Pop

2586 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2587 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2588 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2589 0834c866 Iustin Pop

2590 a8083063 Iustin Pop
  """
2591 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2592 a8083063 Iustin Pop
2593 a8083063 Iustin Pop
  result = True
2594 96acbc09 Michael Hanselmann
2595 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
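    # the device status can only be queried on nodes where the device is
    # expected to be assembled: the primary node, or a secondary for
    # device types that assemble on secondaries (e.g. DRBD)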
2596 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2597 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2598 23829f6f Iustin Pop
    if msg:
2599 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2600 23829f6f Iustin Pop
      result = False
2601 23829f6f Iustin Pop
    elif not rstats.payload:
2602 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2603 a8083063 Iustin Pop
      result = False
2604 a8083063 Iustin Pop
    else:
2605 96acbc09 Michael Hanselmann
      if ldisk:
2606 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2607 96acbc09 Michael Hanselmann
      else:
2608 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2609 96acbc09 Michael Hanselmann
2610 a8083063 Iustin Pop
  if dev.children:
2611 a8083063 Iustin Pop
    for child in dev.children:
2612 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2613 a8083063 Iustin Pop
2614 a8083063 Iustin Pop
  return result
2615 a8083063 Iustin Pop
2616 a8083063 Iustin Pop
2617 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2618 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2619 a8083063 Iustin Pop

2620 a8083063 Iustin Pop
  """
2621 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2622 6bf01bbb Guido Trotter
  REQ_BGL = False
2623 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2624 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2625 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2626 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2627 a8083063 Iustin Pop
2628 6bf01bbb Guido Trotter
  def ExpandNames(self):
2629 1f9430d6 Iustin Pop
    if self.op.names:
2630 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2631 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2632 1f9430d6 Iustin Pop
2633 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2634 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2635 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2636 1f9430d6 Iustin Pop
2637 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2638 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2639 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2640 6bf01bbb Guido Trotter
    self.needed_locks = {}
2641 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2642 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2643 6bf01bbb Guido Trotter
2644 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2645 6bf01bbb Guido Trotter
    """Check prerequisites.
2646 6bf01bbb Guido Trotter

2647 6bf01bbb Guido Trotter
    """
2648 6bf01bbb Guido Trotter
2649 1f9430d6 Iustin Pop
  @staticmethod
2650 857121ad Iustin Pop
  def _DiagnoseByOS(rlist):
2651 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2652 1f9430d6 Iustin Pop

2653 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2654 1f9430d6 Iustin Pop

2655 e4376078 Iustin Pop
    @rtype: dict
2656 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2657 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants)
        as values, eg::
2658 e4376078 Iustin Pop

2659 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2660 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
2661 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2662 e4376078 Iustin Pop
          }
2663 1f9430d6 Iustin Pop

2664 1f9430d6 Iustin Pop
    """
2665 1f9430d6 Iustin Pop
    all_os = {}
2666 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2667 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2668 a6ab004b Iustin Pop
    # make all OSes invalid
2669 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2670 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2671 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2672 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2673 1f9430d6 Iustin Pop
        continue
2674 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2675 255dcebd Iustin Pop
        if name not in all_os:
2676 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2677 1f9430d6 Iustin Pop
          # for each node in node_list
2678 255dcebd Iustin Pop
          all_os[name] = {}
2679 a6ab004b Iustin Pop
          for nname in good_nodes:
2680 255dcebd Iustin Pop
            all_os[name][nname] = []
2681 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2682 1f9430d6 Iustin Pop
    return all_os
2683 a8083063 Iustin Pop
2684 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2685 a8083063 Iustin Pop
    """Compute the list of OSes.
2686 a8083063 Iustin Pop

2687 a8083063 Iustin Pop
    """
2688 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2689 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2690 857121ad Iustin Pop
    pol = self._DiagnoseByOS(node_data)
2691 1f9430d6 Iustin Pop
    output = []
2692 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2693 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2694 1e288a26 Guido Trotter
2695 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2696 1f9430d6 Iustin Pop
      row = []
2697 1e288a26 Guido Trotter
      if calc_valid:
2698 1e288a26 Guido Trotter
        valid = True
2699 1e288a26 Guido Trotter
        variants = None
2700 1e288a26 Guido Trotter
        for osl in os_data.values():
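          # each element of osl is a (path, status, diagnose, variants)
          # tuple as built by _DiagnoseByOS; only the first entry reported
          # for each node is considered here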
2701 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2702 1e288a26 Guido Trotter
          if not valid:
2703 1e288a26 Guido Trotter
            variants = None
2704 1e288a26 Guido Trotter
            break
2705 1e288a26 Guido Trotter
          if calc_variants:
2706 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2707 1e288a26 Guido Trotter
            if variants is None:
2708 1e288a26 Guido Trotter
              variants = node_variants
2709 1e288a26 Guido Trotter
            else:
2710 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2711 1e288a26 Guido Trotter
2712 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2713 1f9430d6 Iustin Pop
        if field == "name":
2714 1f9430d6 Iustin Pop
          val = os_name
2715 1f9430d6 Iustin Pop
        elif field == "valid":
2716 1e288a26 Guido Trotter
          val = valid
2717 1f9430d6 Iustin Pop
        elif field == "node_status":
2718 255dcebd Iustin Pop
          # this is just a copy of the dict
2719 1f9430d6 Iustin Pop
          val = {}
2720 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2721 255dcebd Iustin Pop
            val[node_name] = nos_list
2722 1e288a26 Guido Trotter
        elif field == "variants":
2723 1e288a26 Guido Trotter
          val = variants
2724 1f9430d6 Iustin Pop
        else:
2725 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2726 1f9430d6 Iustin Pop
        row.append(val)
2727 1f9430d6 Iustin Pop
      output.append(row)
2728 1f9430d6 Iustin Pop
2729 1f9430d6 Iustin Pop
    return output
2730 a8083063 Iustin Pop
2731 a8083063 Iustin Pop
2732 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2733 a8083063 Iustin Pop
  """Logical unit for removing a node.
2734 a8083063 Iustin Pop

2735 a8083063 Iustin Pop
  """
2736 a8083063 Iustin Pop
  HPATH = "node-remove"
2737 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2738 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2739 a8083063 Iustin Pop
2740 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2741 a8083063 Iustin Pop
    """Build hooks env.
2742 a8083063 Iustin Pop

2743 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2744 d08869ee Guido Trotter
    node would then be impossible to remove.
2745 a8083063 Iustin Pop

2746 a8083063 Iustin Pop
    """
2747 396e1b78 Michael Hanselmann
    env = {
2748 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2749 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2750 396e1b78 Michael Hanselmann
      }
2751 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2752 9bb31ea8 Iustin Pop
    try:
2753 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2754 9bb31ea8 Iustin Pop
    except ValueError:
2755 9bb31ea8 Iustin Pop
      logging.warning("Node %s which is about to be removed not found"
2756 9bb31ea8 Iustin Pop
                      " in the all nodes list", self.op.node_name)
2757 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2758 a8083063 Iustin Pop
2759 a8083063 Iustin Pop
  def CheckPrereq(self):
2760 a8083063 Iustin Pop
    """Check prerequisites.
2761 a8083063 Iustin Pop

2762 a8083063 Iustin Pop
    This checks:
2763 a8083063 Iustin Pop
     - the node exists in the configuration
2764 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2765 a8083063 Iustin Pop
     - it's not the master
2766 a8083063 Iustin Pop

2767 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2768 a8083063 Iustin Pop

2769 a8083063 Iustin Pop
    """
2770 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
2771 cf26a87a Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2772 cf26a87a Iustin Pop
    assert node is not None
2773 a8083063 Iustin Pop
2774 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2775 a8083063 Iustin Pop
2776 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2777 a8083063 Iustin Pop
    if node.name == masternode:
2778 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2779 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2780 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2781 a8083063 Iustin Pop
2782 a8083063 Iustin Pop
    for instance_name in instance_list:
2783 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2784 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2785 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2786 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2787 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2788 a8083063 Iustin Pop
    self.op.node_name = node.name
2789 a8083063 Iustin Pop
    self.node = node
2790 a8083063 Iustin Pop
2791 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2792 a8083063 Iustin Pop
    """Removes the node from the cluster.
2793 a8083063 Iustin Pop

2794 a8083063 Iustin Pop
    """
2795 a8083063 Iustin Pop
    node = self.node
2796 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2797 9a4f63d1 Iustin Pop
                 node.name)
2798 a8083063 Iustin Pop
2799 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2800 b989b9d9 Ken Wehr
2801 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2802 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2803 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2804 a8083063 Iustin Pop
2805 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2806 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2807 cd46f3b4 Luca Bigliardi
    try:
2808 1122eb25 Iustin Pop
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2809 3cb5c1e3 Luca Bigliardi
    except:
2810 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
2811 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2812 cd46f3b4 Luca Bigliardi
2813 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2814 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2815 0623d351 Iustin Pop
    if msg:
2816 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2817 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2818 c8a0948f Michael Hanselmann
2819 a8083063 Iustin Pop
2820 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2821 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2822 a8083063 Iustin Pop

2823 a8083063 Iustin Pop
  """
2824 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
2825 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2826 35705d8f Guido Trotter
  REQ_BGL = False
2827 19bed813 Iustin Pop
2828 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2829 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2830 19bed813 Iustin Pop
2831 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2832 31bf511f Iustin Pop
    "dtotal", "dfree",
2833 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2834 31bf511f Iustin Pop
    "bootid",
2835 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2836 31bf511f Iustin Pop
    )
2837 31bf511f Iustin Pop
2838 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2839 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2840 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2841 31bf511f Iustin Pop
    "pip", "sip", "tags",
2842 0e67cdbe Iustin Pop
    "master",
2843 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2844 31bf511f Iustin Pop
    )
2845 a8083063 Iustin Pop
2846 35705d8f Guido Trotter
  def ExpandNames(self):
2847 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2848 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2849 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2850 a8083063 Iustin Pop
2851 35705d8f Guido Trotter
    self.needed_locks = {}
2852 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2853 c8d8b4c8 Iustin Pop
2854 c8d8b4c8 Iustin Pop
    if self.op.names:
2855 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2856 35705d8f Guido Trotter
    else:
2857 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2858 c8d8b4c8 Iustin Pop
2859 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2860 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2861 c8d8b4c8 Iustin Pop
    if self.do_locking:
2862 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2863 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2864 c8d8b4c8 Iustin Pop
2865 35705d8f Guido Trotter
  def CheckPrereq(self):
2866 35705d8f Guido Trotter
    """Check prerequisites.
2867 35705d8f Guido Trotter

2868 35705d8f Guido Trotter
    """
2869 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes if the
2870 c8d8b4c8 Iustin Pop
    # list is non-empty; if it is empty, there is no validation to do
2871 c8d8b4c8 Iustin Pop
    pass
2872 a8083063 Iustin Pop
2873 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2874 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2875 a8083063 Iustin Pop

2876 a8083063 Iustin Pop
    """
2877 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2878 c8d8b4c8 Iustin Pop
    if self.do_locking:
2879 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2880 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2881 3fa93523 Guido Trotter
      nodenames = self.wanted
2882 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2883 3fa93523 Guido Trotter
      if missing:
2884 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2885 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2886 c8d8b4c8 Iustin Pop
    else:
2887 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2888 c1f1cbb2 Iustin Pop
2889 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2890 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2891 a8083063 Iustin Pop
2892 a8083063 Iustin Pop
    # begin data gathering
2893 a8083063 Iustin Pop
2894 bc8e4a1a Iustin Pop
    if self.do_node_query:
2895 a8083063 Iustin Pop
      live_data = {}
2896 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2897 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2898 a8083063 Iustin Pop
      for name in nodenames:
2899 781de953 Iustin Pop
        nodeinfo = node_data[name]
2900 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2901 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2902 d599d686 Iustin Pop
          fn = utils.TryConvert
2903 a8083063 Iustin Pop
          live_data[name] = {
2904 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2905 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2906 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2907 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2908 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2909 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2910 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2911 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2912 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2913 a8083063 Iustin Pop
            }
2914 a8083063 Iustin Pop
        else:
2915 a8083063 Iustin Pop
          live_data[name] = {}
2916 a8083063 Iustin Pop
    else:
2917 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2918 a8083063 Iustin Pop
2919 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2920 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2921 a8083063 Iustin Pop
2922 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2923 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2924 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2925 4dfd6266 Iustin Pop
      inst_data = self.cfg.GetAllInstancesInfo()
2926 a8083063 Iustin Pop
2927 1122eb25 Iustin Pop
      for inst in inst_data.values():
2928 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2929 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2930 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2931 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2932 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2933 a8083063 Iustin Pop
2934 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2935 0e67cdbe Iustin Pop
2936 a8083063 Iustin Pop
    # end data gathering
2937 a8083063 Iustin Pop
2938 a8083063 Iustin Pop
    output = []
2939 a8083063 Iustin Pop
    for node in nodelist:
2940 a8083063 Iustin Pop
      node_output = []
2941 a8083063 Iustin Pop
      for field in self.op.output_fields:
2942 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2943 19bed813 Iustin Pop
          val = getattr(node, field)
2944 ec223efb Iustin Pop
        elif field == "pinst_list":
2945 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2946 ec223efb Iustin Pop
        elif field == "sinst_list":
2947 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2948 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2949 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2950 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2951 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2952 a8083063 Iustin Pop
        elif field == "pip":
2953 a8083063 Iustin Pop
          val = node.primary_ip
2954 a8083063 Iustin Pop
        elif field == "sip":
2955 a8083063 Iustin Pop
          val = node.secondary_ip
2956 130a6a6f Iustin Pop
        elif field == "tags":
2957 130a6a6f Iustin Pop
          val = list(node.GetTags())
2958 0e67cdbe Iustin Pop
        elif field == "master":
2959 0e67cdbe Iustin Pop
          val = node.name == master_node
2960 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2961 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2962 c120ff34 Iustin Pop
        elif field == "role":
2963 c120ff34 Iustin Pop
          if node.name == master_node:
2964 c120ff34 Iustin Pop
            val = "M"
2965 c120ff34 Iustin Pop
          elif node.master_candidate:
2966 c120ff34 Iustin Pop
            val = "C"
2967 c120ff34 Iustin Pop
          elif node.drained:
2968 c120ff34 Iustin Pop
            val = "D"
2969 c120ff34 Iustin Pop
          elif node.offline:
2970 c120ff34 Iustin Pop
            val = "O"
2971 c120ff34 Iustin Pop
          else:
2972 c120ff34 Iustin Pop
            val = "R"
2973 a8083063 Iustin Pop
        else:
2974 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2975 a8083063 Iustin Pop
        node_output.append(val)
2976 a8083063 Iustin Pop
      output.append(node_output)
2977 a8083063 Iustin Pop
2978 a8083063 Iustin Pop
    return output
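  # Illustrative sketch, not part of the original module: when only static
  # fields are selected, e.g. output_fields=["name", "pip", "role"],
  # do_node_query is False, so no node locks are taken and no RPC is made;
  # the result is a list of rows in field order, for example (host names and
  # addresses are hypothetical):
  #
  #   [["node1.example.com", "192.0.2.1", "M"],
  #    ["node2.example.com", "192.0.2.2", "C"]]
  #
  # Requesting a dynamic field such as "mfree" sets do_node_query and
  # triggers a call_node_info RPC against the selected nodes.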
2979 a8083063 Iustin Pop
2980 a8083063 Iustin Pop
2981 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2982 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2983 dcb93971 Michael Hanselmann

2984 dcb93971 Michael Hanselmann
  """
2985 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2986 21a15682 Guido Trotter
  REQ_BGL = False
2987 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2988 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2989 21a15682 Guido Trotter
2990 21a15682 Guido Trotter
  def ExpandNames(self):
2991 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2992 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2993 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2994 21a15682 Guido Trotter
2995 21a15682 Guido Trotter
    self.needed_locks = {}
2996 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2997 21a15682 Guido Trotter
    if not self.op.nodes:
2998 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2999 21a15682 Guido Trotter
    else:
3000 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
3001 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
3002 dcb93971 Michael Hanselmann
3003 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
3004 dcb93971 Michael Hanselmann
    """Check prerequisites.
3005 dcb93971 Michael Hanselmann

3006 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
3007 dcb93971 Michael Hanselmann

3008 dcb93971 Michael Hanselmann
    """
3009 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3010 dcb93971 Michael Hanselmann
3011 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
3012 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3013 dcb93971 Michael Hanselmann

3014 dcb93971 Michael Hanselmann
    """
3015 a7ba5e53 Iustin Pop
    nodenames = self.nodes
3016 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
3017 dcb93971 Michael Hanselmann
3018 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3019 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
3020 dcb93971 Michael Hanselmann
3021 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3022 dcb93971 Michael Hanselmann
3023 dcb93971 Michael Hanselmann
    output = []
3024 dcb93971 Michael Hanselmann
    for node in nodenames:
3025 10bfe6cb Iustin Pop
      nresult = volumes[node]
3026 10bfe6cb Iustin Pop
      if nresult.offline:
3027 10bfe6cb Iustin Pop
        continue
3028 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
3029 10bfe6cb Iustin Pop
      if msg:
3030 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3031 37d19eb2 Michael Hanselmann
        continue
3032 37d19eb2 Michael Hanselmann
3033 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
3034 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
3035 dcb93971 Michael Hanselmann
3036 dcb93971 Michael Hanselmann
      for vol in node_vols:
3037 dcb93971 Michael Hanselmann
        node_output = []
3038 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
3039 dcb93971 Michael Hanselmann
          if field == "node":
3040 dcb93971 Michael Hanselmann
            val = node
3041 dcb93971 Michael Hanselmann
          elif field == "phys":
3042 dcb93971 Michael Hanselmann
            val = vol['dev']
3043 dcb93971 Michael Hanselmann
          elif field == "vg":
3044 dcb93971 Michael Hanselmann
            val = vol['vg']
3045 dcb93971 Michael Hanselmann
          elif field == "name":
3046 dcb93971 Michael Hanselmann
            val = vol['name']
3047 dcb93971 Michael Hanselmann
          elif field == "size":
3048 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
3049 dcb93971 Michael Hanselmann
          elif field == "instance":
3050 dcb93971 Michael Hanselmann
            for inst in ilist:
3051 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
3052 dcb93971 Michael Hanselmann
                continue
3053 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
3054 dcb93971 Michael Hanselmann
                val = inst.name
3055 dcb93971 Michael Hanselmann
                break
3056 dcb93971 Michael Hanselmann
            else:
3057 dcb93971 Michael Hanselmann
              val = '-'
3058 dcb93971 Michael Hanselmann
          else:
3059 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
3060 dcb93971 Michael Hanselmann
          node_output.append(str(val))
3061 dcb93971 Michael Hanselmann
3062 dcb93971 Michael Hanselmann
        output.append(node_output)
3063 dcb93971 Michael Hanselmann
3064 dcb93971 Michael Hanselmann
    return output
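  # Illustrative sketch, not part of the original module: with
  # output_fields=["node", "phys", "vg", "name", "size", "instance"] a
  # returned row could look like (all values hypothetical):
  #
  #   ["node1.example.com", "/dev/sda3", "xenvg", "disk0", "10240", "inst1"]
  #
  # Every value is passed through str() above, so sizes come back as
  # strings, and volumes not owned by any instance show "-" here.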
3065 dcb93971 Michael Hanselmann
3066 dcb93971 Michael Hanselmann
3067 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
3068 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
3069 9e5442ce Michael Hanselmann

3070 9e5442ce Michael Hanselmann
  """
3071 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
3072 9e5442ce Michael Hanselmann
  REQ_BGL = False
3073 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3074 9e5442ce Michael Hanselmann
3075 9e5442ce Michael Hanselmann
  def ExpandNames(self):
3076 9e5442ce Michael Hanselmann
    storage_type = self.op.storage_type
3077 9e5442ce Michael Hanselmann
3078 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
3079 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
3080 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3081 9e5442ce Michael Hanselmann
3082 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
3083 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3084 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
3085 9e5442ce Michael Hanselmann
3086 9e5442ce Michael Hanselmann
    self.needed_locks = {}
3087 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
3088 9e5442ce Michael Hanselmann
3089 9e5442ce Michael Hanselmann
    if self.op.nodes:
3090 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
3091 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
3092 9e5442ce Michael Hanselmann
    else:
3093 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3094 9e5442ce Michael Hanselmann
3095 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
3096 9e5442ce Michael Hanselmann
    """Check prerequisites.
3097 9e5442ce Michael Hanselmann

3098 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
3099 9e5442ce Michael Hanselmann

3100 9e5442ce Michael Hanselmann
    """
3101 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
3102 9e5442ce Michael Hanselmann
3103 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3104 9e5442ce Michael Hanselmann
3105 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
3106 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
3107 9e5442ce Michael Hanselmann

3108 9e5442ce Michael Hanselmann
    """
3109 9e5442ce Michael Hanselmann
    # Always get name to sort by
3110 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
3111 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
3112 9e5442ce Michael Hanselmann
    else:
3113 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
3114 9e5442ce Michael Hanselmann
3115 620a85fd Iustin Pop
    # Never ask for node or type as they are only known to the LU
3116 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3117 620a85fd Iustin Pop
      while extra in fields:
3118 620a85fd Iustin Pop
        fields.remove(extra)
3119 9e5442ce Michael Hanselmann
3120 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3121 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
3122 9e5442ce Michael Hanselmann
3123 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3124 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
3125 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
3126 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
3127 9e5442ce Michael Hanselmann
3128 9e5442ce Michael Hanselmann
    result = []
3129 9e5442ce Michael Hanselmann
3130 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
3131 9e5442ce Michael Hanselmann
      nresult = data[node]
3132 9e5442ce Michael Hanselmann
      if nresult.offline:
3133 9e5442ce Michael Hanselmann
        continue
3134 9e5442ce Michael Hanselmann
3135 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
3136 9e5442ce Michael Hanselmann
      if msg:
3137 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3138 9e5442ce Michael Hanselmann
        continue
3139 9e5442ce Michael Hanselmann
3140 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3141 9e5442ce Michael Hanselmann
3142 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
3143 9e5442ce Michael Hanselmann
        row = rows[name]
3144 9e5442ce Michael Hanselmann
3145 9e5442ce Michael Hanselmann
        out = []
3146 9e5442ce Michael Hanselmann
3147 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
3148 620a85fd Iustin Pop
          if field == constants.SF_NODE:
3149 9e5442ce Michael Hanselmann
            val = node
3150 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
3151 620a85fd Iustin Pop
            val = self.op.storage_type
3152 9e5442ce Michael Hanselmann
          elif field in field_idx:
3153 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
3154 9e5442ce Michael Hanselmann
          else:
3155 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
3156 9e5442ce Michael Hanselmann
3157 9e5442ce Michael Hanselmann
          out.append(val)
3158 9e5442ce Michael Hanselmann
3159 9e5442ce Michael Hanselmann
        result.append(out)
3160 9e5442ce Michael Hanselmann
3161 9e5442ce Michael Hanselmann
    return result
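  # Illustrative sketch, not part of the original module: SF_NAME is always
  # added to the field list sent over RPC so the rows can be keyed and
  # sorted, while SF_NODE and SF_TYPE are never sent and are filled in
  # locally. For op.output_fields = [constants.SF_NODE, constants.SF_NAME]
  # the RPC only asks for [constants.SF_NAME] and each output row comes
  # back as [node_name, storage_unit_name], in the caller's field order.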
3162 9e5442ce Michael Hanselmann
3163 9e5442ce Michael Hanselmann
3164 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
3165 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
3166 efb8da02 Michael Hanselmann

3167 efb8da02 Michael Hanselmann
  """
3168 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
3169 efb8da02 Michael Hanselmann
  REQ_BGL = False
3170 efb8da02 Michael Hanselmann
3171 efb8da02 Michael Hanselmann
  def CheckArguments(self):
3172 cf26a87a Iustin Pop
    self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name)
3173 efb8da02 Michael Hanselmann
3174 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3175 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
3176 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
3177 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3178 efb8da02 Michael Hanselmann
3179 efb8da02 Michael Hanselmann
  def ExpandNames(self):
3180 efb8da02 Michael Hanselmann
    self.needed_locks = {
3181 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
3182 efb8da02 Michael Hanselmann
      }
3183 efb8da02 Michael Hanselmann
3184 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
3185 efb8da02 Michael Hanselmann
    """Check prerequisites.
3186 efb8da02 Michael Hanselmann

3187 efb8da02 Michael Hanselmann
    """
3188 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3189 efb8da02 Michael Hanselmann
3190 efb8da02 Michael Hanselmann
    try:
3191 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3192 efb8da02 Michael Hanselmann
    except KeyError:
3193 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3194 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
3195 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3196 efb8da02 Michael Hanselmann
3197 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
3198 efb8da02 Michael Hanselmann
    if diff:
3199 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
3200 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
3201 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
3202 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3203 efb8da02 Michael Hanselmann
3204 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
3205 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3206 efb8da02 Michael Hanselmann

3207 efb8da02 Michael Hanselmann
    """
3208 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3209 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
3210 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
3211 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
3212 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
3213 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
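  # Illustrative sketch, not part of the original module: the changes dict
  # may only contain keys listed for this storage type in
  # constants.MODIFIABLE_STORAGE_FIELDS; any other key makes CheckPrereq
  # raise OpPrereqError with ECODE_INVAL. A hypothetical call (assuming
  # SF_ALLOCATABLE is modifiable for the given storage type) would pass:
  #
  #   changes = {constants.SF_ALLOCATABLE: False}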
3214 efb8da02 Michael Hanselmann
3215 efb8da02 Michael Hanselmann
3216 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
3217 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
3218 a8083063 Iustin Pop

3219 a8083063 Iustin Pop
  """
3220 a8083063 Iustin Pop
  HPATH = "node-add"
3221 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3222 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
3223 a8083063 Iustin Pop
3224 44caf5a8 Iustin Pop
  def CheckArguments(self):
3225 44caf5a8 Iustin Pop
    # validate/normalize the node name
3226 44caf5a8 Iustin Pop
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
3227 44caf5a8 Iustin Pop
3228 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3229 a8083063 Iustin Pop
    """Build hooks env.
3230 a8083063 Iustin Pop

3231 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
3232 a8083063 Iustin Pop

3233 a8083063 Iustin Pop
    """
3234 a8083063 Iustin Pop
    env = {
3235 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
3236 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
3237 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
3238 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
3239 a8083063 Iustin Pop
      }
3240 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
3241 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
3242 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
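  # Illustrative sketch, not part of the original module: for a hypothetical
  # new node "node3.example.com" the hook environment would look like
  #
  #   {"OP_TARGET": "node3.example.com",
  #    "NODE_NAME": "node3.example.com",
  #    "NODE_PIP": "192.0.2.3",
  #    "NODE_SIP": "198.51.100.3"}
  #
  # with the pre-hooks running on the current node list and the post-hooks
  # on that list plus the node being added.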
3243 a8083063 Iustin Pop
3244 a8083063 Iustin Pop
  def CheckPrereq(self):
3245 a8083063 Iustin Pop
    """Check prerequisites.
3246 a8083063 Iustin Pop

3247 a8083063 Iustin Pop
    This checks:
3248 a8083063 Iustin Pop
     - the new node is not already in the config
3249 a8083063 Iustin Pop
     - it is resolvable
3250 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
3251 a8083063 Iustin Pop

3252 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
3253 a8083063 Iustin Pop

3254 a8083063 Iustin Pop
    """
3255 a8083063 Iustin Pop
    node_name = self.op.node_name
3256 a8083063 Iustin Pop
    cfg = self.cfg
3257 a8083063 Iustin Pop
3258 104f4ca1 Iustin Pop
    dns_data = utils.GetHostInfo(node_name)
3259 a8083063 Iustin Pop
3260 bcf043c9 Iustin Pop
    node = dns_data.name
3261 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
3262 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
3263 a8083063 Iustin Pop
    if secondary_ip is None:
3264 a8083063 Iustin Pop
      secondary_ip = primary_ip
3265 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
3266 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
3267 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3268 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
3269 e7c6e02b Michael Hanselmann
3270 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
3271 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
3272 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3273 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
3274 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
3275 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3276 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
3277 a8083063 Iustin Pop
3278 a8083063 Iustin Pop
    for existing_node_name in node_list:
3279 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
3280 e7c6e02b Michael Hanselmann
3281 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
3282 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
3283 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
3284 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3285 5c983ee5 Iustin Pop
                                     " address configuration as before",
3286 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
3287 e7c6e02b Michael Hanselmann
        continue
3288 e7c6e02b Michael Hanselmann
3289 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
3290 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
3291 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
3292 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
3293 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3294 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
3295 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
3296 a8083063 Iustin Pop
3297 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
3298 a8083063 Iustin Pop
    # same as for the master
3299 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3300 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3301 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
3302 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
3303 a8083063 Iustin Pop
      if master_singlehomed:
3304 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
3305 5c983ee5 Iustin Pop
                                   " new node has one",
3306 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3307 a8083063 Iustin Pop
      else:
3308 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
3309 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
3310 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3311 a8083063 Iustin Pop
3312 5bbd3f7f Michael Hanselmann
    # checks reachability
3313 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3314 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
3315 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
3316 a8083063 Iustin Pop
3317 a8083063 Iustin Pop
    if not newbie_singlehomed:
3318 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
3319 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3320 b15d625f Iustin Pop
                           source=myself.secondary_ip):
3321 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3322 5c983ee5 Iustin Pop
                                   " based ping to noded port",
3323 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
3324 a8083063 Iustin Pop
3325 a8ae3eb5 Iustin Pop
    if self.op.readd:
3326 a8ae3eb5 Iustin Pop
      exceptions = [node]
3327 a8ae3eb5 Iustin Pop
    else:
3328 a8ae3eb5 Iustin Pop
      exceptions = []
3329 6d7e1f20 Guido Trotter
3330 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3331 0fff97e9 Guido Trotter
3332 a8ae3eb5 Iustin Pop
    if self.op.readd:
3333 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
3334 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3335 a8ae3eb5 Iustin Pop
    else:
3336 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
3337 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
3338 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
3339 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
3340 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
3341 a8083063 Iustin Pop
3342 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3343 a8083063 Iustin Pop
    """Adds the new node to the cluster.
3344 a8083063 Iustin Pop

3345 a8083063 Iustin Pop
    """
3346 a8083063 Iustin Pop
    new_node = self.new_node
3347 a8083063 Iustin Pop
    node = new_node.name
3348 a8083063 Iustin Pop
3349 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
3350 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
3351 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
3352 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
3353 a8ae3eb5 Iustin Pop
    if self.op.readd:
3354 7260cfbe Iustin Pop
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3355 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3356 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
3357 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
3358 a8ae3eb5 Iustin Pop
3359 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
3360 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
3361 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
3362 a8ae3eb5 Iustin Pop
3363 a8083063 Iustin Pop
    # check connectivity
3364 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
3365 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
3366 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
3367 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
3368 90b54c26 Iustin Pop
                   node, result.payload)
3369 a8083063 Iustin Pop
    else:
3370 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
3371 90b54c26 Iustin Pop
                               " node version %s" %
3372 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
3373 a8083063 Iustin Pop
3374 a8083063 Iustin Pop
    # setup ssh on node
3375 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3376 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
3377 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3378 b989b9d9 Ken Wehr
      keyarray = []
3379 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3380 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3381 b989b9d9 Ken Wehr
                  priv_key, pub_key]
3382 b989b9d9 Ken Wehr
3383 b989b9d9 Ken Wehr
      for i in keyfiles:
3384 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
3385 b989b9d9 Ken Wehr
3386 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3387 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
3388 b989b9d9 Ken Wehr
                                      keyarray[5])
3389 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
3390 a8083063 Iustin Pop
3391 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
3392 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3393 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
3394 c8a0948f Michael Hanselmann
3395 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
3396 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
3397 781de953 Iustin Pop
                                                 new_node.secondary_ip)
3398 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3399 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3400 c2fc8250 Iustin Pop
      if not result.payload:
3401 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3402 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
3403 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
3404 a8083063 Iustin Pop
3405 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
3406 5c0527ed Guido Trotter
    node_verify_param = {
3407 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
3408 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
3409 5c0527ed Guido Trotter
    }
3410 5c0527ed Guido Trotter
3411 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3412 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
3413 5c0527ed Guido Trotter
    for verifier in node_verify_list:
3414 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3415 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3416 6f68a739 Iustin Pop
      if nl_payload:
3417 6f68a739 Iustin Pop
        for failed in nl_payload:
3418 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
3419 31821208 Iustin Pop
                      " (checking from %s): %s" %
3420 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
3421 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
3422 ff98055b Iustin Pop
3423 d8470559 Michael Hanselmann
    if self.op.readd:
3424 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
3425 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
3426 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
3427 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
3428 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3429 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3430 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3431 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3432 a8ae3eb5 Iustin Pop
        if msg:
3433 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3434 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3435 d8470559 Michael Hanselmann
    else:
3436 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3437 0debfb35 Guido Trotter
      self.context.AddNode(new_node, self.proc.GetECId())
3438 a8083063 Iustin Pop
3439 a8083063 Iustin Pop
3440 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
3441 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3442 b31c8676 Iustin Pop

3443 b31c8676 Iustin Pop
  """
3444 b31c8676 Iustin Pop
  HPATH = "node-modify"
3445 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3446 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3447 b31c8676 Iustin Pop
  REQ_BGL = False
3448 b31c8676 Iustin Pop
3449 b31c8676 Iustin Pop
  def CheckArguments(self):
3450 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3451 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3452 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3453 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3454 601908d0 Iustin Pop
    _CheckBooleanOpField(self.op, 'auto_promote')
3455 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3456 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3457 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3458 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3459 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3460 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3461 5c983ee5 Iustin Pop
                                 " state at the same time",
3462 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3463 b31c8676 Iustin Pop
3464 601908d0 Iustin Pop
    # Boolean value that tells us whether we're offlining or draining the node
3465 601908d0 Iustin Pop
    self.offline_or_drain = (self.op.offline == True or
3466 601908d0 Iustin Pop
                             self.op.drained == True)
3467 601908d0 Iustin Pop
    self.deoffline_or_drain = (self.op.offline == False or
3468 601908d0 Iustin Pop
                               self.op.drained == False)
3469 601908d0 Iustin Pop
    self.might_demote = (self.op.master_candidate == False or
3470 601908d0 Iustin Pop
                         self.offline_or_drain)
3471 601908d0 Iustin Pop
3472 601908d0 Iustin Pop
    self.lock_all = self.op.auto_promote and self.might_demote
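  # Illustrative sketch, not part of the original module, of how the flag
  # combinations are interpreted (values refer to the opcode fields):
  #
  #   offline=True                 -> accepted; may auto-demote from MC
  #   offline=True, drained=True   -> rejected: more than one True flag
  #   no flag passed at all        -> rejected: no modification requested
  #   master_candidate=False       -> might_demote; with auto_promote set,
  #                                   all node locks are taken (lock_all)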
3473 601908d0 Iustin Pop
3474 601908d0 Iustin Pop
3475 b31c8676 Iustin Pop
  def ExpandNames(self):
3476 601908d0 Iustin Pop
    if self.lock_all:
3477 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3478 601908d0 Iustin Pop
    else:
3479 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3480 b31c8676 Iustin Pop
3481 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3482 b31c8676 Iustin Pop
    """Build hooks env.
3483 b31c8676 Iustin Pop

3484 b31c8676 Iustin Pop
    This runs on the master node.
3485 b31c8676 Iustin Pop

3486 b31c8676 Iustin Pop
    """
3487 b31c8676 Iustin Pop
    env = {
3488 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3489 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3490 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3491 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3492 b31c8676 Iustin Pop
      }
3493 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3494 b31c8676 Iustin Pop
          self.op.node_name]
3495 b31c8676 Iustin Pop
    return env, nl, nl
3496 b31c8676 Iustin Pop
3497 b31c8676 Iustin Pop
  def CheckPrereq(self):
3498 b31c8676 Iustin Pop
    """Check prerequisites.
3499 b31c8676 Iustin Pop

3500 b31c8676 Iustin Pop
    This checks that the requested role changes are valid for the node.
3501 b31c8676 Iustin Pop

3502 b31c8676 Iustin Pop
    """
3503 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3504 b31c8676 Iustin Pop
3505 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3506 97c61d46 Iustin Pop
        self.op.drained is not None or
3507 97c61d46 Iustin Pop
        self.op.offline is not None):
3508 97c61d46 Iustin Pop
      # we can't change the master's node flags
3509 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3510 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3511 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3512 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3513 97c61d46 Iustin Pop
3514 601908d0 Iustin Pop
3515 601908d0 Iustin Pop
    if node.master_candidate and self.might_demote and not self.lock_all:
3516 601908d0 Iustin Pop
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3517 601908d0 Iustin Pop
      # check if after removing the current node, we're missing master
3518 601908d0 Iustin Pop
      # candidates
3519 601908d0 Iustin Pop
      (mc_remaining, mc_should, _) = \
3520 601908d0 Iustin Pop
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3521 8fe9239e Iustin Pop
      if mc_remaining < mc_should:
3522 601908d0 Iustin Pop
        raise errors.OpPrereqError("Not enough master candidates, please"
3523 601908d0 Iustin Pop
                                   " pass auto_promote to allow promotion",
3524 601908d0 Iustin Pop
                                   errors.ECODE_INVAL)
3525 3e83dd48 Iustin Pop
3526 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3527 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3528 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3529 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3530 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3531 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3532 3a5ba66a Iustin Pop
3533 3d9eb52b Guido Trotter
    # If the node is being de-offlined or un-drained, promote it to
    # master candidate if needed
3534 601908d0 Iustin Pop
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3535 cea0534a Guido Trotter
        self.op.master_candidate == True and not node.master_candidate):
3536 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3537 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3538 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3539 3d9eb52b Guido Trotter
3540 b31c8676 Iustin Pop
    return
3541 b31c8676 Iustin Pop
3542 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3543 b31c8676 Iustin Pop
    """Modifies a node.
3544 b31c8676 Iustin Pop

3545 b31c8676 Iustin Pop
    """
3546 3a5ba66a Iustin Pop
    node = self.node
3547 b31c8676 Iustin Pop
3548 b31c8676 Iustin Pop
    result = []
3549 c9d443ea Iustin Pop
    changed_mc = False
3550 b31c8676 Iustin Pop
3551 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3552 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3553 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3554 c9d443ea Iustin Pop
      if self.op.offline == True:
3555 c9d443ea Iustin Pop
        if node.master_candidate:
3556 c9d443ea Iustin Pop
          node.master_candidate = False
3557 c9d443ea Iustin Pop
          changed_mc = True
3558 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3559 c9d443ea Iustin Pop
        if node.drained:
3560 c9d443ea Iustin Pop
          node.drained = False
3561 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3562 3a5ba66a Iustin Pop
3563 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3564 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3565 c9d443ea Iustin Pop
      changed_mc = True
3566 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3567 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3568 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3569 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3570 0959c824 Iustin Pop
        if msg:
3571 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3572 b31c8676 Iustin Pop
3573 c9d443ea Iustin Pop
    if self.op.drained is not None:
3574 c9d443ea Iustin Pop
      node.drained = self.op.drained
3575 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3576 c9d443ea Iustin Pop
      if self.op.drained == True:
3577 c9d443ea Iustin Pop
        if node.master_candidate:
3578 c9d443ea Iustin Pop
          node.master_candidate = False
3579 c9d443ea Iustin Pop
          changed_mc = True
3580 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3581 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3582 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3583 dec0d9da Iustin Pop
          if msg:
3584 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3585 c9d443ea Iustin Pop
        if node.offline:
3586 c9d443ea Iustin Pop
          node.offline = False
3587 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3588 c9d443ea Iustin Pop
3589 601908d0 Iustin Pop
    # we locked all nodes, so adjust the candidate pool before updating
    # this node
3590 601908d0 Iustin Pop
    if self.lock_all:
3591 601908d0 Iustin Pop
      _AdjustCandidatePool(self, [node.name])
3592 601908d0 Iustin Pop
3593 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3594 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3595 601908d0 Iustin Pop
3596 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3597 c9d443ea Iustin Pop
    if changed_mc:
3598 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3599 b31c8676 Iustin Pop
3600 b31c8676 Iustin Pop
    return result
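  # Illustrative sketch, not part of the original module: the value returned
  # to the caller is a list of (parameter, value or reason) pairs; offlining
  # a node that was a master candidate yields something like:
  #
  #   [("offline", "True"),
  #    ("master_candidate", "auto-demotion due to offline")]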
3601 b31c8676 Iustin Pop
3602 b31c8676 Iustin Pop
3603 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3604 f5118ade Iustin Pop
  """Powercycles a node.
3605 f5118ade Iustin Pop

3606 f5118ade Iustin Pop
  """
3607 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3608 f5118ade Iustin Pop
  REQ_BGL = False
3609 f5118ade Iustin Pop
3610 f5118ade Iustin Pop
  def CheckArguments(self):
3611 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3612 cf26a87a Iustin Pop
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
3613 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3614 5c983ee5 Iustin Pop
                                 " parameter was not set",
3615 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3616 f5118ade Iustin Pop
3617 f5118ade Iustin Pop
  def ExpandNames(self):
3618 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3619 f5118ade Iustin Pop

3620 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3621 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3622 f5118ade Iustin Pop

3623 f5118ade Iustin Pop
    """
3624 f5118ade Iustin Pop
    self.needed_locks = {}
3625 f5118ade Iustin Pop
3626 f5118ade Iustin Pop
  def CheckPrereq(self):
3627 f5118ade Iustin Pop
    """Check prerequisites.
3628 f5118ade Iustin Pop

3629 f5118ade Iustin Pop
    This LU has no prereqs.
3630 f5118ade Iustin Pop

3631 f5118ade Iustin Pop
    """
3632 f5118ade Iustin Pop
    pass
3633 f5118ade Iustin Pop
3634 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3635 f5118ade Iustin Pop
    """Reboots a node.
3636 f5118ade Iustin Pop

3637 f5118ade Iustin Pop
    """
3638 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3639 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3640 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3641 f5118ade Iustin Pop
    return result.payload
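  # Illustrative note, not part of the original module: the returned value
  # is the raw payload of the node_powercycle RPC; powercycling the master
  # node itself is only accepted when the opcode sets force=True, and since
  # no locks are taken the call is issued even while other jobs hold the
  # node.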
3642 f5118ade Iustin Pop
3643 f5118ade Iustin Pop
3644 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3645 a8083063 Iustin Pop
  """Query cluster configuration.
3646 a8083063 Iustin Pop

3647 a8083063 Iustin Pop
  """
3648 a8083063 Iustin Pop
  _OP_REQP = []
3649 642339cf Guido Trotter
  REQ_BGL = False
3650 642339cf Guido Trotter
3651 642339cf Guido Trotter
  def ExpandNames(self):
3652 642339cf Guido Trotter
    self.needed_locks = {}
3653 a8083063 Iustin Pop
3654 a8083063 Iustin Pop
  def CheckPrereq(self):
3655 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3656 a8083063 Iustin Pop

3657 a8083063 Iustin Pop
    """
3658 a8083063 Iustin Pop
    pass
3659 a8083063 Iustin Pop
3660 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3661 a8083063 Iustin Pop
    """Return cluster config.
3662 a8083063 Iustin Pop

3663 a8083063 Iustin Pop
    """
3664 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3665 17463d22 Renรฉ Nussbaumer
    os_hvp = {}
3666 17463d22 Renรฉ Nussbaumer
3667 17463d22 Renรฉ Nussbaumer
    # Filter just for enabled hypervisors
3668 17463d22 Renรฉ Nussbaumer
    for os_name, hv_dict in cluster.os_hvp.items():
3669 17463d22 Renรฉ Nussbaumer
      os_hvp[os_name] = {}
3670 17463d22 Renรฉ Nussbaumer
      for hv_name, hv_params in hv_dict.items():
3671 17463d22 Renรฉ Nussbaumer
        if hv_name in cluster.enabled_hypervisors:
3672 17463d22 Renรฉ Nussbaumer
          os_hvp[os_name][hv_name] = hv_params
3673 17463d22 Renรฉ Nussbaumer
3674 a8083063 Iustin Pop
    result = {
3675 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3676 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3677 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3678 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3679 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3680 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3681 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3682 469f88e1 Iustin Pop
      "master": cluster.master_node,
3683 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3684 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3685 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3686 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3687 17463d22 Renรฉ Nussbaumer
      "os_hvp": os_hvp,
3688 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3689 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3690 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3691 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3692 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3693 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3694 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3695 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3696 259578eb Iustin Pop
      "uuid": cluster.uuid,
3697 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3698 a8083063 Iustin Pop
      }
3699 a8083063 Iustin Pop
3700 a8083063 Iustin Pop
    return result
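  # Illustrative sketch, not part of the original module: the "os_hvp" entry
  # only keeps hypervisors that are currently enabled. With hypothetical
  # data such as
  #
  #   cluster.os_hvp = {"debootstrap": {"xen-pvm": {...}, "kvm": {...}}}
  #   cluster.enabled_hypervisors = ["xen-pvm"]
  #
  # the result contains {"debootstrap": {"xen-pvm": {...}}} and the "kvm"
  # override is filtered out.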
3701 a8083063 Iustin Pop
3702 a8083063 Iustin Pop
3703 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3704 ae5849b5 Michael Hanselmann
  """Return configuration values.
3705 a8083063 Iustin Pop

3706 a8083063 Iustin Pop
  """
3707 a8083063 Iustin Pop
  _OP_REQP = []
3708 642339cf Guido Trotter
  REQ_BGL = False
3709 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3710 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3711 05e50653 Michael Hanselmann
                                  "watcher_pause")
3712 642339cf Guido Trotter
3713 642339cf Guido Trotter
  def ExpandNames(self):
3714 642339cf Guido Trotter
    self.needed_locks = {}
3715 a8083063 Iustin Pop
3716 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3717 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3718 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3719 ae5849b5 Michael Hanselmann
3720 a8083063 Iustin Pop
  def CheckPrereq(self):
3721 a8083063 Iustin Pop
    """No prerequisites.
3722 a8083063 Iustin Pop

3723 a8083063 Iustin Pop
    """
3724 a8083063 Iustin Pop
    pass
3725 a8083063 Iustin Pop
3726 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3727 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3728 a8083063 Iustin Pop

3729 a8083063 Iustin Pop
    """
3730 ae5849b5 Michael Hanselmann
    values = []
3731 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3732 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3733 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3734 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3735 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3736 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3737 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3738 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3739 cac599f1 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3740 ae5849b5 Michael Hanselmann
      else:
3741 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3742 3ccafd0e Iustin Pop
      values.append(entry)
3743 ae5849b5 Michael Hanselmann
    return values
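  # Illustrative sketch, not part of the original module: the result is a
  # flat list in the same order as op.output_fields, e.g. for
  # ["cluster_name", "drain_flag"] something like (values hypothetical):
  #
  #   ["cluster.example.com", False]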
3744 a8083063 Iustin Pop
3745 a8083063 Iustin Pop
3746 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3747 a8083063 Iustin Pop
  """Bring up an instance's disks.
3748 a8083063 Iustin Pop

3749 a8083063 Iustin Pop
  """
3750 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3751 f22a8ba3 Guido Trotter
  REQ_BGL = False
3752 f22a8ba3 Guido Trotter
3753 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3754 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3755 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3756 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3757 f22a8ba3 Guido Trotter
3758 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3759 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3760 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3761 a8083063 Iustin Pop
3762 a8083063 Iustin Pop
  def CheckPrereq(self):
3763 a8083063 Iustin Pop
    """Check prerequisites.
3764 a8083063 Iustin Pop

3765 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3766 a8083063 Iustin Pop

3767 a8083063 Iustin Pop
    """
3768 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3769 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3770 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3771 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3772 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3773 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3774 a8083063 Iustin Pop
3775 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3776 a8083063 Iustin Pop
    """Activate the disks.
3777 a8083063 Iustin Pop

3778 a8083063 Iustin Pop
    """
3779 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3780 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3781 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3782 a8083063 Iustin Pop
    if not disks_ok:
3783 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3784 a8083063 Iustin Pop
3785 a8083063 Iustin Pop
    return disks_info
3786 a8083063 Iustin Pop
3787 a8083063 Iustin Pop
3788 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3789 e3443b36 Iustin Pop
                           ignore_size=False):
3790 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3791 a8083063 Iustin Pop

3792 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3793 a8083063 Iustin Pop

3794 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3795 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3796 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3797 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3798 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3799 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3800 e4376078 Iustin Pop
      won't result in an error return from the function
3801 e3443b36 Iustin Pop
  @type ignore_size: boolean
3802 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3803 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3804 e3443b36 Iustin Pop
      when the size is wrong
3805 e4376078 Iustin Pop
  @return: a tuple (disks_ok, device_info); disks_ok is False on failure,
3806 e4376078 Iustin Pop
      and device_info is a list of (host, instance_visible_name,
3807 e4376078 Iustin Pop
      node_visible_name) tuples mapping node devices to instance devices
3808 a8083063 Iustin Pop

3809 a8083063 Iustin Pop
  """
3810 a8083063 Iustin Pop
  device_info = []
3811 a8083063 Iustin Pop
  disks_ok = True
3812 fdbd668d Iustin Pop
  iname = instance.name
3813 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
3814 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3815 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
3816 fdbd668d Iustin Pop
3817 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3818 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3819 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3820 fdbd668d Iustin Pop
  # SyncSource, etc.)
3821 fdbd668d Iustin Pop
3822 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3823 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3824 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3825 e3443b36 Iustin Pop
      if ignore_size:
3826 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3827 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3828 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3829 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3830 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3831 53c14ef1 Iustin Pop
      if msg:
3832 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3833 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3834 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3835 fdbd668d Iustin Pop
        if not ignore_secondaries:
3836 a8083063 Iustin Pop
          disks_ok = False
3837 fdbd668d Iustin Pop
3838 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3839 fdbd668d Iustin Pop
3840 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3841 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3842 d52ea991 Michael Hanselmann
    dev_path = None
3843 d52ea991 Michael Hanselmann
3844 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3845 fdbd668d Iustin Pop
      if node != instance.primary_node:
3846 fdbd668d Iustin Pop
        continue
3847 e3443b36 Iustin Pop
      if ignore_size:
3848 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3849 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3850 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3851 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3852 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3853 53c14ef1 Iustin Pop
      if msg:
3854 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3855 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3856 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3857 fdbd668d Iustin Pop
        disks_ok = False
3858 d52ea991 Michael Hanselmann
      else:
3859 d52ea991 Michael Hanselmann
        dev_path = result.payload
3860 d52ea991 Michael Hanselmann
3861 d52ea991 Michael Hanselmann
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
3862 a8083063 Iustin Pop
3863 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3864 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3865 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3866 b352ab5b Iustin Pop
  for disk in instance.disks:
3867 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3868 b352ab5b Iustin Pop
3869 a8083063 Iustin Pop
  return disks_ok, device_info
3870 a8083063 Iustin Pop
3871 a8083063 Iustin Pop
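
# Illustrative sketch, not part of the original module: consuming the
# (node, iv_name, dev_path) tuples returned by _AssembleInstanceDisks, for
# example to report where each disk became visible. The helper name and the
# feedback_fn parameter are assumptions for this example only.
def _ExampleReportAssembledDisks(lu, instance, feedback_fn):
  """Sketch: assemble the disks and report the per-node device mapping."""
  disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    raise errors.OpExecError("Cannot activate block devices")
  for node, iv_name, dev_path in device_info:
    feedback_fn("disk %s of %s visible on node %s as %s" %
                (iv_name, instance.name, node, dev_path))
  return device_info
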
3872 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3873 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3874 3ecf6786 Iustin Pop

3875 3ecf6786 Iustin Pop
  """
3876 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3877 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3878 fe7b0351 Michael Hanselmann
  if not disks_ok:
3879 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3880 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3881 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3882 86d9d3bb Iustin Pop
                         " secondary node,"
3883 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3884 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3885 fe7b0351 Michael Hanselmann
3886 fe7b0351 Michael Hanselmann
3887 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3888 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3889 a8083063 Iustin Pop

3890 a8083063 Iustin Pop
  """
3891 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3892 f22a8ba3 Guido Trotter
  REQ_BGL = False
3893 f22a8ba3 Guido Trotter
3894 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3895 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3896 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3897 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3898 f22a8ba3 Guido Trotter
3899 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3900 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3901 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3902 a8083063 Iustin Pop
3903 a8083063 Iustin Pop
  def CheckPrereq(self):
3904 a8083063 Iustin Pop
    """Check prerequisites.
3905 a8083063 Iustin Pop

3906 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3907 a8083063 Iustin Pop

3908 a8083063 Iustin Pop
    """
3909 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3910 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3911 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3912 a8083063 Iustin Pop
3913 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3914 a8083063 Iustin Pop
    """Deactivate the disks
3915 a8083063 Iustin Pop

3916 a8083063 Iustin Pop
    """
3917 a8083063 Iustin Pop
    instance = self.instance
3918 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3919 a8083063 Iustin Pop
3920 a8083063 Iustin Pop
3921 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3922 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3923 155d6c75 Guido Trotter

3924 155d6c75 Guido Trotter
  This function checks that the instance is not running before calling
3925 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3926 155d6c75 Guido Trotter

3927 155d6c75 Guido Trotter
  """
3928 31624382 Iustin Pop
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
3929 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3930 a8083063 Iustin Pop
3931 a8083063 Iustin Pop
3932 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3933 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3934 a8083063 Iustin Pop

3935 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3936 a8083063 Iustin Pop

3937 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
3938 a8083063 Iustin Pop
  ignored.
3939 a8083063 Iustin Pop

3940 a8083063 Iustin Pop
  """
3941 cacfd1fd Iustin Pop
  all_result = True
3942 a8083063 Iustin Pop
  for disk in instance.disks:
3943 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
3944 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
3945 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
3946 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3947 cacfd1fd Iustin Pop
      if msg:
3948 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
3949 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
3950 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
3951 cacfd1fd Iustin Pop
          all_result = False
3952 cacfd1fd Iustin Pop
  return all_result
3953 a8083063 Iustin Pop
3954 a8083063 Iustin Pop
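
# Illustrative sketch, not part of the original module: tolerating a dead
# primary node while still tearing down the disks on the secondaries, which
# is what the ignore_primary parameter above is for. The helper name is an
# assumption for this example only.
def _ExampleShutdownSecondaryDisks(lu, instance):
  """Sketch: shut down instance disks, ignoring errors on the primary."""
  if not _ShutdownInstanceDisks(lu, instance, ignore_primary=True):
    lu.LogWarning("Some block devices of instance %s could not be shut down",
                  instance.name)
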
3955 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
3956 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
3957 d4f16fd9 Iustin Pop

3958 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
3959 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
3960 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
3961 d4f16fd9 Iustin Pop
  exception.
3962 d4f16fd9 Iustin Pop

3963 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
3964 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
3965 e69d05fd Iustin Pop
  @type node: C{str}
3966 e69d05fd Iustin Pop
  @param node: the node to check
3967 e69d05fd Iustin Pop
  @type reason: C{str}
3968 e69d05fd Iustin Pop
  @param reason: string to use in the error message
3969 e69d05fd Iustin Pop
  @type requested: C{int}
3970 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
3971 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
3972 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
3973 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
3974 e69d05fd Iustin Pop
      we cannot check the node
3975 d4f16fd9 Iustin Pop

3976 d4f16fd9 Iustin Pop
  """
3977 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
3978 045dd6d9 Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node,
3979 045dd6d9 Iustin Pop
                       prereq=True, ecode=errors.ECODE_ENVIRON)
3980 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
3981 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
3982 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
3983 5c983ee5 Iustin Pop
                               " was '%s'" % (node, free_mem),
3984 5c983ee5 Iustin Pop
                               errors.ECODE_ENVIRON)
3985 d4f16fd9 Iustin Pop
  if requested > free_mem:
3986 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
3987 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
3988 5c983ee5 Iustin Pop
                               (node, reason, requested, free_mem),
3989 5c983ee5 Iustin Pop
                               errors.ECODE_NORES)
3990 d4f16fd9 Iustin Pop
3991 d4f16fd9 Iustin Pop
3992 701384a9 Iustin Pop
def _CheckNodesFreeDisk(lu, nodenames, requested):
3993 701384a9 Iustin Pop
  """Checks if nodes have enough free disk space in the default VG.
3994 701384a9 Iustin Pop

3995 701384a9 Iustin Pop
  This function checks if all given nodes have the needed amount of
3996 701384a9 Iustin Pop
  free disk. In case any node has less disk or we cannot get the
3997 701384a9 Iustin Pop
  information from the node, this function raises an OpPrereqError
3998 701384a9 Iustin Pop
  exception.
3999 701384a9 Iustin Pop

4000 701384a9 Iustin Pop
  @type lu: C{LogicalUnit}
4001 701384a9 Iustin Pop
  @param lu: a logical unit from which we get configuration data
4002 701384a9 Iustin Pop
  @type nodenames: C{list}
4003 3a488770 Iustin Pop
  @param nodenames: the list of node names to check
4004 701384a9 Iustin Pop
  @type requested: C{int}
4005 701384a9 Iustin Pop
  @param requested: the amount of disk in MiB to check for
4006 701384a9 Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4007 701384a9 Iustin Pop
      we cannot check the node
4008 701384a9 Iustin Pop

4009 701384a9 Iustin Pop
  """
4010 701384a9 Iustin Pop
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4011 701384a9 Iustin Pop
                                   lu.cfg.GetHypervisorType())
4012 701384a9 Iustin Pop
  for node in nodenames:
4013 701384a9 Iustin Pop
    info = nodeinfo[node]
4014 701384a9 Iustin Pop
    info.Raise("Cannot get current information from node %s" % node,
4015 701384a9 Iustin Pop
               prereq=True, ecode=errors.ECODE_ENVIRON)
4016 701384a9 Iustin Pop
    vg_free = info.payload.get("vg_free", None)
4017 701384a9 Iustin Pop
    if not isinstance(vg_free, int):
4018 701384a9 Iustin Pop
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4019 701384a9 Iustin Pop
                                 " result was '%s'" % (node, vg_free),
4020 701384a9 Iustin Pop
                                 errors.ECODE_ENVIRON)
4021 701384a9 Iustin Pop
    if requested > vg_free:
4022 701384a9 Iustin Pop
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4023 701384a9 Iustin Pop
                                 " required %d MiB, available %d MiB" %
4024 701384a9 Iustin Pop
                                 (node, requested, vg_free),
4025 701384a9 Iustin Pop
                                 errors.ECODE_NORES)
4026 701384a9 Iustin Pop
4027 701384a9 Iustin Pop
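
# Illustrative sketch, not part of the original module: combining the two
# resource checks above in a CheckPrereq-style helper. The helper name and
# the disk_mib parameter are assumptions; the memory requirement comes from
# the instance's filled beparams, as done by LUStartupInstance below.
def _ExampleCheckNodeResources(lu, instance, disk_mib):
  """Sketch: check free memory on the primary and free disk on all nodes."""
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  _CheckNodeFreeMemory(lu, instance.primary_node,
                       "starting instance %s" % instance.name,
                       bep[constants.BE_MEMORY], instance.hypervisor)
  _CheckNodesFreeDisk(lu, instance.all_nodes, disk_mib)
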
4028 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
4029 a8083063 Iustin Pop
  """Starts an instance.
4030 a8083063 Iustin Pop

4031 a8083063 Iustin Pop
  """
4032 a8083063 Iustin Pop
  HPATH = "instance-start"
4033 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4034 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
4035 e873317a Guido Trotter
  REQ_BGL = False
4036 e873317a Guido Trotter
4037 e873317a Guido Trotter
  def ExpandNames(self):
4038 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4039 a8083063 Iustin Pop
4040 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4041 a8083063 Iustin Pop
    """Build hooks env.
4042 a8083063 Iustin Pop

4043 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4044 a8083063 Iustin Pop

4045 a8083063 Iustin Pop
    """
4046 a8083063 Iustin Pop
    env = {
4047 a8083063 Iustin Pop
      "FORCE": self.op.force,
4048 a8083063 Iustin Pop
      }
4049 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4050 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4051 a8083063 Iustin Pop
    return env, nl, nl
4052 a8083063 Iustin Pop
4053 a8083063 Iustin Pop
  def CheckPrereq(self):
4054 a8083063 Iustin Pop
    """Check prerequisites.
4055 a8083063 Iustin Pop

4056 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4057 a8083063 Iustin Pop

4058 a8083063 Iustin Pop
    """
4059 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4060 e873317a Guido Trotter
    assert self.instance is not None, \
4061 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4062 a8083063 Iustin Pop
4063 d04aaa2f Iustin Pop
    # extra beparams
4064 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
4065 d04aaa2f Iustin Pop
    if self.beparams:
4066 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
4067 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
4068 5c983ee5 Iustin Pop
                                   " dict" % (type(self.beparams), ),
4069 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4070 d04aaa2f Iustin Pop
      # fill the beparams dict
4071 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
4072 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
4073 d04aaa2f Iustin Pop
4074 d04aaa2f Iustin Pop
    # extra hvparams
4075 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
4076 d04aaa2f Iustin Pop
    if self.hvparams:
4077 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
4078 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
4079 5c983ee5 Iustin Pop
                                   " dict" % (type(self.hvparams), ),
4080 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4081 d04aaa2f Iustin Pop
4082 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
4083 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4084 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
4085 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
4086 d04aaa2f Iustin Pop
                                    instance.hvparams)
4087 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
4088 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4089 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
4090 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4091 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
4092 d04aaa2f Iustin Pop
4093 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4094 7527a8a4 Iustin Pop
4095 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4096 5bbd3f7f Michael Hanselmann
    # check bridges existence
4097 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4098 a8083063 Iustin Pop
4099 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4100 f1926756 Guido Trotter
                                              instance.name,
4101 f1926756 Guido Trotter
                                              instance.hypervisor)
4102 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4103 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4104 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
4105 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
4106 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
4107 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
4108 d4f16fd9 Iustin Pop
4109 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4110 a8083063 Iustin Pop
    """Start the instance.
4111 a8083063 Iustin Pop

4112 a8083063 Iustin Pop
    """
4113 a8083063 Iustin Pop
    instance = self.instance
4114 a8083063 Iustin Pop
    force = self.op.force
4115 a8083063 Iustin Pop
4116 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
4117 fe482621 Iustin Pop
4118 a8083063 Iustin Pop
    node_current = instance.primary_node
4119 a8083063 Iustin Pop
4120 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
4121 a8083063 Iustin Pop
4122 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
4123 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
4124 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4125 dd279568 Iustin Pop
    if msg:
4126 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4127 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
4128 a8083063 Iustin Pop
4129 a8083063 Iustin Pop
4130 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
4131 bf6929a2 Alexander Schreiber
  """Reboot an instance.
4132 bf6929a2 Alexander Schreiber

4133 bf6929a2 Alexander Schreiber
  """
4134 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
4135 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
4136 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
4137 e873317a Guido Trotter
  REQ_BGL = False
4138 e873317a Guido Trotter
4139 17c3f802 Guido Trotter
  def CheckArguments(self):
4140 17c3f802 Guido Trotter
    """Check the arguments.
4141 17c3f802 Guido Trotter

4142 17c3f802 Guido Trotter
    """
4143 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4144 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4145 17c3f802 Guido Trotter
4146 e873317a Guido Trotter
  def ExpandNames(self):
4147 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
4148 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4149 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
4150 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
4151 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
4152 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4153 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
4154 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4155 bf6929a2 Alexander Schreiber
4156 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
4157 bf6929a2 Alexander Schreiber
    """Build hooks env.
4158 bf6929a2 Alexander Schreiber

4159 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
4160 bf6929a2 Alexander Schreiber

4161 bf6929a2 Alexander Schreiber
    """
4162 bf6929a2 Alexander Schreiber
    env = {
4163 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4164 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
4165 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4166 bf6929a2 Alexander Schreiber
      }
4167 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4168 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4169 bf6929a2 Alexander Schreiber
    return env, nl, nl
4170 bf6929a2 Alexander Schreiber
4171 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
4172 bf6929a2 Alexander Schreiber
    """Check prerequisites.
4173 bf6929a2 Alexander Schreiber

4174 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
4175 bf6929a2 Alexander Schreiber

4176 bf6929a2 Alexander Schreiber
    """
4177 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4178 e873317a Guido Trotter
    assert self.instance is not None, \
4179 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4180 bf6929a2 Alexander Schreiber
4181 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4182 7527a8a4 Iustin Pop
4183 5bbd3f7f Michael Hanselmann
    # check bridges existence
4184 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4185 bf6929a2 Alexander Schreiber
4186 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
4187 bf6929a2 Alexander Schreiber
    """Reboot the instance.
4188 bf6929a2 Alexander Schreiber

4189 bf6929a2 Alexander Schreiber
    """
4190 bf6929a2 Alexander Schreiber
    instance = self.instance
4191 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
4192 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
4193 bf6929a2 Alexander Schreiber
4194 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
4195 bf6929a2 Alexander Schreiber
4196 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4197 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
4198 ae48ac32 Iustin Pop
      for disk in instance.disks:
4199 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
4200 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
4201 17c3f802 Guido Trotter
                                             reboot_type,
4202 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4203 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
4204 bf6929a2 Alexander Schreiber
    else:
4205 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(node_current, instance,
4206 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
4207 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
4208 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4209 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
4210 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4211 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4212 dd279568 Iustin Pop
      if msg:
4213 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4214 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
4215 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
4216 bf6929a2 Alexander Schreiber
4217 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
4218 bf6929a2 Alexander Schreiber
4219 bf6929a2 Alexander Schreiber
4220 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
4221 a8083063 Iustin Pop
  """Shutdown an instance.
4222 a8083063 Iustin Pop

4223 a8083063 Iustin Pop
  """
4224 a8083063 Iustin Pop
  HPATH = "instance-stop"
4225 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4226 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4227 e873317a Guido Trotter
  REQ_BGL = False
4228 e873317a Guido Trotter
4229 6263189c Guido Trotter
  def CheckArguments(self):
4230 6263189c Guido Trotter
    """Check the arguments.
4231 6263189c Guido Trotter

4232 6263189c Guido Trotter
    """
4233 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
4234 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
4235 6263189c Guido Trotter
4236 e873317a Guido Trotter
  def ExpandNames(self):
4237 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4238 a8083063 Iustin Pop
4239 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4240 a8083063 Iustin Pop
    """Build hooks env.
4241 a8083063 Iustin Pop

4242 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4243 a8083063 Iustin Pop

4244 a8083063 Iustin Pop
    """
4245 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4246 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
4247 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4248 a8083063 Iustin Pop
    return env, nl, nl
4249 a8083063 Iustin Pop
4250 a8083063 Iustin Pop
  def CheckPrereq(self):
4251 a8083063 Iustin Pop
    """Check prerequisites.
4252 a8083063 Iustin Pop

4253 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4254 a8083063 Iustin Pop

4255 a8083063 Iustin Pop
    """
4256 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4257 e873317a Guido Trotter
    assert self.instance is not None, \
4258 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4259 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
4260 a8083063 Iustin Pop
4261 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4262 a8083063 Iustin Pop
    """Shutdown the instance.
4263 a8083063 Iustin Pop

4264 a8083063 Iustin Pop
    """
4265 a8083063 Iustin Pop
    instance = self.instance
4266 a8083063 Iustin Pop
    node_current = instance.primary_node
4267 6263189c Guido Trotter
    timeout = self.timeout
4268 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
4269 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4270 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4271 1fae010f Iustin Pop
    if msg:
4272 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4273 a8083063 Iustin Pop
4274 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
4275 a8083063 Iustin Pop
4276 a8083063 Iustin Pop
4277 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
4278 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
4279 fe7b0351 Michael Hanselmann

4280 fe7b0351 Michael Hanselmann
  """
4281 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
4282 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
4283 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
4284 4e0b4d2d Guido Trotter
  REQ_BGL = False
4285 4e0b4d2d Guido Trotter
4286 4e0b4d2d Guido Trotter
  def ExpandNames(self):
4287 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
4288 fe7b0351 Michael Hanselmann
4289 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
4290 fe7b0351 Michael Hanselmann
    """Build hooks env.
4291 fe7b0351 Michael Hanselmann

4292 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
4293 fe7b0351 Michael Hanselmann

4294 fe7b0351 Michael Hanselmann
    """
4295 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4296 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4297 fe7b0351 Michael Hanselmann
    return env, nl, nl
4298 fe7b0351 Michael Hanselmann
4299 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
4300 fe7b0351 Michael Hanselmann
    """Check prerequisites.
4301 fe7b0351 Michael Hanselmann

4302 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
4303 fe7b0351 Michael Hanselmann

4304 fe7b0351 Michael Hanselmann
    """
4305 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4306 4e0b4d2d Guido Trotter
    assert instance is not None, \
4307 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4308 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4309 4e0b4d2d Guido Trotter
4310 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
4311 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4312 5c983ee5 Iustin Pop
                                 self.op.instance_name,
4313 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
4314 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot reinstall")
4315 d0834de3 Michael Hanselmann
4316 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
4317 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
4318 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4319 d0834de3 Michael Hanselmann
      # OS verification
4320 cf26a87a Iustin Pop
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4321 231cd901 Iustin Pop
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4322 d0834de3 Michael Hanselmann
4323 fe7b0351 Michael Hanselmann
    self.instance = instance
4324 fe7b0351 Michael Hanselmann
4325 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
4326 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
4327 fe7b0351 Michael Hanselmann

4328 fe7b0351 Michael Hanselmann
    """
4329 fe7b0351 Michael Hanselmann
    inst = self.instance
4330 fe7b0351 Michael Hanselmann
4331 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4332 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4333 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
4334 a4eae71f Michael Hanselmann
      self.cfg.Update(inst, feedback_fn)
4335 d0834de3 Michael Hanselmann
4336 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4337 fe7b0351 Michael Hanselmann
    try:
4338 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
4339 4a0e011f Iustin Pop
      # FIXME: pass debug option from opcode to backend
4340 dd713605 Iustin Pop
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4341 dd713605 Iustin Pop
                                             self.op.debug_level)
4342 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
4343 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
4344 fe7b0351 Michael Hanselmann
    finally:
4345 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4346 fe7b0351 Michael Hanselmann
4347 fe7b0351 Michael Hanselmann
4348 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
4349 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
4350 bd315bfa Iustin Pop

4351 bd315bfa Iustin Pop
  """
4352 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
4353 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4354 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
4355 bd315bfa Iustin Pop
  REQ_BGL = False
4356 bd315bfa Iustin Pop
4357 bd315bfa Iustin Pop
  def CheckArguments(self):
4358 bd315bfa Iustin Pop
    """Check the arguments.
4359 bd315bfa Iustin Pop

4360 bd315bfa Iustin Pop
    """
4361 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
4362 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
4363 bd315bfa Iustin Pop
    for item in self.op.disks:
4364 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
4365 bd315bfa Iustin Pop
          item < 0):
4366 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
4367 5c983ee5 Iustin Pop
                                   str(item), errors.ECODE_INVAL)
4368 bd315bfa Iustin Pop
4369 bd315bfa Iustin Pop
  def ExpandNames(self):
4370 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
4371 bd315bfa Iustin Pop
4372 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
4373 bd315bfa Iustin Pop
    """Build hooks env.
4374 bd315bfa Iustin Pop

4375 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4376 bd315bfa Iustin Pop

4377 bd315bfa Iustin Pop
    """
4378 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4379 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4380 bd315bfa Iustin Pop
    return env, nl, nl
4381 bd315bfa Iustin Pop
4382 bd315bfa Iustin Pop
  def CheckPrereq(self):
4383 bd315bfa Iustin Pop
    """Check prerequisites.
4384 bd315bfa Iustin Pop

4385 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
4386 bd315bfa Iustin Pop

4387 bd315bfa Iustin Pop
    """
4388 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4389 bd315bfa Iustin Pop
    assert instance is not None, \
4390 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4391 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4392 bd315bfa Iustin Pop
4393 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
4394 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4395 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_INVAL)
4396 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot recreate disks")
4397 bd315bfa Iustin Pop
4398 bd315bfa Iustin Pop
    if not self.op.disks:
4399 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
4400 bd315bfa Iustin Pop
    else:
4401 bd315bfa Iustin Pop
      for idx in self.op.disks:
4402 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
4403 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4404 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
4405 bd315bfa Iustin Pop
4406 bd315bfa Iustin Pop
    self.instance = instance
4407 bd315bfa Iustin Pop
4408 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
4409 bd315bfa Iustin Pop
    """Recreate the disks.
4410 bd315bfa Iustin Pop

4411 bd315bfa Iustin Pop
    """
4412 bd315bfa Iustin Pop
    to_skip = []
4413 1122eb25 Iustin Pop
    for idx, _ in enumerate(self.instance.disks):
4414 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
4415 bd315bfa Iustin Pop
        to_skip.append(idx)
4416 bd315bfa Iustin Pop
        continue
4417 bd315bfa Iustin Pop
4418 bd315bfa Iustin Pop
    _CreateDisks(self, self.instance, to_skip=to_skip)
4419 bd315bfa Iustin Pop
4420 bd315bfa Iustin Pop
4421 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
4422 decd5f45 Iustin Pop
  """Rename an instance.
4423 decd5f45 Iustin Pop

4424 decd5f45 Iustin Pop
  """
4425 decd5f45 Iustin Pop
  HPATH = "instance-rename"
4426 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4427 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
4428 decd5f45 Iustin Pop
4429 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
4430 decd5f45 Iustin Pop
    """Build hooks env.
4431 decd5f45 Iustin Pop

4432 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4433 decd5f45 Iustin Pop

4434 decd5f45 Iustin Pop
    """
4435 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4436 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4437 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4438 decd5f45 Iustin Pop
    return env, nl, nl
4439 decd5f45 Iustin Pop
4440 decd5f45 Iustin Pop
  def CheckPrereq(self):
4441 decd5f45 Iustin Pop
    """Check prerequisites.
4442 decd5f45 Iustin Pop

4443 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
4444 decd5f45 Iustin Pop

4445 decd5f45 Iustin Pop
    """
4446 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4447 cf26a87a Iustin Pop
                                                self.op.instance_name)
4448 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4449 cf26a87a Iustin Pop
    assert instance is not None
4450 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4451 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot rename")
4452 decd5f45 Iustin Pop
    self.instance = instance
4453 decd5f45 Iustin Pop
4454 decd5f45 Iustin Pop
    # new name verification
4455 104f4ca1 Iustin Pop
    name_info = utils.GetHostInfo(self.op.new_name)
4456 decd5f45 Iustin Pop
4457 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
4458 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
4459 7bde3275 Guido Trotter
    if new_name in instance_list:
4460 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4461 5c983ee5 Iustin Pop
                                 new_name, errors.ECODE_EXISTS)
4462 7bde3275 Guido Trotter
4463 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
4464 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4465 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4466 5c983ee5 Iustin Pop
                                   (name_info.ip, new_name),
4467 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
4468 decd5f45 Iustin Pop
4469 decd5f45 Iustin Pop
4470 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
4471 decd5f45 Iustin Pop
    """Reinstall the instance.
4472 decd5f45 Iustin Pop

4473 decd5f45 Iustin Pop
    """
4474 decd5f45 Iustin Pop
    inst = self.instance
4475 decd5f45 Iustin Pop
    old_name = inst.name
4476 decd5f45 Iustin Pop
4477 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4478 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4479 b23c4333 Manuel Franceschini
4480 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4481 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
4482 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4483 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4484 decd5f45 Iustin Pop
4485 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
4486 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4487 decd5f45 Iustin Pop
4488 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4489 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4490 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4491 72737a7f Iustin Pop
                                                     old_file_storage_dir,
4492 72737a7f Iustin Pop
                                                     new_file_storage_dir)
4493 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4494 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
4495 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
4496 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
4497 b23c4333 Manuel Franceschini
4498 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4499 decd5f45 Iustin Pop
    try:
4500 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4501 dd713605 Iustin Pop
                                                 old_name, self.op.debug_level)
4502 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4503 96841384 Iustin Pop
      if msg:
4504 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4505 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4506 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4507 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4508 decd5f45 Iustin Pop
    finally:
4509 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4510 decd5f45 Iustin Pop
4511 decd5f45 Iustin Pop
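
# Illustrative sketch, not part of the original module: the lock-rename
# pattern used above when an instance changes name while the big Ganeti lock
# is held. The helper name is an assumption; the glm calls mirror
# LURenameInstance.Exec.
def _ExampleRenameInstanceLock(lu, old_name, new_name):
  """Sketch: drop the lock under the old name and register the new one."""
  lu.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
  lu.context.glm.add(locking.LEVEL_INSTANCE, new_name)
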
4512 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4513 a8083063 Iustin Pop
  """Remove an instance.
4514 a8083063 Iustin Pop

4515 a8083063 Iustin Pop
  """
4516 a8083063 Iustin Pop
  HPATH = "instance-remove"
4517 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4518 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4519 cf472233 Guido Trotter
  REQ_BGL = False
4520 cf472233 Guido Trotter
4521 17c3f802 Guido Trotter
  def CheckArguments(self):
4522 17c3f802 Guido Trotter
    """Check the arguments.
4523 17c3f802 Guido Trotter

4524 17c3f802 Guido Trotter
    """
4525 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4526 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4527 17c3f802 Guido Trotter
4528 cf472233 Guido Trotter
  def ExpandNames(self):
4529 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
4530 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4531 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4532 cf472233 Guido Trotter
4533 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4534 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4535 cf472233 Guido Trotter
      self._LockInstancesNodes()
4536 a8083063 Iustin Pop
4537 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4538 a8083063 Iustin Pop
    """Build hooks env.
4539 a8083063 Iustin Pop

4540 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4541 a8083063 Iustin Pop

4542 a8083063 Iustin Pop
    """
4543 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4544 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4545 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4546 abd8e836 Iustin Pop
    nl_post = list(self.instance.all_nodes) + nl
4547 abd8e836 Iustin Pop
    return env, nl, nl_post
4548 a8083063 Iustin Pop
4549 a8083063 Iustin Pop
  def CheckPrereq(self):
4550 a8083063 Iustin Pop
    """Check prerequisites.
4551 a8083063 Iustin Pop

4552 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4553 a8083063 Iustin Pop

4554 a8083063 Iustin Pop
    """
4555 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4556 cf472233 Guido Trotter
    assert self.instance is not None, \
4557 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4558 a8083063 Iustin Pop
4559 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4560 a8083063 Iustin Pop
    """Remove the instance.
4561 a8083063 Iustin Pop

4562 a8083063 Iustin Pop
    """
4563 a8083063 Iustin Pop
    instance = self.instance
4564 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4565 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4566 a8083063 Iustin Pop
4567 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4568 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4569 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4570 1fae010f Iustin Pop
    if msg:
4571 1d67656e Iustin Pop
      if self.op.ignore_failures:
4572 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4573 1d67656e Iustin Pop
      else:
4574 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4575 1fae010f Iustin Pop
                                 " node %s: %s" %
4576 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4577 a8083063 Iustin Pop
4578 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4579 a8083063 Iustin Pop
4580 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4581 1d67656e Iustin Pop
      if self.op.ignore_failures:
4582 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4583 1d67656e Iustin Pop
      else:
4584 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4585 a8083063 Iustin Pop
4586 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4587 a8083063 Iustin Pop
4588 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4589 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4590 a8083063 Iustin Pop
4591 a8083063 Iustin Pop
4592 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4593 a8083063 Iustin Pop
  """Logical unit for querying instances.
4594 a8083063 Iustin Pop

4595 a8083063 Iustin Pop
  """
4596 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
4597 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4598 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4599 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4600 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4601 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4602 5b460366 Iustin Pop
                                    "admin_state",
4603 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4604 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4605 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4606 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4607 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4608 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4609 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4610 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4611 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4612 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4613 19bed813 Iustin Pop
                                    "hvparams",
4614 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4615 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4616 7736a5f2 Iustin Pop
                                   for name in constants.HVS_PARAMETERS
4617 7736a5f2 Iustin Pop
                                   if name not in constants.HVC_GLOBALS] +
4618 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4619 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4620 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4621 31bf511f Iustin Pop
4622 a8083063 Iustin Pop
4623 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4624 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4625 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4626 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4627 a8083063 Iustin Pop
4628 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4629 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4630 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4631 7eb9d8f7 Guido Trotter
4632 57a2fb91 Iustin Pop
    if self.op.names:
4633 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4634 7eb9d8f7 Guido Trotter
    else:
4635 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4636 7eb9d8f7 Guido Trotter
4637 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4638 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4639 57a2fb91 Iustin Pop
    if self.do_locking:
4640 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4641 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4642 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4643 7eb9d8f7 Guido Trotter
4644 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4645 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4646 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4647 7eb9d8f7 Guido Trotter
4648 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4649 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4650 7eb9d8f7 Guido Trotter

4651 7eb9d8f7 Guido Trotter
    """
4652 57a2fb91 Iustin Pop
    pass
4653 069dcc86 Iustin Pop
4654 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4655 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4656 a8083063 Iustin Pop

4657 a8083063 Iustin Pop
    """
4658 7260cfbe Iustin Pop
    # pylint: disable-msg=R0912
4659 7260cfbe Iustin Pop
    # way too many branches here
4660 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4661 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4662 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4663 a7f5dc98 Iustin Pop
      if self.do_locking:
4664 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4665 a7f5dc98 Iustin Pop
      else:
4666 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4667 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4668 57a2fb91 Iustin Pop
    else:
4669 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4670 a7f5dc98 Iustin Pop
      if self.do_locking:
4671 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4672 a7f5dc98 Iustin Pop
      else:
4673 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4674 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4675 a7f5dc98 Iustin Pop
      if missing:
4676 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4677 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4678 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4679 c1f1cbb2 Iustin Pop
4680 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4681 a8083063 Iustin Pop
4682 a8083063 Iustin Pop
    # begin data gathering
4683 a8083063 Iustin Pop
4684 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4685 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4686 a8083063 Iustin Pop
4687 a8083063 Iustin Pop
    bad_nodes = []
4688 cbfc4681 Iustin Pop
    off_nodes = []
4689 ec79568d Iustin Pop
    if self.do_node_query:
4690 a8083063 Iustin Pop
      live_data = {}
4691 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4692 a8083063 Iustin Pop
      for name in nodes:
4693 a8083063 Iustin Pop
        result = node_data[name]
4694 cbfc4681 Iustin Pop
        if result.offline:
4695 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4696 cbfc4681 Iustin Pop
          off_nodes.append(name)
4697 3cebe102 Michael Hanselmann
        if result.fail_msg:
4698 a8083063 Iustin Pop
          bad_nodes.append(name)
4699 781de953 Iustin Pop
        else:
4700 2fa74ef4 Iustin Pop
          if result.payload:
4701 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4702 2fa74ef4 Iustin Pop
          # else no instance is alive
4703 a8083063 Iustin Pop
    else:
4704 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4705 a8083063 Iustin Pop
4706 a8083063 Iustin Pop
    # end data gathering
4707 a8083063 Iustin Pop
4708 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4709 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4710 a8083063 Iustin Pop
    output = []
4711 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4712 a8083063 Iustin Pop
    for instance in instance_list:
4713 a8083063 Iustin Pop
      iout = []
4714 7736a5f2 Iustin Pop
      i_hv = cluster.FillHV(instance, skip_globals=True)
4715 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4716 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4717 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4718 a8083063 Iustin Pop
      for field in self.op.output_fields:
4719 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4720 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4721 19bed813 Iustin Pop
          val = getattr(instance, field)
4722 a8083063 Iustin Pop
        elif field == "pnode":
4723 a8083063 Iustin Pop
          val = instance.primary_node
4724 a8083063 Iustin Pop
        elif field == "snodes":
4725 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4726 a8083063 Iustin Pop
        elif field == "admin_state":
4727 0d68c45d Iustin Pop
          val = instance.admin_up
4728 a8083063 Iustin Pop
        elif field == "oper_state":
4729 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4730 8a23d2d3 Iustin Pop
            val = None
4731 a8083063 Iustin Pop
          else:
4732 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
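        # The "status" value folds node health, hypervisor state and the
        # configured admin state into one string; the branch below can yield
        # ERROR_nodeoffline, ERROR_nodedown, running, ERROR_up, ERROR_down
        # or ADMIN_down.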
4733 d8052456 Iustin Pop
        elif field == "status":
4734 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4735 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4736 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4737 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4738 d8052456 Iustin Pop
          else:
4739 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4740 d8052456 Iustin Pop
            if running:
4741 0d68c45d Iustin Pop
              if instance.admin_up:
4742 d8052456 Iustin Pop
                val = "running"
4743 d8052456 Iustin Pop
              else:
4744 d8052456 Iustin Pop
                val = "ERROR_up"
4745 d8052456 Iustin Pop
            else:
4746 0d68c45d Iustin Pop
              if instance.admin_up:
4747 d8052456 Iustin Pop
                val = "ERROR_down"
4748 d8052456 Iustin Pop
              else:
4749 d8052456 Iustin Pop
                val = "ADMIN_down"
4750 a8083063 Iustin Pop
        elif field == "oper_ram":
4751 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4752 8a23d2d3 Iustin Pop
            val = None
4753 a8083063 Iustin Pop
          elif instance.name in live_data:
4754 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4755 a8083063 Iustin Pop
          else:
4756 a8083063 Iustin Pop
            val = "-"
4757 c1ce76bb Iustin Pop
        elif field == "vcpus":
4758 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4759 a8083063 Iustin Pop
        elif field == "disk_template":
4760 a8083063 Iustin Pop
          val = instance.disk_template
4761 a8083063 Iustin Pop
        elif field == "ip":
4762 39a02558 Guido Trotter
          if instance.nics:
4763 39a02558 Guido Trotter
            val = instance.nics[0].ip
4764 39a02558 Guido Trotter
          else:
4765 39a02558 Guido Trotter
            val = None
4766 638c6349 Guido Trotter
        elif field == "nic_mode":
4767 638c6349 Guido Trotter
          if instance.nics:
4768 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4769 638c6349 Guido Trotter
          else:
4770 638c6349 Guido Trotter
            val = None
4771 638c6349 Guido Trotter
        elif field == "nic_link":
4772 39a02558 Guido Trotter
          if instance.nics:
4773 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4774 638c6349 Guido Trotter
          else:
4775 638c6349 Guido Trotter
            val = None
4776 638c6349 Guido Trotter
        elif field == "bridge":
4777 638c6349 Guido Trotter
          if (instance.nics and
4778 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4779 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4780 39a02558 Guido Trotter
          else:
4781 39a02558 Guido Trotter
            val = None
4782 a8083063 Iustin Pop
        elif field == "mac":
4783 39a02558 Guido Trotter
          if instance.nics:
4784 39a02558 Guido Trotter
            val = instance.nics[0].mac
4785 39a02558 Guido Trotter
          else:
4786 39a02558 Guido Trotter
            val = None
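        # "sda_size"/"sdb_size" are legacy aliases for the first two disks;
        # ord(field[2]) - ord('a') maps 'a' to index 0 and 'b' to index 1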
4787 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4788 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4789 ad24e046 Iustin Pop
          try:
4790 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4791 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4792 8a23d2d3 Iustin Pop
            val = None
4793 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4794 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4795 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4796 130a6a6f Iustin Pop
        elif field == "tags":
4797 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4798 338e51e8 Iustin Pop
        elif field == "hvparams":
4799 338e51e8 Iustin Pop
          val = i_hv
4800 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4801 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
4802 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
4803 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4804 338e51e8 Iustin Pop
        elif field == "beparams":
4805 338e51e8 Iustin Pop
          val = i_be
4806 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4807 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4808 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4809 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4810 71c1af58 Iustin Pop
          # matches a variable list
4811 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4812 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4813 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4814 71c1af58 Iustin Pop
              val = len(instance.disks)
4815 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4816 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4817 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4818 3e0cea06 Iustin Pop
              try:
4819 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4820 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4821 71c1af58 Iustin Pop
                val = None
4822 71c1af58 Iustin Pop
            else:
4823 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4824 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4825 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4826 71c1af58 Iustin Pop
              val = len(instance.nics)
4827 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4828 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4829 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4830 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4831 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4832 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4833 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4834 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4835 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4836 638c6349 Guido Trotter
              val = []
4837 638c6349 Guido Trotter
              for nicp in i_nicp:
4838 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4839 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4840 638c6349 Guido Trotter
                else:
4841 638c6349 Guido Trotter
                  val.append(None)
4842 71c1af58 Iustin Pop
            else:
4843 71c1af58 Iustin Pop
              # index-based item
4844 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4845 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4846 71c1af58 Iustin Pop
                val = None
4847 71c1af58 Iustin Pop
              else:
4848 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4849 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4850 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4851 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4852 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4853 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4854 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4855 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4856 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4857 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4858 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4859 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4860 638c6349 Guido Trotter
                  else:
4861 638c6349 Guido Trotter
                    val = None
4862 71c1af58 Iustin Pop
                else:
4863 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4864 71c1af58 Iustin Pop
          else:
4865 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4866 c1ce76bb Iustin Pop
                           field)
4867 a8083063 Iustin Pop
        else:
4868 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4869 a8083063 Iustin Pop
        iout.append(val)
4870 a8083063 Iustin Pop
      output.append(iout)
4871 a8083063 Iustin Pop
4872 a8083063 Iustin Pop
    return output
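    # Illustrative example (values assumed, not taken from this file): with
    # output_fields=["name", "pnode", "status"], each row of the returned list
    # would look like ["inst1.example.com", "node1.example.com", "running"].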
4873 a8083063 Iustin Pop
4874 a8083063 Iustin Pop
4875 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4876 a8083063 Iustin Pop
  """Failover an instance.
4877 a8083063 Iustin Pop

4878 a8083063 Iustin Pop
  """
4879 a8083063 Iustin Pop
  HPATH = "instance-failover"
4880 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4881 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4882 c9e5c064 Guido Trotter
  REQ_BGL = False
4883 c9e5c064 Guido Trotter
4884 17c3f802 Guido Trotter
  def CheckArguments(self):
4885 17c3f802 Guido Trotter
    """Check the arguments.
4886 17c3f802 Guido Trotter

4887 17c3f802 Guido Trotter
    """
4888 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4889 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4890 17c3f802 Guido Trotter
4891 c9e5c064 Guido Trotter
  def ExpandNames(self):
4892 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4893 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4894 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4895 c9e5c064 Guido Trotter
4896 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4897 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4898 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4899 a8083063 Iustin Pop
4900 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4901 a8083063 Iustin Pop
    """Build hooks env.
4902 a8083063 Iustin Pop

4903 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4904 a8083063 Iustin Pop

4905 a8083063 Iustin Pop
    """
4906 08eec276 Iustin Pop
    instance = self.instance
4907 08eec276 Iustin Pop
    source_node = instance.primary_node
4908 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
4909 a8083063 Iustin Pop
    env = {
4910 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4911 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4912 08eec276 Iustin Pop
      "OLD_PRIMARY": source_node,
4913 08eec276 Iustin Pop
      "OLD_SECONDARY": target_node,
4914 08eec276 Iustin Pop
      "NEW_PRIMARY": target_node,
4915 08eec276 Iustin Pop
      "NEW_SECONDARY": source_node,
4916 a8083063 Iustin Pop
      }
4917 08eec276 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, instance))
4918 08eec276 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4919 abd8e836 Iustin Pop
    nl_post = list(nl)
4920 abd8e836 Iustin Pop
    nl_post.append(source_node)
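    # the post-phase hooks additionally run on the old primary node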
4921 abd8e836 Iustin Pop
    return env, nl, nl_post
4922 a8083063 Iustin Pop
4923 a8083063 Iustin Pop
  def CheckPrereq(self):
4924 a8083063 Iustin Pop
    """Check prerequisites.
4925 a8083063 Iustin Pop

4926 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4927 a8083063 Iustin Pop

4928 a8083063 Iustin Pop
    """
4929 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4930 c9e5c064 Guido Trotter
    assert self.instance is not None, \
4931 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4932 a8083063 Iustin Pop
4933 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4934 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4935 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
4936 5c983ee5 Iustin Pop
                                 " network mirrored, cannot failover.",
4937 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4938 2a710df1 Michael Hanselmann
4939 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
4940 2a710df1 Michael Hanselmann
    if not secondary_nodes:
4941 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
4942 abdf0113 Iustin Pop
                                   "a mirrored disk template")
4943 2a710df1 Michael Hanselmann
4944 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
4945 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
4946 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4947 d27776f0 Iustin Pop
    if instance.admin_up:
4948 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
4949 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4950 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4951 d27776f0 Iustin Pop
                           instance.hypervisor)
4952 d27776f0 Iustin Pop
    else:
4953 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4954 d27776f0 Iustin Pop
                   " instance will not be started")
4955 3a7c308e Guido Trotter
4956 a8083063 Iustin Pop
    # check bridge existence
4957 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4958 a8083063 Iustin Pop
4959 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4960 a8083063 Iustin Pop
    """Failover an instance.
4961 a8083063 Iustin Pop

4962 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
4963 a8083063 Iustin Pop
    starting it on the secondary.
4964 a8083063 Iustin Pop

4965 a8083063 Iustin Pop
    """
4966 a8083063 Iustin Pop
    instance = self.instance
4967 a8083063 Iustin Pop
4968 a8083063 Iustin Pop
    source_node = instance.primary_node
4969 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
4970 a8083063 Iustin Pop
4971 1df79ce6 Michael Hanselmann
    if instance.admin_up:
4972 1df79ce6 Michael Hanselmann
      feedback_fn("* checking disk consistency between source and target")
4973 1df79ce6 Michael Hanselmann
      for dev in instance.disks:
4974 1df79ce6 Michael Hanselmann
        # for drbd, these are drbd over lvm
4975 1df79ce6 Michael Hanselmann
        if not _CheckDiskConsistency(self, dev, target_node, False):
4976 1df79ce6 Michael Hanselmann
          if not self.op.ignore_consistency:
4977 1df79ce6 Michael Hanselmann
            raise errors.OpExecError("Disk %s is degraded on target node,"
4978 1df79ce6 Michael Hanselmann
                                     " aborting failover." % dev.iv_name)
4979 1df79ce6 Michael Hanselmann
    else:
4980 1df79ce6 Michael Hanselmann
      feedback_fn("* not checking disk consistency as instance is not running")
4981 a8083063 Iustin Pop
4982 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
4983 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4984 9a4f63d1 Iustin Pop
                 instance.name, source_node)
4985 a8083063 Iustin Pop
4986 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4987 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4988 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4989 1fae010f Iustin Pop
    if msg:
4990 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
4991 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4992 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
4993 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
4994 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
4995 24a40d57 Iustin Pop
      else:
4996 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4997 1fae010f Iustin Pop
                                 " node %s: %s" %
4998 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
4999 a8083063 Iustin Pop
5000 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
5001 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5002 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
5003 a8083063 Iustin Pop
5004 a8083063 Iustin Pop
    instance.primary_node = target_node
5005 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
5006 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5007 a8083063 Iustin Pop
5008 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
5009 0d68c45d Iustin Pop
    if instance.admin_up:
5010 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
5011 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
5012 9a4f63d1 Iustin Pop
                   instance.name, target_node)
5013 12a0cfbe Guido Trotter
5014 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5015 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
5016 12a0cfbe Guido Trotter
      if not disks_ok:
5017 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5018 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
5019 a8083063 Iustin Pop
5020 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
5021 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5022 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5023 dd279568 Iustin Pop
      if msg:
5024 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5025 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5026 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
5027 a8083063 Iustin Pop
5028 a8083063 Iustin Pop
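# Usage note (an assumption about the CLI, not stated in this file): the LU
# above typically backs a "gnt-instance failover" request, while the next LU
# backs "gnt-instance migrate", which keeps the instance running during the
# move.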
5029 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
5030 53c776b5 Iustin Pop
  """Migrate an instance.
5031 53c776b5 Iustin Pop

5032 53c776b5 Iustin Pop
  This is migration without shutting the instance down, as opposed to
5033 53c776b5 Iustin Pop
  failover, which is done with a shutdown.
5034 53c776b5 Iustin Pop

5035 53c776b5 Iustin Pop
  """
5036 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
5037 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5038 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
5039 53c776b5 Iustin Pop
5040 53c776b5 Iustin Pop
  REQ_BGL = False
5041 53c776b5 Iustin Pop
5042 53c776b5 Iustin Pop
  def ExpandNames(self):
5043 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
5044 3e06e001 Michael Hanselmann
5045 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5046 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5047 53c776b5 Iustin Pop
5048 3e06e001 Michael Hanselmann
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
5049 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
5050 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
5051 3e06e001 Michael Hanselmann
5052 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
5053 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
5054 53c776b5 Iustin Pop
      self._LockInstancesNodes()
5055 53c776b5 Iustin Pop
5056 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
5057 53c776b5 Iustin Pop
    """Build hooks env.
5058 53c776b5 Iustin Pop

5059 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5060 53c776b5 Iustin Pop

5061 53c776b5 Iustin Pop
    """
5062 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
5063 08eec276 Iustin Pop
    source_node = instance.primary_node
5064 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
5065 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
5066 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
5067 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
5068 08eec276 Iustin Pop
    env.update({
5069 08eec276 Iustin Pop
        "OLD_PRIMARY": source_node,
5070 08eec276 Iustin Pop
        "OLD_SECONDARY": target_node,
5071 08eec276 Iustin Pop
        "NEW_PRIMARY": target_node,
5072 08eec276 Iustin Pop
        "NEW_SECONDARY": source_node,
5073 08eec276 Iustin Pop
        })
5074 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5075 abd8e836 Iustin Pop
    nl_post = list(nl)
5076 abd8e836 Iustin Pop
    nl_post.append(source_node)
5077 abd8e836 Iustin Pop
    return env, nl, nl_post
5078 53c776b5 Iustin Pop
5079 3e06e001 Michael Hanselmann
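# LUMigrateInstance intentionally defines no CheckPrereq/Exec of its own: both
# are supplied by the TLMigrateInstance tasklet registered in ExpandNames (see
# the tasklet class further below).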
5080 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
5081 313bcead Iustin Pop
  """Move an instance by data-copying.
5082 313bcead Iustin Pop

5083 313bcead Iustin Pop
  """
5084 313bcead Iustin Pop
  HPATH = "instance-move"
5085 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5086 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
5087 313bcead Iustin Pop
  REQ_BGL = False
5088 313bcead Iustin Pop
5089 17c3f802 Guido Trotter
  def CheckArguments(self):
5090 17c3f802 Guido Trotter
    """Check the arguments.
5091 17c3f802 Guido Trotter

5092 17c3f802 Guido Trotter
    """
5093 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
5094 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
5095 17c3f802 Guido Trotter
5096 313bcead Iustin Pop
  def ExpandNames(self):
5097 313bcead Iustin Pop
    self._ExpandAndLockInstance()
5098 cf26a87a Iustin Pop
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5099 313bcead Iustin Pop
    self.op.target_node = target_node
5100 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5101 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5102 313bcead Iustin Pop
5103 313bcead Iustin Pop
  def DeclareLocks(self, level):
5104 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
5105 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
5106 313bcead Iustin Pop
5107 313bcead Iustin Pop
  def BuildHooksEnv(self):
5108 313bcead Iustin Pop
    """Build hooks env.
5109 313bcead Iustin Pop

5110 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5111 313bcead Iustin Pop

5112 313bcead Iustin Pop
    """
5113 313bcead Iustin Pop
    env = {
5114 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
5115 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
5116 313bcead Iustin Pop
      }
5117 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5118 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5119 313bcead Iustin Pop
                                       self.op.target_node]
5120 313bcead Iustin Pop
    return env, nl, nl
5121 313bcead Iustin Pop
5122 313bcead Iustin Pop
  def CheckPrereq(self):
5123 313bcead Iustin Pop
    """Check prerequisites.
5124 313bcead Iustin Pop

5125 313bcead Iustin Pop
    This checks that the instance is in the cluster.
5126 313bcead Iustin Pop

5127 313bcead Iustin Pop
    """
5128 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5129 313bcead Iustin Pop
    assert self.instance is not None, \
5130 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
5131 313bcead Iustin Pop
5132 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
5133 313bcead Iustin Pop
    assert node is not None, \
5134 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
5135 313bcead Iustin Pop
5136 313bcead Iustin Pop
    self.target_node = target_node = node.name
5137 313bcead Iustin Pop
5138 313bcead Iustin Pop
    if target_node == instance.primary_node:
5139 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5140 5c983ee5 Iustin Pop
                                 (instance.name, target_node),
5141 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5142 313bcead Iustin Pop
5143 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5144 313bcead Iustin Pop
5145 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
5146 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5147 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5148 d1b83918 Iustin Pop
                                   " cannot copy" % idx, errors.ECODE_STATE)
5149 313bcead Iustin Pop
5150 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
5151 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
5152 313bcead Iustin Pop
5153 313bcead Iustin Pop
    if instance.admin_up:
5154 313bcead Iustin Pop
      # check memory requirements on the secondary node
5155 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5156 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
5157 313bcead Iustin Pop
                           instance.hypervisor)
5158 313bcead Iustin Pop
    else:
5159 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
5160 313bcead Iustin Pop
                   " instance will not be started")
5161 313bcead Iustin Pop
5162 313bcead Iustin Pop
    # check bridge existence
5163 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5164 313bcead Iustin Pop
5165 313bcead Iustin Pop
  def Exec(self, feedback_fn):
5166 313bcead Iustin Pop
    """Move an instance.
5167 313bcead Iustin Pop

5168 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
5169 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
5170 313bcead Iustin Pop

5171 313bcead Iustin Pop
    """
5172 313bcead Iustin Pop
    instance = self.instance
5173 313bcead Iustin Pop
5174 313bcead Iustin Pop
    source_node = instance.primary_node
5175 313bcead Iustin Pop
    target_node = self.target_node
5176 313bcead Iustin Pop
5177 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
5178 313bcead Iustin Pop
                 instance.name, source_node)
5179 313bcead Iustin Pop
5180 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
5181 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
5182 313bcead Iustin Pop
    msg = result.fail_msg
5183 313bcead Iustin Pop
    if msg:
5184 313bcead Iustin Pop
      if self.op.ignore_consistency:
5185 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5186 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
5187 313bcead Iustin Pop
                             " %s is down. Error details: %s",
5188 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
5189 313bcead Iustin Pop
      else:
5190 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5191 313bcead Iustin Pop
                                 " node %s: %s" %
5192 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
5193 313bcead Iustin Pop
5194 313bcead Iustin Pop
    # create the target disks
5195 313bcead Iustin Pop
    try:
5196 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
5197 313bcead Iustin Pop
    except errors.OpExecError:
5198 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5199 313bcead Iustin Pop
      try:
5200 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5201 313bcead Iustin Pop
      finally:
5202 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5203 313bcead Iustin Pop
        raise
5204 313bcead Iustin Pop
5205 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
5206 313bcead Iustin Pop
5207 313bcead Iustin Pop
    errs = []
5208 313bcead Iustin Pop
    # activate, get path, copy the data over
5209 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
5210 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
5211 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
5212 313bcead Iustin Pop
                                               instance.name, True)
5213 313bcead Iustin Pop
      if result.fail_msg:
5214 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
5215 313bcead Iustin Pop
                        idx, result.fail_msg)
5216 313bcead Iustin Pop
        errs.append(result.fail_msg)
5217 313bcead Iustin Pop
        break
5218 313bcead Iustin Pop
      dev_path = result.payload
5219 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
5220 313bcead Iustin Pop
                                             target_node, dev_path,
5221 313bcead Iustin Pop
                                             cluster_name)
5222 313bcead Iustin Pop
      if result.fail_msg:
5223 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
5224 313bcead Iustin Pop
                        idx, result.fail_msg)
5225 313bcead Iustin Pop
        errs.append(result.fail_msg)
5226 313bcead Iustin Pop
        break
5227 313bcead Iustin Pop
5228 313bcead Iustin Pop
    if errs:
5229 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
5230 313bcead Iustin Pop
      try:
5231 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5232 313bcead Iustin Pop
      finally:
5233 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5234 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
5235 313bcead Iustin Pop
                                 (",".join(errs),))
5236 313bcead Iustin Pop
5237 313bcead Iustin Pop
    instance.primary_node = target_node
5238 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5239 313bcead Iustin Pop
5240 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
5241 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
5242 313bcead Iustin Pop
5243 313bcead Iustin Pop
    # Only start the instance if it's marked as up
5244 313bcead Iustin Pop
    if instance.admin_up:
5245 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
5246 313bcead Iustin Pop
                   instance.name, target_node)
5247 313bcead Iustin Pop
5248 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5249 313bcead Iustin Pop
                                           ignore_secondaries=True)
5250 313bcead Iustin Pop
      if not disks_ok:
5251 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5252 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
5253 313bcead Iustin Pop
5254 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5255 313bcead Iustin Pop
      msg = result.fail_msg
5256 313bcead Iustin Pop
      if msg:
5257 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5258 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5259 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
5260 313bcead Iustin Pop
5261 313bcead Iustin Pop
5262 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
5263 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
5264 80cb875c Michael Hanselmann

5265 80cb875c Michael Hanselmann
  """
5266 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
5267 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
5268 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
5269 80cb875c Michael Hanselmann
  REQ_BGL = False
5270 80cb875c Michael Hanselmann
5271 80cb875c Michael Hanselmann
  def ExpandNames(self):
5272 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5273 80cb875c Michael Hanselmann
5274 80cb875c Michael Hanselmann
    self.needed_locks = {
5275 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
5276 80cb875c Michael Hanselmann
      }
5277 80cb875c Michael Hanselmann
5278 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5279 80cb875c Michael Hanselmann
5280 80cb875c Michael Hanselmann
    # Create tasklets for migrating all primary instances on this node
5281 80cb875c Michael Hanselmann
    names = []
5282 80cb875c Michael Hanselmann
    tasklets = []
5283 80cb875c Michael Hanselmann
5284 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5285 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
5286 80cb875c Michael Hanselmann
      names.append(inst.name)
5287 80cb875c Michael Hanselmann
5288 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
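      # (the trailing False is the "cleanup" argument: node evacuation always
      # starts fresh migrations)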
5289 80cb875c Michael Hanselmann
5290 80cb875c Michael Hanselmann
    self.tasklets = tasklets
5291 80cb875c Michael Hanselmann
5292 80cb875c Michael Hanselmann
    # Declare instance locks
5293 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
5294 80cb875c Michael Hanselmann
5295 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
5296 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
5297 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
5298 80cb875c Michael Hanselmann
5299 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
5300 80cb875c Michael Hanselmann
    """Build hooks env.
5301 80cb875c Michael Hanselmann

5302 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
5303 80cb875c Michael Hanselmann

5304 80cb875c Michael Hanselmann
    """
5305 80cb875c Michael Hanselmann
    env = {
5306 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
5307 80cb875c Michael Hanselmann
      }
5308 80cb875c Michael Hanselmann
5309 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
5310 80cb875c Michael Hanselmann
5311 80cb875c Michael Hanselmann
    return (env, nl, nl)
5312 80cb875c Michael Hanselmann
5313 80cb875c Michael Hanselmann
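# TLMigrateInstance is shared by LUMigrateInstance (a single tasklet) and
# LUMigrateNode (one tasklet per primary instance); the "cleanup" flag selects
# recovery of a previously failed migration instead of starting a new one.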
5314 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
5315 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
5316 3e06e001 Michael Hanselmann
    """Initializes this class.
5317 3e06e001 Michael Hanselmann

5318 3e06e001 Michael Hanselmann
    """
5319 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
5320 464243a7 Michael Hanselmann
5321 3e06e001 Michael Hanselmann
    # Parameters
5322 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
5323 3e06e001 Michael Hanselmann
    self.live = live
5324 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
5325 3e06e001 Michael Hanselmann
5326 53c776b5 Iustin Pop
  def CheckPrereq(self):
5327 53c776b5 Iustin Pop
    """Check prerequisites.
5328 53c776b5 Iustin Pop

5329 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
5330 53c776b5 Iustin Pop

5331 53c776b5 Iustin Pop
    """
5332 cf26a87a Iustin Pop
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5333 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(instance_name)
5334 cf26a87a Iustin Pop
    assert instance is not None
5335 53c776b5 Iustin Pop
5336 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5337 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
5338 5c983ee5 Iustin Pop
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5339 53c776b5 Iustin Pop
5340 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
5341 53c776b5 Iustin Pop
    if not secondary_nodes:
5342 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
5343 733a2b6a Iustin Pop
                                      " drbd8 disk template")
5344 53c776b5 Iustin Pop
5345 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5346 53c776b5 Iustin Pop
5347 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
5348 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
5349 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
5350 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
5351 53c776b5 Iustin Pop
                         instance.hypervisor)
5352 53c776b5 Iustin Pop
5353 53c776b5 Iustin Pop
    # check bridge existence
5354 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5355 53c776b5 Iustin Pop
5356 3e06e001 Michael Hanselmann
    if not self.cleanup:
5357 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
5358 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
5359 53c776b5 Iustin Pop
                                                 instance)
5360 045dd6d9 Iustin Pop
      result.Raise("Can't migrate, please use failover",
5361 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_STATE)
5362 53c776b5 Iustin Pop
5363 53c776b5 Iustin Pop
    self.instance = instance
5364 53c776b5 Iustin Pop
5365 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
5366 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
5367 53c776b5 Iustin Pop

5368 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
5369 53c776b5 Iustin Pop

5370 53c776b5 Iustin Pop
    """
5371 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
5372 53c776b5 Iustin Pop
    all_done = False
5373 53c776b5 Iustin Pop
    while not all_done:
5374 53c776b5 Iustin Pop
      all_done = True
5375 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5376 53c776b5 Iustin Pop
                                            self.nodes_ip,
5377 53c776b5 Iustin Pop
                                            self.instance.disks)
5378 53c776b5 Iustin Pop
      min_percent = 100
5379 53c776b5 Iustin Pop
      for node, nres in result.items():
5380 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
5381 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
5382 53c776b5 Iustin Pop
        all_done = all_done and node_done
5383 53c776b5 Iustin Pop
        if node_percent is not None:
5384 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
5385 53c776b5 Iustin Pop
      if not all_done:
5386 53c776b5 Iustin Pop
        if min_percent < 100:
5387 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5388 53c776b5 Iustin Pop
        time.sleep(2)
5389 53c776b5 Iustin Pop
5390 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
5391 53c776b5 Iustin Pop
    """Demote a node to secondary.
5392 53c776b5 Iustin Pop

5393 53c776b5 Iustin Pop
    """
5394 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
5395 53c776b5 Iustin Pop
5396 53c776b5 Iustin Pop
    for dev in self.instance.disks:
5397 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
5398 53c776b5 Iustin Pop
5399 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5400 53c776b5 Iustin Pop
                                          self.instance.disks)
5401 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
5402 53c776b5 Iustin Pop
5403 53c776b5 Iustin Pop
  def _GoStandalone(self):
5404 53c776b5 Iustin Pop
    """Disconnect from the network.
5405 53c776b5 Iustin Pop

5406 53c776b5 Iustin Pop
    """
5407 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
5408 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5409 53c776b5 Iustin Pop
                                               self.instance.disks)
5410 53c776b5 Iustin Pop
    for node, nres in result.items():
5411 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
5412 53c776b5 Iustin Pop
5413 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
5414 53c776b5 Iustin Pop
    """Reconnect to the network.
5415 53c776b5 Iustin Pop

5416 53c776b5 Iustin Pop
    """
5417 53c776b5 Iustin Pop
    if multimaster:
5418 53c776b5 Iustin Pop
      msg = "dual-master"
5419 53c776b5 Iustin Pop
    else:
5420 53c776b5 Iustin Pop
      msg = "single-master"
5421 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
5422 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5423 53c776b5 Iustin Pop
                                           self.instance.disks,
5424 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
5425 53c776b5 Iustin Pop
    for node, nres in result.items():
5426 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
5427 53c776b5 Iustin Pop
5428 53c776b5 Iustin Pop
  def _ExecCleanup(self):
5429 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
5430 53c776b5 Iustin Pop

5431 53c776b5 Iustin Pop
    The cleanup is done by:
5432 53c776b5 Iustin Pop
      - check that the instance is running only on one node
5433 53c776b5 Iustin Pop
        (and update the config if needed)
5434 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
5435 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5436 53c776b5 Iustin Pop
      - disconnect from the network
5437 53c776b5 Iustin Pop
      - change disks into single-master mode
5438 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
5439 53c776b5 Iustin Pop

5440 53c776b5 Iustin Pop
    """
5441 53c776b5 Iustin Pop
    instance = self.instance
5442 53c776b5 Iustin Pop
    target_node = self.target_node
5443 53c776b5 Iustin Pop
    source_node = self.source_node
5444 53c776b5 Iustin Pop
5445 53c776b5 Iustin Pop
    # check running on only one node
5446 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
5447 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
5448 53c776b5 Iustin Pop
                     " a bad state)")
5449 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5450 53c776b5 Iustin Pop
    for node, result in ins_l.items():
5451 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
5452 53c776b5 Iustin Pop
5453 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
5454 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
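    # Four outcomes are possible: both nodes report the instance (inconsistent
    # state, abort), neither does (abort and point the admin at
    # 'gnt-instance stop'), only the target does (the migration actually
    # succeeded, so adopt it in the config), or only the source does (keep the
    # current primary).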
5455 53c776b5 Iustin Pop
5456 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
5457 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5458 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
5459 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
5460 53c776b5 Iustin Pop
                               " and restart this operation.")
5461 53c776b5 Iustin Pop
5462 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
5463 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
5464 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
5465 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
5466 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
5467 53c776b5 Iustin Pop
5468 53c776b5 Iustin Pop
    if runningon_target:
5469 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
5470 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
5471 53c776b5 Iustin Pop
                       " updating config" % target_node)
5472 53c776b5 Iustin Pop
      instance.primary_node = target_node
5473 a4eae71f Michael Hanselmann
      self.cfg.Update(instance, self.feedback_fn)
5474 53c776b5 Iustin Pop
      demoted_node = source_node
5475 53c776b5 Iustin Pop
    else:
5476 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
5477 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
5478 53c776b5 Iustin Pop
      demoted_node = target_node
5479 53c776b5 Iustin Pop
5480 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
5481 53c776b5 Iustin Pop
    try:
5482 53c776b5 Iustin Pop
      self._WaitUntilSync()
5483 53c776b5 Iustin Pop
    except errors.OpExecError:
5484 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
5485 53c776b5 Iustin Pop
      # won't be able to sync
5486 53c776b5 Iustin Pop
      pass
5487 53c776b5 Iustin Pop
    self._GoStandalone()
5488 53c776b5 Iustin Pop
    self._GoReconnect(False)
5489 53c776b5 Iustin Pop
    self._WaitUntilSync()
5490 53c776b5 Iustin Pop
5491 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5492 53c776b5 Iustin Pop
5493 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
5494 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
5495 6906a9d8 Guido Trotter

5496 6906a9d8 Guido Trotter
    """
5497 6906a9d8 Guido Trotter
    target_node = self.target_node
5498 6906a9d8 Guido Trotter
    try:
5499 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
5500 6906a9d8 Guido Trotter
      self._GoStandalone()
5501 6906a9d8 Guido Trotter
      self._GoReconnect(False)
5502 6906a9d8 Guido Trotter
      self._WaitUntilSync()
5503 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
5504 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5505 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
5506 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
5507 3e06e001 Michael Hanselmann
                         str(err))
5508 6906a9d8 Guido Trotter
5509 6906a9d8 Guido Trotter
  def _AbortMigration(self):
5510 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
5511 6906a9d8 Guido Trotter

5512 6906a9d8 Guido Trotter
    """
5513 6906a9d8 Guido Trotter
    instance = self.instance
5514 6906a9d8 Guido Trotter
    target_node = self.target_node
5515 6906a9d8 Guido Trotter
    migration_info = self.migration_info
5516 6906a9d8 Guido Trotter
5517 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
5518 6906a9d8 Guido Trotter
                                                    instance,
5519 6906a9d8 Guido Trotter
                                                    migration_info,
5520 6906a9d8 Guido Trotter
                                                    False)
5521 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
5522 6906a9d8 Guido Trotter
    if abort_msg:
5523 099c52ad Iustin Pop
      logging.error("Aborting migration failed on target node %s: %s",
5524 099c52ad Iustin Pop
                    target_node, abort_msg)
5525 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
5526 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5527 6906a9d8 Guido Trotter
5528 53c776b5 Iustin Pop
  def _ExecMigration(self):
5529 53c776b5 Iustin Pop
    """Migrate an instance.
5530 53c776b5 Iustin Pop

5531 53c776b5 Iustin Pop
    The migration is done by:
5532 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5533 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5534 53c776b5 Iustin Pop
      - migrate the instance
5535 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5536 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5537 53c776b5 Iustin Pop
      - change disks into single-master mode
5538 53c776b5 Iustin Pop

5539 53c776b5 Iustin Pop
    """
5540 53c776b5 Iustin Pop
    instance = self.instance
5541 53c776b5 Iustin Pop
    target_node = self.target_node
5542 53c776b5 Iustin Pop
    source_node = self.source_node
5543 53c776b5 Iustin Pop
5544 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5545 53c776b5 Iustin Pop
    for dev in instance.disks:
5546 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5547 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5548 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5549 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5550 53c776b5 Iustin Pop
5551 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5552 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5553 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5554 6906a9d8 Guido Trotter
    if msg:
5555 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5556 0959c824 Iustin Pop
                 (source_node, msg))
5557 6906a9d8 Guido Trotter
      logging.error(log_err)
5558 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5559 6906a9d8 Guido Trotter
5560 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5561 6906a9d8 Guido Trotter
5562 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5563 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5564 53c776b5 Iustin Pop
    self._GoStandalone()
5565 53c776b5 Iustin Pop
    self._GoReconnect(True)
5566 53c776b5 Iustin Pop
    self._WaitUntilSync()
5567 53c776b5 Iustin Pop
5568 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5569 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5570 6906a9d8 Guido Trotter
                                           instance,
5571 6906a9d8 Guido Trotter
                                           migration_info,
5572 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5573 6906a9d8 Guido Trotter
5574 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5575 6906a9d8 Guido Trotter
    if msg:
5576 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5577 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5578 78212a5d Iustin Pop
      self.feedback_fn("Pre-migration failed, aborting")
5579 6906a9d8 Guido Trotter
      self._AbortMigration()
5580 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5581 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5582 6906a9d8 Guido Trotter
                               (instance.name, msg))
5583 6906a9d8 Guido Trotter
5584 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5585 53c776b5 Iustin Pop
    time.sleep(10)
5586 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5587 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5588 3e06e001 Michael Hanselmann
                                            self.live)
5589 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5590 53c776b5 Iustin Pop
    if msg:
5591 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5592 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5593 78212a5d Iustin Pop
      self.feedback_fn("Migration failed, aborting")
5594 6906a9d8 Guido Trotter
      self._AbortMigration()
5595 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5596 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5597 53c776b5 Iustin Pop
                               (instance.name, msg))
5598 53c776b5 Iustin Pop
    time.sleep(10)
5599 53c776b5 Iustin Pop
5600 53c776b5 Iustin Pop
    instance.primary_node = target_node
5601 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5602 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, self.feedback_fn)
5603 53c776b5 Iustin Pop
5604 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5605 6906a9d8 Guido Trotter
                                              instance,
5606 6906a9d8 Guido Trotter
                                              migration_info,
5607 6906a9d8 Guido Trotter
                                              True)
5608 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5609 6906a9d8 Guido Trotter
    if msg:
5610 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5611 099c52ad Iustin Pop
                    " %s", msg)
5612 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5613 6906a9d8 Guido Trotter
                               msg)
5614 6906a9d8 Guido Trotter
5615 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5616 53c776b5 Iustin Pop
    self._WaitUntilSync()
5617 53c776b5 Iustin Pop
    self._GoStandalone()
5618 53c776b5 Iustin Pop
    self._GoReconnect(False)
5619 53c776b5 Iustin Pop
    self._WaitUntilSync()
5620 53c776b5 Iustin Pop
5621 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5622 53c776b5 Iustin Pop
5623 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5624 53c776b5 Iustin Pop
    """Perform the migration.
5625 53c776b5 Iustin Pop

5626 53c776b5 Iustin Pop
    """
5627 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5628 80cb875c Michael Hanselmann
5629 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5630 53c776b5 Iustin Pop
5631 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5632 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5633 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5634 53c776b5 Iustin Pop
    self.nodes_ip = {
5635 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5636 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5637 53c776b5 Iustin Pop
      }
5638 3e06e001 Michael Hanselmann
5639 3e06e001 Michael Hanselmann
    if self.cleanup:
5640 53c776b5 Iustin Pop
      return self._ExecCleanup()
5641 53c776b5 Iustin Pop
    else:
5642 53c776b5 Iustin Pop
      return self._ExecMigration()
5643 53c776b5 Iustin Pop
5644 53c776b5 Iustin Pop
5645 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5646 428958aa Iustin Pop
                    info, force_open):
5647 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5648 a8083063 Iustin Pop

5649 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5650 a8083063 Iustin Pop
  all its children.
5651 a8083063 Iustin Pop

5652 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5653 a8083063 Iustin Pop

5654 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5655 428958aa Iustin Pop
  @param node: the node on which to create the device
5656 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5657 428958aa Iustin Pop
  @param instance: the instance which owns the device
5658 428958aa Iustin Pop
  @type device: L{objects.Disk}
5659 428958aa Iustin Pop
  @param device: the device to create
5660 428958aa Iustin Pop
  @type force_create: boolean
5661 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5662 428958aa Iustin Pop
      will be changed to True whenever we find a device which has
5663 428958aa Iustin Pop
      the CreateOnSecondary() attribute set
5664 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5665 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5666 428958aa Iustin Pop
  @type force_open: boolean
5667 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5668 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5669 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5670 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5671 428958aa Iustin Pop

5672 a8083063 Iustin Pop
  """
5673 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5674 428958aa Iustin Pop
    force_create = True
5675 796cab27 Iustin Pop
5676 a8083063 Iustin Pop
  if device.children:
5677 a8083063 Iustin Pop
    for child in device.children:
5678 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5679 428958aa Iustin Pop
                      info, force_open)
5680 a8083063 Iustin Pop
5681 428958aa Iustin Pop
  if not force_create:
5682 796cab27 Iustin Pop
    return
5683 796cab27 Iustin Pop
5684 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
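# Minimal standalone sketch (toy class, not L{objects.Disk}) of the recursion
# documented above: force_create becomes sticky once a device reports
# CreateOnSecondary(), and children are always created before the device.
class _ToyDisk(object):
  def __init__(self, name, on_secondary=False, children=None):
    self.name = name
    self._on_secondary = on_secondary
    self.children = children or []

  def CreateOnSecondary(self):
    return self._on_secondary


def _SketchCreateBlockDev(device, force_create, created):
  if device.CreateOnSecondary():
    force_create = True
  for child in device.children:
    _SketchCreateBlockDev(child, force_create, created)
  if force_create:
    created.append(device.name)

_sketch_order = []
_SketchCreateBlockDev(_ToyDisk("drbd0", on_secondary=True,
                               children=[_ToyDisk("data"), _ToyDisk("meta")]),
                      False, _sketch_order)
assert _sketch_order == ["data", "meta", "drbd0"]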
5685 de12473a Iustin Pop
5686 de12473a Iustin Pop
5687 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5688 de12473a Iustin Pop
  """Create a single block device on a given node.
5689 de12473a Iustin Pop

5690 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5691 de12473a Iustin Pop
  created in advance.
5692 de12473a Iustin Pop

5693 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5694 de12473a Iustin Pop
  @param node: the node on which to create the device
5695 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5696 de12473a Iustin Pop
  @param instance: the instance which owns the device
5697 de12473a Iustin Pop
  @type device: L{objects.Disk}
5698 de12473a Iustin Pop
  @param device: the device to create
5699 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5700 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5701 de12473a Iustin Pop
  @type force_open: boolean
5702 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5703 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5704 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5705 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5706 de12473a Iustin Pop

5707 de12473a Iustin Pop
  """
5708 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5709 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5710 428958aa Iustin Pop
                                       instance.name, force_open, info)
5711 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5712 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5713 a8083063 Iustin Pop
  if device.physical_id is None:
5714 0959c824 Iustin Pop
    device.physical_id = result.payload
5715 a8083063 Iustin Pop
5716 a8083063 Iustin Pop
5717 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5718 923b1523 Iustin Pop
  """Generate a suitable LV name.
5719 923b1523 Iustin Pop

5720 923b1523 Iustin Pop
  This will generate one logical volume name for each requested suffix.
5721 923b1523 Iustin Pop

5722 923b1523 Iustin Pop
  """
5723 923b1523 Iustin Pop
  results = []
5724 923b1523 Iustin Pop
  for val in exts:
5725 4fae38c5 Guido Trotter
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
5726 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5727 923b1523 Iustin Pop
  return results
5728 923b1523 Iustin Pop
5729 923b1523 Iustin Pop
5730 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5731 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5732 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5733 a1f445d3 Iustin Pop

5734 a1f445d3 Iustin Pop
  """
5735 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5736 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5737 afa1386e Guido Trotter
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
5738 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5739 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5740 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5741 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5742 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5743 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5744 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5745 f9518d38 Iustin Pop
                                      shared_secret),
5746 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5747 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5748 a1f445d3 Iustin Pop
  return drbd_dev
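# For reference, a plain-dict rendering (hypothetical names, sizes and minor
# numbers; the dev_type strings stand in for constants.LD_*) of the device
# tree returned above for a single 10 GB disk:
_EXAMPLE_DRBD8_BRANCH = {
  "dev_type": "drbd8",
  "size": 10240,
  "logical_id": ("node1", "node2", 11000, 0, 1, "shared-secret"),
  "children": [
    {"dev_type": "lvm", "size": 10240,
     "logical_id": ("xenvg", "<uuid>.disk0_data")},
    {"dev_type": "lvm", "size": 128,
     "logical_id": ("xenvg", "<uuid>.disk0_meta")},
  ],
  "iv_name": "disk/0",
}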
5749 a1f445d3 Iustin Pop
5750 7c0d6283 Michael Hanselmann
5751 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5752 a8083063 Iustin Pop
                          instance_name, primary_node,
5753 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5754 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5755 e2a65344 Iustin Pop
                          base_index):
5756 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5757 a8083063 Iustin Pop

5758 a8083063 Iustin Pop
  """
5759 a8083063 Iustin Pop
  #TODO: compute space requirements
5760 a8083063 Iustin Pop
5761 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5762 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5763 08db7c5c Iustin Pop
  disks = []
5764 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5765 08db7c5c Iustin Pop
    pass
5766 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5767 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5768 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5769 923b1523 Iustin Pop
5770 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5771 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5772 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5773 e2a65344 Iustin Pop
      disk_index = idx + base_index
5774 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5775 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5776 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5777 6ec66eae Iustin Pop
                              mode=disk["mode"])
5778 08db7c5c Iustin Pop
      disks.append(disk_dev)
5779 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5780 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5781 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5782 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5783 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5784 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5785 08db7c5c Iustin Pop
5786 e6c1ff2f Iustin Pop
    names = []
5787 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5788 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5789 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5790 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5791 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5792 112050d9 Iustin Pop
      disk_index = idx + base_index
5793 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5794 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5795 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5796 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5797 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5798 08db7c5c Iustin Pop
      disks.append(disk_dev)
5799 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5800 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5801 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5802 0f1a06e3 Manuel Franceschini
5803 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5804 112050d9 Iustin Pop
      disk_index = idx + base_index
5805 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5806 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5807 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5808 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5809 43e99cff Guido Trotter
                                                         disk_index)),
5810 6ec66eae Iustin Pop
                              mode=disk["mode"])
5811 08db7c5c Iustin Pop
      disks.append(disk_dev)
5812 a8083063 Iustin Pop
  else:
5813 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5814 a8083063 Iustin Pop
  return disks
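# The generator above only needs a minimal per-disk specification; an
# illustrative call shape (hypothetical values) is shown below.  Each entry
# is read via disk["size"] and disk["mode"], the resulting devices get
# iv_name "disk/<base_index + idx>", and the backing LVs are named
# "<unique-id>.disk<N>" (with "_data"/"_meta" suffixes for DRBD8).
_EXAMPLE_DISK_INFO = [
  {"size": 10240, "mode": "rw"},   # becomes iv_name "disk/0"
  {"size": 2048, "mode": "ro"},    # becomes iv_name "disk/1"
]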
5815 a8083063 Iustin Pop
5816 a8083063 Iustin Pop
5817 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5818 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5819 3ecf6786 Iustin Pop

5820 3ecf6786 Iustin Pop
  """
5821 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5822 a0c3fea1 Michael Hanselmann
5823 a0c3fea1 Michael Hanselmann
5824 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5825 a8083063 Iustin Pop
  """Create all disks for an instance.
5826 a8083063 Iustin Pop

5827 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5828 a8083063 Iustin Pop

5829 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5830 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5831 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5832 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5833 bd315bfa Iustin Pop
  @type to_skip: list
5834 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5835 621b7678 Iustin Pop
  @type target_node: string
5836 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5837 e4376078 Iustin Pop
  @raise errors.OpExecError: if the creation of any
5838 e4376078 Iustin Pop
      device fails
5839 a8083063 Iustin Pop

5840 a8083063 Iustin Pop
  """
5841 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5842 621b7678 Iustin Pop
  if target_node is None:
5843 621b7678 Iustin Pop
    pnode = instance.primary_node
5844 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5845 621b7678 Iustin Pop
  else:
5846 621b7678 Iustin Pop
    pnode = target_node
5847 621b7678 Iustin Pop
    all_nodes = [pnode]
5848 a0c3fea1 Michael Hanselmann
5849 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5850 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5851 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5852 0f1a06e3 Manuel Franceschini
5853 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5854 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5855 0f1a06e3 Manuel Franceschini
5856 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5857 24991749 Iustin Pop
  # LUSetInstanceParams
5858 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5859 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5860 bd315bfa Iustin Pop
      continue
5861 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5862 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5863 a8083063 Iustin Pop
    #HARDCODE
5864 621b7678 Iustin Pop
    for node in all_nodes:
5865 428958aa Iustin Pop
      f_create = node == pnode
5866 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5867 a8083063 Iustin Pop
5868 a8083063 Iustin Pop
5869 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5870 a8083063 Iustin Pop
  """Remove all disks for an instance.
5871 a8083063 Iustin Pop

5872 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5873 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5874 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5875 a8083063 Iustin Pop
  with `_CreateDisks()`).
5876 a8083063 Iustin Pop

5877 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5878 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5879 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5880 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5881 621b7678 Iustin Pop
  @type target_node: string
5882 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5883 e4376078 Iustin Pop
  @rtype: boolean
5884 e4376078 Iustin Pop
  @return: the success of the removal
5885 a8083063 Iustin Pop

5886 a8083063 Iustin Pop
  """
5887 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5888 a8083063 Iustin Pop
5889 e1bc0878 Iustin Pop
  all_result = True
5890 a8083063 Iustin Pop
  for device in instance.disks:
5891 621b7678 Iustin Pop
    if target_node:
5892 621b7678 Iustin Pop
      edata = [(target_node, device)]
5893 621b7678 Iustin Pop
    else:
5894 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5895 621b7678 Iustin Pop
    for node, disk in edata:
5896 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5897 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5898 e1bc0878 Iustin Pop
      if msg:
5899 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5900 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5901 e1bc0878 Iustin Pop
        all_result = False
5902 0f1a06e3 Manuel Franceschini
5903 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5904 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5905 dfc2a24c Guido Trotter
    if target_node:
5906 dfc2a24c Guido Trotter
      tgt = target_node
5907 621b7678 Iustin Pop
    else:
5908 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5909 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5910 621b7678 Iustin Pop
    if result.fail_msg:
5911 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5912 621b7678 Iustin Pop
                    file_storage_dir, tgt, result.fail_msg)
5913 e1bc0878 Iustin Pop
      all_result = False
5914 0f1a06e3 Manuel Franceschini
5915 e1bc0878 Iustin Pop
  return all_result
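# The loop above is deliberately best-effort; a minimal standalone sketch of
# the same pattern, with a hypothetical remove_fn returning an error string
# or None (mirroring the fail_msg convention used in this module):
def _SketchRemoveAll(devices, remove_fn):
  all_ok = True
  for dev in devices:
    msg = remove_fn(dev)
    if msg:
      logging.warning("could not remove %s, continuing anyway: %s", dev, msg)
      all_ok = False
  return all_ok

assert _SketchRemoveAll(["disk/0", "disk/1"], lambda dev: None)
assert not _SketchRemoveAll(["disk/0"], lambda dev: "device busy")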
5916 a8083063 Iustin Pop
5917 a8083063 Iustin Pop
5918 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5919 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5920 e2fe6369 Iustin Pop

5921 e2fe6369 Iustin Pop
  """
5922 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disk sizes
5923 e2fe6369 Iustin Pop
  req_size_dict = {
5924 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5925 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5926 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5927 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5928 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5929 e2fe6369 Iustin Pop
  }
5930 e2fe6369 Iustin Pop
5931 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5932 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5933 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5934 e2fe6369 Iustin Pop
5935 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
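# Worked example of the table above (the string keys stand in for the
# constants.DT_* values; sizes are hypothetical): two disks of 512 and
# 1024 MB need 1536 MB as plain LVs, 1792 MB under DRBD8 because of the
# per-disk 128 MB metadata volume, and no VG space for diskless/file.
def _SketchDiskSize(template, disks):
  if template == "plain":
    return sum(d["size"] for d in disks)
  if template == "drbd":
    return sum(d["size"] + 128 for d in disks)
  return None

assert _SketchDiskSize("plain", [{"size": 512}, {"size": 1024}]) == 1536
assert _SketchDiskSize("drbd", [{"size": 512}, {"size": 1024}]) == 1792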
5936 e2fe6369 Iustin Pop
5937 e2fe6369 Iustin Pop
5938 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5939 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5940 74409b12 Iustin Pop

5941 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
5942 74409b12 Iustin Pop
  used in both instance create and instance modify.
5943 74409b12 Iustin Pop

5944 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5945 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5946 74409b12 Iustin Pop
  @type nodenames: list
5947 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5948 74409b12 Iustin Pop
  @type hvname: string
5949 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5950 74409b12 Iustin Pop
  @type hvparams: dict
5951 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5952 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5953 74409b12 Iustin Pop

5954 74409b12 Iustin Pop
  """
5955 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5956 74409b12 Iustin Pop
                                                  hvname,
5957 74409b12 Iustin Pop
                                                  hvparams)
5958 74409b12 Iustin Pop
  for node in nodenames:
5959 781de953 Iustin Pop
    info = hvinfo[node]
5960 68c6f21c Iustin Pop
    if info.offline:
5961 68c6f21c Iustin Pop
      continue
5962 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5963 74409b12 Iustin Pop
5964 74409b12 Iustin Pop
5965 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5966 a8083063 Iustin Pop
  """Create an instance.
5967 a8083063 Iustin Pop

5968 a8083063 Iustin Pop
  """
5969 a8083063 Iustin Pop
  HPATH = "instance-add"
5970 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5971 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5972 08db7c5c Iustin Pop
              "mode", "start",
5973 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5974 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5975 7baf741d Guido Trotter
  REQ_BGL = False
5976 7baf741d Guido Trotter
5977 5f23e043 Iustin Pop
  def CheckArguments(self):
5978 5f23e043 Iustin Pop
    """Check arguments.
5979 5f23e043 Iustin Pop

5980 5f23e043 Iustin Pop
    """
5981 df4272e5 Iustin Pop
    # set optional parameters to none if they don't exist
5982 df4272e5 Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5983 df4272e5 Iustin Pop
      if not hasattr(self.op, attr):
5984 df4272e5 Iustin Pop
        setattr(self.op, attr, None)
5985 df4272e5 Iustin Pop
5986 5f23e043 Iustin Pop
    # do not require name_check to ease forward/backward compatibility
5987 5f23e043 Iustin Pop
    # for tools
5988 5f23e043 Iustin Pop
    if not hasattr(self.op, "name_check"):
5989 5f23e043 Iustin Pop
      self.op.name_check = True
5990 25a8792c Iustin Pop
    if not hasattr(self.op, "no_install"):
5991 25a8792c Iustin Pop
      self.op.no_install = False
5992 25a8792c Iustin Pop
    if self.op.no_install and self.op.start:
5993 25a8792c Iustin Pop
      self.LogInfo("No-installation mode selected, disabling startup")
5994 25a8792c Iustin Pop
      self.op.start = False
5995 44caf5a8 Iustin Pop
    # validate/normalize the instance name
5996 44caf5a8 Iustin Pop
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
5997 5f23e043 Iustin Pop
    if self.op.ip_check and not self.op.name_check:
5998 5f23e043 Iustin Pop
      # TODO: make the ip check more flexible and not depend on the name check
5999 5f23e043 Iustin Pop
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
6000 5f23e043 Iustin Pop
                                 errors.ECODE_INVAL)
6001 cb7c0198 Iustin Pop
    if (self.op.disk_template == constants.DT_FILE and
6002 cb7c0198 Iustin Pop
        not constants.ENABLE_FILE_STORAGE):
6003 cb7c0198 Iustin Pop
      raise errors.OpPrereqError("File storage disabled at configure time",
6004 cb7c0198 Iustin Pop
                                 errors.ECODE_INVAL)
6005 c3589cf8 Iustin Pop
    # check disk information: either all adopt, or no adopt
6006 c3589cf8 Iustin Pop
    has_adopt = has_no_adopt = False
6007 c3589cf8 Iustin Pop
    for disk in self.op.disks:
6008 c3589cf8 Iustin Pop
      if "adopt" in disk:
6009 c3589cf8 Iustin Pop
        has_adopt = True
6010 c3589cf8 Iustin Pop
      else:
6011 c3589cf8 Iustin Pop
        has_no_adopt = True
6012 c3589cf8 Iustin Pop
    if has_adopt and has_no_adopt:
6013 c3589cf8 Iustin Pop
      raise errors.OpPrereqError("Either all disks have are adoped or none is",
6014 c3589cf8 Iustin Pop
                                 errors.ECODE_INVAL)
6015 c3589cf8 Iustin Pop
    if has_adopt:
6016 c3589cf8 Iustin Pop
      if self.op.disk_template != constants.DT_PLAIN:
6017 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption is only supported for the"
6018 c3589cf8 Iustin Pop
                                   " 'plain' disk template",
6019 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6020 c3589cf8 Iustin Pop
      if self.op.iallocator is not None:
6021 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed with an"
6022 c3589cf8 Iustin Pop
                                   " iallocator script", errors.ECODE_INVAL)
6023 c3589cf8 Iustin Pop
      if self.op.mode == constants.INSTANCE_IMPORT:
6024 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed for"
6025 c3589cf8 Iustin Pop
                                   " instance import", errors.ECODE_INVAL)
6026 c3589cf8 Iustin Pop
6027 c3589cf8 Iustin Pop
    self.adopt_disks = has_adopt
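    # Illustrative examples (hypothetical values) of the rule just enforced:
    #   accepted: [{"adopt": "vol1"}, {"adopt": "vol2"}]   (all adopted)
    #   accepted: [{"size": 10240, "mode": "rw"}]          (none adopted)
    #   rejected: [{"adopt": "vol1"}, {"size": 10240}]     (mixed)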
6028 5f23e043 Iustin Pop
6029 7baf741d Guido Trotter
  def ExpandNames(self):
6030 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
6031 7baf741d Guido Trotter

6032 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
6033 7baf741d Guido Trotter

6034 7baf741d Guido Trotter
    """
6035 7baf741d Guido Trotter
    self.needed_locks = {}
6036 7baf741d Guido Trotter
6037 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
6038 4b2f38dd Iustin Pop
6039 7baf741d Guido Trotter
    # verify creation mode
6040 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
6041 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
6042 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
6043 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
6044 4b2f38dd Iustin Pop
6045 7baf741d Guido Trotter
    # disk template and mirror node verification
6046 5d55819e Iustin Pop
    _CheckDiskTemplate(self.op.disk_template)
6047 7baf741d Guido Trotter
6048 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
6049 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
6050 4b2f38dd Iustin Pop
6051 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
6052 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
6053 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
6054 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6055 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
6056 5c983ee5 Iustin Pop
                                  ",".join(enabled_hvs)),
6057 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
6058 4b2f38dd Iustin Pop
6059 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
6060 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6061 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
6062 8705eb96 Iustin Pop
                                  self.op.hvparams)
6063 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6064 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
6065 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
6066 7736a5f2 Iustin Pop
    # check that we don't specify global parameters on an instance
6067 7736a5f2 Iustin Pop
    _CheckGlobalHvParams(self.op.hvparams)
6068 6785674e Iustin Pop
6069 338e51e8 Iustin Pop
    # fill and remember the beparams dict
6070 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6071 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
6072 338e51e8 Iustin Pop
                                    self.op.beparams)
6073 338e51e8 Iustin Pop
6074 7baf741d Guido Trotter
    #### instance parameters check
6075 7baf741d Guido Trotter
6076 7baf741d Guido Trotter
    # instance name verification
6077 5f23e043 Iustin Pop
    if self.op.name_check:
6078 5f23e043 Iustin Pop
      hostname1 = utils.GetHostInfo(self.op.instance_name)
6079 5f23e043 Iustin Pop
      self.op.instance_name = instance_name = hostname1.name
6080 5f23e043 Iustin Pop
      # used in CheckPrereq for ip ping check
6081 5f23e043 Iustin Pop
      self.check_ip = hostname1.ip
6082 5f23e043 Iustin Pop
    else:
6083 5f23e043 Iustin Pop
      instance_name = self.op.instance_name
6084 5f23e043 Iustin Pop
      self.check_ip = None
6085 7baf741d Guido Trotter
6086 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
6087 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
6088 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
6089 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6090 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
6091 7baf741d Guido Trotter
6092 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6093 7baf741d Guido Trotter
6094 08db7c5c Iustin Pop
    # NIC buildup
6095 08db7c5c Iustin Pop
    self.nics = []
6096 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
6097 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
6098 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
6099 9dce4771 Guido Trotter
      if nic_mode is None:
6100 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
6101 9dce4771 Guido Trotter
6102 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
6103 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
6104 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
6105 9dce4771 Guido Trotter
      else:
6106 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
6107 9dce4771 Guido Trotter
6108 08db7c5c Iustin Pop
      # ip validity checks
6109 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
6110 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
6111 08db7c5c Iustin Pop
        nic_ip = None
6112 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
6113 5f23e043 Iustin Pop
        if not self.op.name_check:
6114 5f23e043 Iustin Pop
          raise errors.OpPrereqError("IP address set to auto but name checks"
6115 5f23e043 Iustin Pop
                                     " have been skipped. Aborting.",
6116 5f23e043 Iustin Pop
                                     errors.ECODE_INVAL)
6117 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
6118 08db7c5c Iustin Pop
      else:
6119 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
6120 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
6121 5c983ee5 Iustin Pop
                                     " like a valid IP" % ip,
6122 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6123 08db7c5c Iustin Pop
        nic_ip = ip
6124 08db7c5c Iustin Pop
6125 b8716596 Michael Hanselmann
      # TODO: check the ip address for uniqueness
6126 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
6127 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
6128 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6129 9dce4771 Guido Trotter
6130 08db7c5c Iustin Pop
      # MAC address verification
6131 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
6132 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6133 82187135 Renรฉ Nussbaumer
        mac = utils.NormalizeAndValidateMac(mac)
6134 82187135 Renรฉ Nussbaumer
6135 82187135 Renรฉ Nussbaumer
        try:
6136 82187135 Renรฉ Nussbaumer
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
6137 82187135 Renรฉ Nussbaumer
        except errors.ReservationError:
6138 82187135 Renรฉ Nussbaumer
          raise errors.OpPrereqError("MAC address %s already in use"
6139 82187135 Renรฉ Nussbaumer
                                     " in cluster" % mac,
6140 82187135 Renรฉ Nussbaumer
                                     errors.ECODE_NOTUNIQUE)
6141 87e43988 Iustin Pop
6142 08db7c5c Iustin Pop
      # bridge verification
6143 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
6144 9dce4771 Guido Trotter
      link = nic.get("link", None)
6145 9dce4771 Guido Trotter
      if bridge and link:
6146 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6147 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
6148 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
6149 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
6150 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6151 9dce4771 Guido Trotter
      elif bridge:
6152 9dce4771 Guido Trotter
        link = bridge
6153 9dce4771 Guido Trotter
6154 9dce4771 Guido Trotter
      nicparams = {}
6155 9dce4771 Guido Trotter
      if nic_mode_req:
6156 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
6157 9dce4771 Guido Trotter
      if link:
6158 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
6159 9dce4771 Guido Trotter
6160 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
6161 9dce4771 Guido Trotter
                                      nicparams)
6162 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
6163 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
6164 08db7c5c Iustin Pop
6165 08db7c5c Iustin Pop
    # disk checks/pre-build
6166 08db7c5c Iustin Pop
    self.disks = []
6167 08db7c5c Iustin Pop
    for disk in self.op.disks:
6168 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
6169 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
6170 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
6171 5c983ee5 Iustin Pop
                                   mode, errors.ECODE_INVAL)
6172 08db7c5c Iustin Pop
      size = disk.get("size", None)
6173 08db7c5c Iustin Pop
      if size is None:
6174 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
6175 08db7c5c Iustin Pop
      try:
6176 08db7c5c Iustin Pop
        size = int(size)
6177 691744c4 Iustin Pop
      except (TypeError, ValueError):
6178 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
6179 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6180 c3589cf8 Iustin Pop
      new_disk = {"size": size, "mode": mode}
6181 c3589cf8 Iustin Pop
      if "adopt" in disk:
6182 c3589cf8 Iustin Pop
        new_disk["adopt"] = disk["adopt"]
6183 c3589cf8 Iustin Pop
      self.disks.append(new_disk)
6184 08db7c5c Iustin Pop
6185 7baf741d Guido Trotter
    # file storage checks
6186 7baf741d Guido Trotter
    if (self.op.file_driver and
6187 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
6188 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
6189 5c983ee5 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
6190 7baf741d Guido Trotter
6191 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6192 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
6193 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6194 7baf741d Guido Trotter
6195 7baf741d Guido Trotter
    ### Node/iallocator related checks
6196 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
6197 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
6198 5c983ee5 Iustin Pop
                                 " node must be given",
6199 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6200 7baf741d Guido Trotter
6201 7baf741d Guido Trotter
    if self.op.iallocator:
6202 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6203 7baf741d Guido Trotter
    else:
6204 cf26a87a Iustin Pop
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6205 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
6206 7baf741d Guido Trotter
      if self.op.snode is not None:
6207 cf26a87a Iustin Pop
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6208 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
6209 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6210 7baf741d Guido Trotter
6211 7baf741d Guido Trotter
    # in case of import lock the source node too
6212 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6213 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
6214 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
6215 7baf741d Guido Trotter
6216 b9322a9f Guido Trotter
      if src_path is None:
6217 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
6218 b9322a9f Guido Trotter
6219 b9322a9f Guido Trotter
      if src_node is None:
6220 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6221 b9322a9f Guido Trotter
        self.op.src_node = None
6222 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
6223 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
6224 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
6225 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6226 b9322a9f Guido Trotter
      else:
6227 cf26a87a Iustin Pop
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6228 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6229 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6230 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
6231 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
6232 c4feafe8 Iustin Pop
            utils.PathJoin(constants.EXPORT_DIR, src_path)
6233 7baf741d Guido Trotter
6234 f2c05717 Guido Trotter
      # On import force_variant must be True, because if we forced it at
6235 f2c05717 Guido Trotter
      # initial install, our only chance when importing it back is that it
6236 f2c05717 Guido Trotter
      # works again!
6237 f2c05717 Guido Trotter
      self.op.force_variant = True
6238 f2c05717 Guido Trotter
6239 25a8792c Iustin Pop
      if self.op.no_install:
6240 25a8792c Iustin Pop
        self.LogInfo("No-installation mode has no effect during import")
6241 25a8792c Iustin Pop
6242 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
6243 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
6244 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
6245 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6246 f2c05717 Guido Trotter
      self.op.force_variant = getattr(self.op, "force_variant", False)
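    # Illustrative input shapes (hypothetical values) accepted above:
    #   nics:  [{"mode": "bridged", "link": "xen-br0", "mac": "auto"},
    #           {"mode": "routed", "ip": "198.51.100.10"}]
    #   disks: [{"size": 10240, "mode": "rw"}, {"size": 2048, "mode": "ro"}]
    #   disks (adoption): [{"size": 10240, "mode": "rw", "adopt": "some-vol"}]
    #           (the given size is replaced by the real LV size in CheckPrereq)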
6247 a8083063 Iustin Pop
6248 538475ca Iustin Pop
  def _RunAllocator(self):
6249 538475ca Iustin Pop
    """Run the allocator based on input opcode.
6250 538475ca Iustin Pop

6251 538475ca Iustin Pop
    """
6252 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
6253 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
6254 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
6255 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
6256 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
6257 d1c2dd75 Iustin Pop
                     tags=[],
6258 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
6259 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
6260 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
6261 08db7c5c Iustin Pop
                     disks=self.disks,
6262 d1c2dd75 Iustin Pop
                     nics=nics,
6263 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
6264 29859cb7 Iustin Pop
                     )
6265 d1c2dd75 Iustin Pop
6266 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
6267 d1c2dd75 Iustin Pop
6268 d1c2dd75 Iustin Pop
    if not ial.success:
6269 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
6270 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
6271 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
6272 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6273 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6274 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6275 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
6276 680f0a89 Iustin Pop
                                 (self.op.iallocator, len(ial.result),
6277 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
6278 680f0a89 Iustin Pop
    self.op.pnode = ial.result[0]
6279 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6280 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
6281 680f0a89 Iustin Pop
                 utils.CommaJoin(ial.result))
6282 27579978 Iustin Pop
    if ial.required_nodes == 2:
6283 680f0a89 Iustin Pop
      self.op.snode = ial.result[1]
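    # Illustrative exchange (hypothetical names; strings stand in for the
    # respective constants): the request mirrors the keyword arguments above,
    # e.g. mode="allocate", disk_template="drbd", mem_size=1024, vcpus=2,
    # disks=[{"size": 10240, "mode": "rw"}], nics=[{"mode": "bridged"}],
    # and ial.result must hold exactly ial.required_nodes node names, e.g.
    # ["node3.example.com", "node7.example.com"] (primary first).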
6284 538475ca Iustin Pop
6285 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6286 a8083063 Iustin Pop
    """Build hooks env.
6287 a8083063 Iustin Pop

6288 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
6289 a8083063 Iustin Pop

6290 a8083063 Iustin Pop
    """
6291 a8083063 Iustin Pop
    env = {
6292 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
6293 a8083063 Iustin Pop
      }
6294 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6295 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
6296 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
6297 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
6298 396e1b78 Michael Hanselmann
6299 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
6300 2c2690c9 Iustin Pop
      name=self.op.instance_name,
6301 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
6302 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
6303 4978db17 Iustin Pop
      status=self.op.start,
6304 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
6305 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
6306 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
6307 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
6308 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
6309 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
6310 67fc3042 Iustin Pop
      bep=self.be_full,
6311 67fc3042 Iustin Pop
      hvp=self.hv_full,
6312 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
6313 396e1b78 Michael Hanselmann
    ))
6314 a8083063 Iustin Pop
6315 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6316 a8083063 Iustin Pop
          self.secondaries)
6317 a8083063 Iustin Pop
    return env, nl, nl
6318 a8083063 Iustin Pop
6319 a8083063 Iustin Pop
6320 a8083063 Iustin Pop
  def CheckPrereq(self):
6321 a8083063 Iustin Pop
    """Check prerequisites.
6322 a8083063 Iustin Pop

6323 a8083063 Iustin Pop
    """
6324 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
6325 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
6326 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6327 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
6328 eedc99de Manuel Franceschini
6329 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6330 7baf741d Guido Trotter
      src_node = self.op.src_node
6331 7baf741d Guido Trotter
      src_path = self.op.src_path
6332 a8083063 Iustin Pop
6333 c0cbdc67 Guido Trotter
      if src_node is None:
6334 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6335 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
6336 c0cbdc67 Guido Trotter
        found = False
6337 c0cbdc67 Guido Trotter
        for node in exp_list:
6338 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
6339 1b7bfbb7 Iustin Pop
            continue
6340 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
6341 c0cbdc67 Guido Trotter
            found = True
6342 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
6343 c4feafe8 Iustin Pop
            self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6344 c4feafe8 Iustin Pop
                                                         src_path)
6345 c0cbdc67 Guido Trotter
            break
6346 c0cbdc67 Guido Trotter
        if not found:
6347 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
6348 5c983ee5 Iustin Pop
                                      src_path, errors.ECODE_INVAL)
6349 c0cbdc67 Guido Trotter
6350 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
6351 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
6352 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
6353 a8083063 Iustin Pop
6354 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6355 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
6356 5c983ee5 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config",
6357 5c983ee5 Iustin Pop
                                     errors.ECODE_ENVIRON)
6358 a8083063 Iustin Pop
6359 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
6360 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
6361 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6362 5c983ee5 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION),
6363 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
6364 a8083063 Iustin Pop
6365 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
6366 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
6367 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6368 09acf207 Guido Trotter
      if instance_disks < export_disks:
6369 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
6370 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
6371 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
6372 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6373 a8083063 Iustin Pop
6374 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
6375 09acf207 Guido Trotter
      disk_images = []
6376 09acf207 Guido Trotter
      for idx in range(export_disks):
6377 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
6378 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
6379 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
6380 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
6381 c4feafe8 Iustin Pop
          image = utils.PathJoin(src_path, export_name)
6382 09acf207 Guido Trotter
          disk_images.append(image)
6383 09acf207 Guido Trotter
        else:
6384 09acf207 Guido Trotter
          disk_images.append(False)
6385 09acf207 Guido Trotter
6386 09acf207 Guido Trotter
      self.src_images = disk_images
6387 901a65c1 Iustin Pop
6388 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
6389 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
6390 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
6391 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
6392 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
6393 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
6394 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
6395 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6396 bc89efc3 Guido Trotter
6397 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
6398 901a65c1 Iustin Pop
6399 18c8f361 Iustin Pop
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6400 901a65c1 Iustin Pop
    if self.op.ip_check:
6401 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6402 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6403 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
6404 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
6405 901a65c1 Iustin Pop
6406 295728df Guido Trotter
    #### mac address generation
6407 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
6408 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
6409 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
6410 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
6411 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
6412 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
6413 295728df Guido Trotter
    # creation job will fail.
6414 295728df Guido Trotter
    for nic in self.nics:
6415 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6416 36b66e6e Guido Trotter
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
6417 295728df Guido Trotter
6418 538475ca Iustin Pop
    #### allocator run
6419 538475ca Iustin Pop
6420 538475ca Iustin Pop
    if self.op.iallocator is not None:
6421 538475ca Iustin Pop
      self._RunAllocator()
6422 0f1a06e3 Manuel Franceschini
6423 901a65c1 Iustin Pop
    #### node related checks
6424 901a65c1 Iustin Pop
6425 901a65c1 Iustin Pop
    # check primary node
6426 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6427 7baf741d Guido Trotter
    assert self.pnode is not None, \
6428 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
6429 7527a8a4 Iustin Pop
    if pnode.offline:
6430 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6431 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6432 733a2b6a Iustin Pop
    if pnode.drained:
6433 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6434 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6435 7527a8a4 Iustin Pop
6436 901a65c1 Iustin Pop
    self.secondaries = []
6437 901a65c1 Iustin Pop
6438 901a65c1 Iustin Pop
    # mirror node verification
6439 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6440 7baf741d Guido Trotter
      if self.op.snode is None:
6441 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
6442 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
6443 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
6444 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
6445 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
6446 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
6447 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
6448 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
6449 a8083063 Iustin Pop
6450 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
6451 6785674e Iustin Pop
6452 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
6453 08db7c5c Iustin Pop
                                self.disks)
6454 ed1ebc60 Guido Trotter
6455 c3589cf8 Iustin Pop
    # Check lv size requirements, if not adopting
6456 c3589cf8 Iustin Pop
    if req_size is not None and not self.adopt_disks:
6457 701384a9 Iustin Pop
      _CheckNodesFreeDisk(self, nodenames, req_size)
6458 ed1ebc60 Guido Trotter
6459 c3589cf8 Iustin Pop
    if self.adopt_disks: # instead, we must check the adoption data
6460 c3589cf8 Iustin Pop
      all_lvs = set([i["adopt"] for i in self.disks])
6461 c3589cf8 Iustin Pop
      if len(all_lvs) != len(self.disks):
6462 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
6463 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6464 c3589cf8 Iustin Pop
      for lv_name in all_lvs:
6465 c3589cf8 Iustin Pop
        try:
6466 c3589cf8 Iustin Pop
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
6467 c3589cf8 Iustin Pop
        except errors.ReservationError:
6468 c3589cf8 Iustin Pop
          raise errors.OpPrereqError("LV named %s used by another instance" %
6469 c3589cf8 Iustin Pop
                                     lv_name, errors.ECODE_NOTUNIQUE)
6470 c3589cf8 Iustin Pop
6471 c3589cf8 Iustin Pop
      node_lvs = self.rpc.call_lv_list([pnode.name],
6472 c3589cf8 Iustin Pop
                                       self.cfg.GetVGName())[pnode.name]
6473 c3589cf8 Iustin Pop
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
6474 c3589cf8 Iustin Pop
      node_lvs = node_lvs.payload
6475 c3589cf8 Iustin Pop
      delta = all_lvs.difference(node_lvs.keys())
6476 c3589cf8 Iustin Pop
      if delta:
6477 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
6478 c3589cf8 Iustin Pop
                                   utils.CommaJoin(delta),
6479 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6480 c3589cf8 Iustin Pop
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
6481 c3589cf8 Iustin Pop
      if online_lvs:
6482 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Online logical volumes found, cannot"
6483 c3589cf8 Iustin Pop
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
6484 c3589cf8 Iustin Pop
                                   errors.ECODE_STATE)
6485 c3589cf8 Iustin Pop
      # update the size of each disk based on what was found
6486 c3589cf8 Iustin Pop
      for dsk in self.disks:
6487 c3589cf8 Iustin Pop
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
6488 c3589cf8 Iustin Pop
6489 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6490 6785674e Iustin Pop
6491 231cd901 Iustin Pop
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
6492 a8083063 Iustin Pop
6493 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6494 a8083063 Iustin Pop
6495 49ce1563 Iustin Pop
    # memory check on primary node
6496 49ce1563 Iustin Pop
    if self.op.start:
6497 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
6498 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
6499 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
6500 338e51e8 Iustin Pop
                           self.op.hypervisor)
6501 49ce1563 Iustin Pop
6502 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6503 08896026 Iustin Pop
6504 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6505 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6506 a8083063 Iustin Pop

6507 a8083063 Iustin Pop
    """
6508 a8083063 Iustin Pop
    instance = self.op.instance_name
6509 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6510 a8083063 Iustin Pop
6511 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
6512 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6513 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6514 2a6469d5 Alexander Schreiber
    else:
6515 2a6469d5 Alexander Schreiber
      network_port = None
6516 58acb49d Alexander Schreiber
6517 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
6518 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
6519 31a853d2 Iustin Pop
6520 2c313123 Manuel Franceschini
    # this is needed because utils.PathJoin does not accept None arguments
6521 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
6522 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
6523 2c313123 Manuel Franceschini
    else:
6524 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
6525 2c313123 Manuel Franceschini
6526 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
6527 c4feafe8 Iustin Pop
    file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6528 c4feafe8 Iustin Pop
                                      string_file_storage_dir, instance)
6529 0f1a06e3 Manuel Franceschini
6530 0f1a06e3 Manuel Franceschini
6531 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6532 a8083063 Iustin Pop
                                  self.op.disk_template,
6533 a8083063 Iustin Pop
                                  instance, pnode_name,
6534 08db7c5c Iustin Pop
                                  self.secondaries,
6535 08db7c5c Iustin Pop
                                  self.disks,
6536 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6537 e2a65344 Iustin Pop
                                  self.op.file_driver,
6538 e2a65344 Iustin Pop
                                  0)
6539 a8083063 Iustin Pop
6540 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6541 a8083063 Iustin Pop
                            primary_node=pnode_name,
6542 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6543 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6544 4978db17 Iustin Pop
                            admin_up=False,
6545 58acb49d Alexander Schreiber
                            network_port=network_port,
6546 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6547 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6548 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6549 a8083063 Iustin Pop
                            )
6550 a8083063 Iustin Pop
6551 c3589cf8 Iustin Pop
    if self.adopt_disks:
6552 c3589cf8 Iustin Pop
      # rename LVs to the newly-generated names; we need to construct
6553 c3589cf8 Iustin Pop
      # 'fake' LV disks with the old data, plus the new unique_id
6554 c3589cf8 Iustin Pop
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
6555 c3589cf8 Iustin Pop
      rename_to = []
6556 c3589cf8 Iustin Pop
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
6557 c3589cf8 Iustin Pop
        rename_to.append(t_dsk.logical_id)
6558 c3589cf8 Iustin Pop
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
6559 c3589cf8 Iustin Pop
        self.cfg.SetDiskID(t_dsk, pnode_name)
6560 c3589cf8 Iustin Pop
      result = self.rpc.call_blockdev_rename(pnode_name,
6561 c3589cf8 Iustin Pop
                                             zip(tmp_disks, rename_to))
6562 c3589cf8 Iustin Pop
      result.Raise("Failed to rename adoped LVs")
6563 c3589cf8 Iustin Pop
    else:
6564 c3589cf8 Iustin Pop
      feedback_fn("* creating instance disks...")
6565 796cab27 Iustin Pop
      try:
6566 c3589cf8 Iustin Pop
        _CreateDisks(self, iobj)
6567 c3589cf8 Iustin Pop
      except errors.OpExecError:
6568 c3589cf8 Iustin Pop
        self.LogWarning("Device creation failed, reverting...")
6569 c3589cf8 Iustin Pop
        try:
6570 c3589cf8 Iustin Pop
          _RemoveDisks(self, iobj)
6571 c3589cf8 Iustin Pop
        finally:
6572 c3589cf8 Iustin Pop
          self.cfg.ReleaseDRBDMinors(instance)
6573 c3589cf8 Iustin Pop
          raise
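    # The adoption branch above creates no storage at all: it builds
    # throw-away copies of the Disk objects that point at the existing LVs
    # and issues a single call_blockdev_rename to move those LVs under the
    # freshly generated names, conceptually (illustrative restatement):
    #
    #   rename_pairs = zip(tmp_disks, rename_to)   # (old LV, new logical_id)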
6574 a8083063 Iustin Pop
6575 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6576 a8083063 Iustin Pop
6577 0debfb35 Guido Trotter
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6578 0debfb35 Guido Trotter
6579 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6580 7baf741d Guido Trotter
    # added the instance to the config
6581 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6582 e36e96b4 Guido Trotter
    # Unlock all the nodes
6583 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6584 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6585 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6586 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6587 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6588 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6589 9c8971d7 Guido Trotter
    else:
6590 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6591 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6592 a8083063 Iustin Pop
6593 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6594 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6595 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6596 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6597 a8083063 Iustin Pop
      time.sleep(15)
6598 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6599 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6600 a8083063 Iustin Pop
    else:
6601 a8083063 Iustin Pop
      disk_abort = False
6602 a8083063 Iustin Pop
6603 a8083063 Iustin Pop
    if disk_abort:
6604 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6605 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6606 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6607 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6608 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6609 3ecf6786 Iustin Pop
                               " this instance")
6610 a8083063 Iustin Pop
6611 c3589cf8 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
6612 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6613 25a8792c Iustin Pop
        if not self.op.no_install:
6614 25a8792c Iustin Pop
          feedback_fn("* running the instance OS create scripts...")
6615 25a8792c Iustin Pop
          # FIXME: pass debug option from opcode to backend
6616 25a8792c Iustin Pop
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6617 25a8792c Iustin Pop
                                                 self.op.debug_level)
6618 25a8792c Iustin Pop
          result.Raise("Could not add os for instance %s"
6619 25a8792c Iustin Pop
                       " on node %s" % (instance, pnode_name))
6620 a8083063 Iustin Pop
6621 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6622 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6623 a8083063 Iustin Pop
        src_node = self.op.src_node
6624 09acf207 Guido Trotter
        src_images = self.src_images
6625 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6626 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6627 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6628 09acf207 Guido Trotter
                                                         src_node, src_images,
6629 dd713605 Iustin Pop
                                                         cluster_name,
6630 dd713605 Iustin Pop
                                                         self.op.debug_level)
6631 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6632 944bf548 Iustin Pop
        if msg:
6633 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6634 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6635 a8083063 Iustin Pop
      else:
6636 a8083063 Iustin Pop
        # also checked in the prereq part
6637 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6638 3ecf6786 Iustin Pop
                                     % self.op.mode)
6639 a8083063 Iustin Pop
6640 a8083063 Iustin Pop
    if self.op.start:
6641 4978db17 Iustin Pop
      iobj.admin_up = True
6642 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6643 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6644 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6645 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6646 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6647 a8083063 Iustin Pop
6648 08896026 Iustin Pop
    return list(iobj.all_nodes)
6649 08896026 Iustin Pop
6650 a8083063 Iustin Pop
6651 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6652 a8083063 Iustin Pop
  """Connect to an instance's console.
6653 a8083063 Iustin Pop

6654 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6655 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6656 a8083063 Iustin Pop
  console.
6657 a8083063 Iustin Pop

6658 a8083063 Iustin Pop
  """
6659 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6660 8659b73e Guido Trotter
  REQ_BGL = False
6661 8659b73e Guido Trotter
6662 8659b73e Guido Trotter
  def ExpandNames(self):
6663 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6664 a8083063 Iustin Pop
6665 a8083063 Iustin Pop
  def CheckPrereq(self):
6666 a8083063 Iustin Pop
    """Check prerequisites.
6667 a8083063 Iustin Pop

6668 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6669 a8083063 Iustin Pop

6670 a8083063 Iustin Pop
    """
6671 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6672 8659b73e Guido Trotter
    assert self.instance is not None, \
6673 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6674 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6675 a8083063 Iustin Pop
6676 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6677 a8083063 Iustin Pop
    """Connect to the console of an instance
6678 a8083063 Iustin Pop

6679 a8083063 Iustin Pop
    """
6680 a8083063 Iustin Pop
    instance = self.instance
6681 a8083063 Iustin Pop
    node = instance.primary_node
6682 a8083063 Iustin Pop
6683 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6684 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6685 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6686 a8083063 Iustin Pop
6687 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6688 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6689 a8083063 Iustin Pop
6690 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6691 a8083063 Iustin Pop
6692 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6693 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6694 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6695 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6696 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6697 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6698 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6699 b047857b Michael Hanselmann
6700 82122173 Iustin Pop
    # build ssh cmdline
6701 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
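# Caller-side sketch (illustrative only, not part of this module): the value
# returned by LUConnectConsole.Exec is built by ssh.BuildCmd and is assumed
# here to be an argv list meant to be exec'ed on the master node, roughly:
#
#   cmd = cli.SubmitOpCode(opcodes.OpConnectConsole(instance_name=name))
#   os.execvp(cmd[0], cmd)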
6702 a8083063 Iustin Pop
6703 a8083063 Iustin Pop
6704 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6705 a8083063 Iustin Pop
  """Replace the disks of an instance.
6706 a8083063 Iustin Pop

6707 a8083063 Iustin Pop
  """
6708 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6709 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6710 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6711 efd990e4 Guido Trotter
  REQ_BGL = False
6712 efd990e4 Guido Trotter
6713 7e9366f7 Iustin Pop
  def CheckArguments(self):
6714 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6715 efd990e4 Guido Trotter
      self.op.remote_node = None
6716 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6717 7e9366f7 Iustin Pop
      self.op.iallocator = None
6718 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6719 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6720 7e9366f7 Iustin Pop
6721 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6722 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6723 7e9366f7 Iustin Pop
6724 7e9366f7 Iustin Pop
  def ExpandNames(self):
6725 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6726 7e9366f7 Iustin Pop
6727 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6728 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6729 2bb5c911 Michael Hanselmann
6730 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6731 cf26a87a Iustin Pop
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6732 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6733 2bb5c911 Michael Hanselmann
6734 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6735 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6736 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6737 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6738 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6739 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6740 2bb5c911 Michael Hanselmann
6741 efd990e4 Guido Trotter
    else:
6742 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6743 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6744 efd990e4 Guido Trotter
6745 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6746 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6747 7ea7bcf6 Iustin Pop
                                   self.op.disks, False, self.op.early_release)
6748 c68174b6 Michael Hanselmann
6749 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6750 2bb5c911 Michael Hanselmann
6751 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6752 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6753 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6754 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6755 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6756 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6757 a8083063 Iustin Pop
6758 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6759 a8083063 Iustin Pop
    """Build hooks env.
6760 a8083063 Iustin Pop

6761 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6762 a8083063 Iustin Pop

6763 a8083063 Iustin Pop
    """
6764 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6765 a8083063 Iustin Pop
    env = {
6766 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6767 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6768 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6769 a8083063 Iustin Pop
      }
6770 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6771 0834c866 Iustin Pop
    nl = [
6772 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6773 2bb5c911 Michael Hanselmann
      instance.primary_node,
6774 0834c866 Iustin Pop
      ]
6775 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6776 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6777 a8083063 Iustin Pop
    return env, nl, nl
6778 a8083063 Iustin Pop
6779 2bb5c911 Michael Hanselmann
6780 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6781 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6782 7ffc5a86 Michael Hanselmann

6783 7ffc5a86 Michael Hanselmann
  """
6784 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6785 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6786 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6787 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6788 7ffc5a86 Michael Hanselmann
6789 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6790 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6791 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6792 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6793 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6794 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6795 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6796 7ffc5a86 Michael Hanselmann
6797 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6798 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6799 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6800 7ffc5a86 Michael Hanselmann
6801 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6802 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6803 7ffc5a86 Michael Hanselmann
6804 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6805 7ffc5a86 Michael Hanselmann
6806 7ffc5a86 Michael Hanselmann
    # Declare node locks
6807 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6808 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6809 7ffc5a86 Michael Hanselmann
6810 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6811 cf26a87a Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6812 7ffc5a86 Michael Hanselmann
6813 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6814 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6815 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6816 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6817 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
6818 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6819 7ffc5a86 Michael Hanselmann
6820 7ffc5a86 Michael Hanselmann
    else:
6821 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6822 7ffc5a86 Michael Hanselmann
6823 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6824 7ffc5a86 Michael Hanselmann
    # node
6825 7ffc5a86 Michael Hanselmann
    names = []
6826 3a012b41 Michael Hanselmann
    tasklets = []
6827 7ffc5a86 Michael Hanselmann
6828 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6829 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6830 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6831 7ffc5a86 Michael Hanselmann
6832 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6833 94a1b377 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [],
6834 7ea7bcf6 Iustin Pop
                                True, self.op.early_release)
6835 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6836 7ffc5a86 Michael Hanselmann
6837 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6838 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6839 7ffc5a86 Michael Hanselmann
6840 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6841 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
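    # Net effect of ExpandNames (summary): one TLReplaceDisks tasklet per
    # instance that has this node as its secondary, each created with
    # mode=REPLACE_DISK_CHG and delay_iallocator=True so the replacement
    # secondary is only computed once the earlier tasklets have updated the
    # cluster model.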
6842 7ffc5a86 Michael Hanselmann
6843 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6844 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6845 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6846 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6847 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6848 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6849 7ffc5a86 Michael Hanselmann
6850 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6851 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6852 7ffc5a86 Michael Hanselmann

6853 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6854 7ffc5a86 Michael Hanselmann

6855 7ffc5a86 Michael Hanselmann
    """
6856 7ffc5a86 Michael Hanselmann
    env = {
6857 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6858 7ffc5a86 Michael Hanselmann
      }
6859 7ffc5a86 Michael Hanselmann
6860 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6861 7ffc5a86 Michael Hanselmann
6862 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6863 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6864 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6865 7ffc5a86 Michael Hanselmann
6866 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
6867 7ffc5a86 Michael Hanselmann
6868 7ffc5a86 Michael Hanselmann
6869 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6870 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6871 2bb5c911 Michael Hanselmann

6872 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6873 2bb5c911 Michael Hanselmann

6874 2bb5c911 Michael Hanselmann
  """
6875 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6876 7ea7bcf6 Iustin Pop
               disks, delay_iallocator, early_release):
6877 2bb5c911 Michael Hanselmann
    """Initializes this class.
6878 2bb5c911 Michael Hanselmann

6879 2bb5c911 Michael Hanselmann
    """
6880 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
6881 464243a7 Michael Hanselmann
6882 2bb5c911 Michael Hanselmann
    # Parameters
6883 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
6884 2bb5c911 Michael Hanselmann
    self.mode = mode
6885 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
6886 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
6887 2bb5c911 Michael Hanselmann
    self.disks = disks
6888 94a1b377 Michael Hanselmann
    self.delay_iallocator = delay_iallocator
6889 7ea7bcf6 Iustin Pop
    self.early_release = early_release
6890 2bb5c911 Michael Hanselmann
6891 2bb5c911 Michael Hanselmann
    # Runtime data
6892 2bb5c911 Michael Hanselmann
    self.instance = None
6893 2bb5c911 Michael Hanselmann
    self.new_node = None
6894 2bb5c911 Michael Hanselmann
    self.target_node = None
6895 2bb5c911 Michael Hanselmann
    self.other_node = None
6896 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
6897 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
6898 2bb5c911 Michael Hanselmann
6899 2bb5c911 Michael Hanselmann
  @staticmethod
6900 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
6901 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
6902 c68174b6 Michael Hanselmann

6903 c68174b6 Michael Hanselmann
    """
6904 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
6905 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
6906 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
6907 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
6908 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
6909 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
6910 02a00186 Michael Hanselmann
6911 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
6912 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
6913 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
6914 02a00186 Michael Hanselmann
6915 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
6916 02a00186 Michael Hanselmann
      # Not replacing the secondary
6917 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
6918 02a00186 Michael Hanselmann
                                 " only be used when changing the"
6919 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
6920 2bb5c911 Michael Hanselmann
6921 2bb5c911 Michael Hanselmann
  @staticmethod
6922 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6923 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
6924 2bb5c911 Michael Hanselmann

6925 2bb5c911 Michael Hanselmann
    """
6926 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
6927 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
6928 2bb5c911 Michael Hanselmann
                     name=instance_name,
6929 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
6930 2bb5c911 Michael Hanselmann
6931 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
6932 2bb5c911 Michael Hanselmann
6933 2bb5c911 Michael Hanselmann
    if not ial.success:
6934 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6935 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
6936 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6937 2bb5c911 Michael Hanselmann
6938 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6939 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6940 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
6941 d984846d Iustin Pop
                                 (iallocator_name,
6942 680f0a89 Iustin Pop
                                  len(ial.result), ial.required_nodes),
6943 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6944 2bb5c911 Michael Hanselmann
6945 680f0a89 Iustin Pop
    remote_node_name = ial.result[0]
6946 2bb5c911 Michael Hanselmann
6947 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6948 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
6949 2bb5c911 Michael Hanselmann
6950 2bb5c911 Michael Hanselmann
    return remote_node_name
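    # For the relocation request above a successful run is expected to return
    # required_nodes == 1 and a single-element result (illustrative:
    # ial.result == ["node3.example.com"]), which becomes the new secondary.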
6951 2bb5c911 Michael Hanselmann
6952 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
6953 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6954 2d9005d8 Michael Hanselmann
                                    node_name, True)
6955 942be002 Michael Hanselmann
6956 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
6957 2bb5c911 Michael Hanselmann
    """Check prerequisites.
6958 2bb5c911 Michael Hanselmann

6959 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
6960 2bb5c911 Michael Hanselmann

6961 2bb5c911 Michael Hanselmann
    """
6962 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
6963 e9022531 Iustin Pop
    assert instance is not None, \
6964 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
6965 2bb5c911 Michael Hanselmann
6966 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
6967 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6968 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
6969 a8083063 Iustin Pop
6970 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
6971 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
6972 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
6973 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
6974 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6975 a8083063 Iustin Pop
6976 94a1b377 Michael Hanselmann
    if not self.delay_iallocator:
6977 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
6978 94a1b377 Michael Hanselmann
6979 94a1b377 Michael Hanselmann
  def _CheckPrereq2(self):
6980 94a1b377 Michael Hanselmann
    """Check prerequisites, second part.
6981 94a1b377 Michael Hanselmann

6982 94a1b377 Michael Hanselmann
    This function should always be part of CheckPrereq. It was separated and is
6983 94a1b377 Michael Hanselmann
    now called from Exec because during node evacuation iallocator was only
6984 94a1b377 Michael Hanselmann
    called with an unmodified cluster model, not taking planned changes into
6985 94a1b377 Michael Hanselmann
    account.
6986 94a1b377 Michael Hanselmann

6987 94a1b377 Michael Hanselmann
    """
6988 94a1b377 Michael Hanselmann
    instance = self.instance
6989 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
6990 a9e0c397 Iustin Pop
6991 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
6992 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
6993 2bb5c911 Michael Hanselmann
    else:
6994 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
6995 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
6996 b6e82a65 Iustin Pop
6997 a9e0c397 Iustin Pop
    if remote_node is not None:
6998 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
6999 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
7000 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
7001 a9e0c397 Iustin Pop
    else:
7002 a9e0c397 Iustin Pop
      self.remote_node_info = None
7003 2bb5c911 Michael Hanselmann
7004 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
7005 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
7006 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
7007 2bb5c911 Michael Hanselmann
7008 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
7009 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
7010 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
7011 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7012 7e9366f7 Iustin Pop
7013 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7014 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
7015 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7016 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7017 942be002 Michael Hanselmann
7018 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
7019 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7020 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7021 942be002 Michael Hanselmann
7022 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
7023 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7024 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
7025 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
7026 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7027 942be002 Michael Hanselmann
7028 942be002 Michael Hanselmann
      if faulty_primary:
7029 942be002 Michael Hanselmann
        self.disks = faulty_primary
7030 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7031 942be002 Michael Hanselmann
        self.other_node = secondary_node
7032 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7033 942be002 Michael Hanselmann
      elif faulty_secondary:
7034 942be002 Michael Hanselmann
        self.disks = faulty_secondary
7035 942be002 Michael Hanselmann
        self.target_node = secondary_node
7036 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7037 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7038 942be002 Michael Hanselmann
      else:
7039 942be002 Michael Hanselmann
        self.disks = []
7040 942be002 Michael Hanselmann
        check_nodes = []
7041 942be002 Michael Hanselmann
7042 942be002 Michael Hanselmann
    else:
7043 942be002 Michael Hanselmann
      # Non-automatic modes
7044 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
7045 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7046 942be002 Michael Hanselmann
        self.other_node = secondary_node
7047 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7048 7e9366f7 Iustin Pop
7049 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
7050 942be002 Michael Hanselmann
        self.target_node = secondary_node
7051 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7052 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7053 a9e0c397 Iustin Pop
7054 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
7055 942be002 Michael Hanselmann
        self.new_node = remote_node
7056 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7057 942be002 Michael Hanselmann
        self.target_node = secondary_node
7058 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
7059 54155f52 Iustin Pop
7060 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
7061 a8083063 Iustin Pop
7062 9af0fa6a Iustin Pop
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7063 9af0fa6a Iustin Pop
        assert old_node_info is not None
7064 9af0fa6a Iustin Pop
        if old_node_info.offline and not self.early_release:
7065 9af0fa6a Iustin Pop
          # doesn't make sense to delay the release
7066 9af0fa6a Iustin Pop
          self.early_release = True
7067 9af0fa6a Iustin Pop
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7068 9af0fa6a Iustin Pop
                          " early-release mode", secondary_node)
7069 9af0fa6a Iustin Pop
7070 942be002 Michael Hanselmann
      else:
7071 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7072 942be002 Michael Hanselmann
                                     self.mode)
7073 942be002 Michael Hanselmann
7074 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
7075 942be002 Michael Hanselmann
      if not self.disks:
7076 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
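      # Node roles chosen by the branches above (summary):
      #   REPLACE_DISK_PRI:  target=primary,   other=secondary, new_node=None
      #   REPLACE_DISK_SEC:  target=secondary, other=primary,   new_node=None
      #   REPLACE_DISK_CHG:  target=secondary, other=primary,   new_node=remote
      #   REPLACE_DISK_AUTO: roles follow whichever side reported faulty disks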
7077 a9e0c397 Iustin Pop
7078 2bb5c911 Michael Hanselmann
    for node in check_nodes:
7079 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
7080 e4376078 Iustin Pop
7081 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
7082 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
7083 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
7084 e4376078 Iustin Pop
7085 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
7086 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
7087 e4376078 Iustin Pop
7088 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
7089 2bb5c911 Michael Hanselmann
      if node_name is not None:
7090 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7091 e4376078 Iustin Pop
7092 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
7093 a9e0c397 Iustin Pop
7094 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
7095 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
7096 2bb5c911 Michael Hanselmann

7097 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
7098 cff90b79 Iustin Pop

7099 a9e0c397 Iustin Pop
    """
7100 94a1b377 Michael Hanselmann
    if self.delay_iallocator:
7101 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7102 94a1b377 Michael Hanselmann
7103 942be002 Michael Hanselmann
    if not self.disks:
7104 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
7105 942be002 Michael Hanselmann
      return
7106 942be002 Michael Hanselmann
7107 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
7108 1f864b60 Iustin Pop
                (utils.CommaJoin(self.disks), self.instance.name))
7109 7ffc5a86 Michael Hanselmann
7110 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
7111 2bb5c911 Michael Hanselmann
7112 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
7113 2bb5c911 Michael Hanselmann
    if activate_disks:
7114 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
7115 2bb5c911 Michael Hanselmann
7116 2bb5c911 Michael Hanselmann
    try:
7117 942be002 Michael Hanselmann
      # Should we replace the secondary node?
7118 942be002 Michael Hanselmann
      if self.new_node is not None:
7119 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
7120 2bb5c911 Michael Hanselmann
      else:
7121 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
7122 a4eae71f Michael Hanselmann
7123 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
7124 2bb5c911 Michael Hanselmann
7125 2bb5c911 Michael Hanselmann
    finally:
7126 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
7127 5c983ee5 Iustin Pop
      # down instance
7128 2bb5c911 Michael Hanselmann
      if activate_disks:
7129 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7130 2bb5c911 Michael Hanselmann
7131 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
7132 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
7133 2bb5c911 Michael Hanselmann
7134 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
7135 cff90b79 Iustin Pop
7136 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
7137 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
7138 cff90b79 Iustin Pop
    if not results:
7139 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
7140 2bb5c911 Michael Hanselmann
7141 2bb5c911 Michael Hanselmann
    for node in nodes:
7142 781de953 Iustin Pop
      res = results[node]
7143 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
7144 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
7145 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7146 2bb5c911 Michael Hanselmann
                                 (vgname, node))
7147 2bb5c911 Michael Hanselmann
7148 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
7149 2bb5c911 Michael Hanselmann
    # Check disk existence
7150 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7151 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7152 cff90b79 Iustin Pop
        continue
7153 2bb5c911 Michael Hanselmann
7154 2bb5c911 Michael Hanselmann
      for node in nodes:
7155 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7156 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
7157 2bb5c911 Michael Hanselmann
7158 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
7159 2bb5c911 Michael Hanselmann
7160 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7161 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
7162 2bb5c911 Michael Hanselmann
          if not msg:
7163 2bb5c911 Michael Hanselmann
            msg = "disk not found"
7164 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7165 23829f6f Iustin Pop
                                   (idx, node, msg))
7166 cff90b79 Iustin Pop
7167 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7168 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7169 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7170 cff90b79 Iustin Pop
        continue
7171 cff90b79 Iustin Pop
7172 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7173 2bb5c911 Michael Hanselmann
                      (idx, node_name))
7174 2bb5c911 Michael Hanselmann
7175 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7176 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
7177 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7178 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
7179 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
7180 2bb5c911 Michael Hanselmann
7181 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
7182 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
7183 2bb5c911 Michael Hanselmann
    iv_names = {}
7184 2bb5c911 Michael Hanselmann
7185 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7186 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7187 a9e0c397 Iustin Pop
        continue
7188 2bb5c911 Michael Hanselmann
7189 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7190 2bb5c911 Michael Hanselmann
7191 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7192 2bb5c911 Michael Hanselmann
7193 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7194 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
7195 2bb5c911 Michael Hanselmann
7196 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7197 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
7198 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7199 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
7200 2bb5c911 Michael Hanselmann
7201 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
7202 a9e0c397 Iustin Pop
      old_lvs = dev.children
7203 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7204 2bb5c911 Michael Hanselmann
7205 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
7206 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
7207 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7208 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7209 2bb5c911 Michael Hanselmann
7210 2bb5c911 Michael Hanselmann
    return iv_names
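    # Shape of the mapping returned above (illustrative), as consumed by
    # _CheckDevices and _RemoveOldStorage:
    #
    #   {"disk/0": (drbd_disk, [old_data_lv, old_meta_lv],
    #               [new_data_lv, new_meta_lv]), ...}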
7211 2bb5c911 Michael Hanselmann
7212 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
7213 1122eb25 Iustin Pop
    for name, (dev, _, _) in iv_names.iteritems():
7214 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7215 2bb5c911 Michael Hanselmann
7216 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
7217 2bb5c911 Michael Hanselmann
7218 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
7219 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
7220 2bb5c911 Michael Hanselmann
        if not msg:
7221 2bb5c911 Michael Hanselmann
          msg = "disk not found"
7222 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7223 2bb5c911 Michael Hanselmann
                                 (name, msg))
7224 2bb5c911 Michael Hanselmann
7225 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
7226 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7227 2bb5c911 Michael Hanselmann
7228 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
7229 1122eb25 Iustin Pop
    for name, (_, old_lvs, _) in iv_names.iteritems():
7230 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7231 2bb5c911 Michael Hanselmann
7232 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
7233 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
7234 2bb5c911 Michael Hanselmann
7235 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7236 2bb5c911 Michael Hanselmann
        if msg:
7237 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7238 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
7239 2bb5c911 Michael Hanselmann
7240 7ea7bcf6 Iustin Pop
  def _ReleaseNodeLock(self, node_name):
7241 7ea7bcf6 Iustin Pop
    """Releases the lock for a given node."""
7242 7ea7bcf6 Iustin Pop
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7243 7ea7bcf6 Iustin Pop
7244 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7245 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
7246 2bb5c911 Michael Hanselmann

7247 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
7248 2bb5c911 Michael Hanselmann

7249 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
7250 2bb5c911 Michael Hanselmann

7251 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
7252 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
7253 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
7254 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
7255 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
7256 2bb5c911 Michael Hanselmann

7257 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
7258 2bb5c911 Michael Hanselmann

7259 2bb5c911 Michael Hanselmann
      1. for each modified disk:
7260 2bb5c911 Michael Hanselmann

7261 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
7262 2bb5c911 Michael Hanselmann

7263 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
7264 2bb5c911 Michael Hanselmann

7265 2bb5c911 Michael Hanselmann
    """
7266 2bb5c911 Michael Hanselmann
    steps_total = 6
7267 2bb5c911 Michael Hanselmann
7268 2bb5c911 Michael Hanselmann
    # Step: check device activation
7269 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7270 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
7271 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
7272 2bb5c911 Michael Hanselmann
7273 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
7274 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7275 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
7276 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
7277 2bb5c911 Michael Hanselmann
                                False)
7278 2bb5c911 Michael Hanselmann
7279 2bb5c911 Michael Hanselmann
    # Step: create new storage
7280 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7281 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
7282 a9e0c397 Iustin Pop
7283 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
7284 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7285 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7286 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7287 2bb5c911 Michael Hanselmann
7288 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7289 4d4a651d Michael Hanselmann
                                                     old_lvs)
7290 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
7291 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
7292 cff90b79 Iustin Pop
      #dev.children = []
7293 cff90b79 Iustin Pop
      #cfg.Update(instance)
7294 a9e0c397 Iustin Pop
7295 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
7296 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
7297 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7298 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
7299 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
7300 cff90b79 Iustin Pop
7301 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
7302 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
7303 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
7304 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
7305 2bb5c911 Michael Hanselmann
7306 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
7307 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
7308 cff90b79 Iustin Pop
      for to_ren in old_lvs:
7309 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7310 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
7311 23829f6f Iustin Pop
          # device exists
7312 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7313 cff90b79 Iustin Pop
7314 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
7315 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7316 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
7317 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7318 2bb5c911 Michael Hanselmann
7319 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
7320 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
7321 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
7322 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
7323 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7324 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
7325 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7326 cff90b79 Iustin Pop
7327 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
7328 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
7329 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
7330 a9e0c397 Iustin Pop
7331 cff90b79 Iustin Pop
      for disk in old_lvs:
7332 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
7333 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
7334 a9e0c397 Iustin Pop
7335 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
7336 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7337 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7338 4d4a651d Michael Hanselmann
                                                  new_lvs)
7339 4c4e4e1e Iustin Pop
      msg = result.fail_msg
7340 2cc1da8b Iustin Pop
      if msg:
7341 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
7342 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7343 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
7344 4c4e4e1e Iustin Pop
          if msg2:
7345 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7346 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
7347 2bb5c911 Michael Hanselmann
                                     "volumes"))
7348 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7349 a9e0c397 Iustin Pop
7350 a9e0c397 Iustin Pop
      dev.children = new_lvs
7351 a9e0c397 Iustin Pop
7352 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
7353 a9e0c397 Iustin Pop
7354 7ea7bcf6 Iustin Pop
    cstep = 5
7355 7ea7bcf6 Iustin Pop
    if self.early_release:
7356 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7357 7ea7bcf6 Iustin Pop
      cstep += 1
7358 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7359 d5cd389c Iustin Pop
      # WARNING: we release both node locks here, do not do other RPCs
7360 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7361 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.target_node, self.other_node])
7362 7ea7bcf6 Iustin Pop
7363 2bb5c911 Michael Hanselmann
    # Wait for sync
7364 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7365 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7366 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7367 7ea7bcf6 Iustin Pop
    cstep += 1
7368 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7369 a9e0c397 Iustin Pop
7370 2bb5c911 Michael Hanselmann
    # Check all devices manually
7371 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7372 a9e0c397 Iustin Pop
7373 cff90b79 Iustin Pop
    # Step: remove old storage
7374 7ea7bcf6 Iustin Pop
    if not self.early_release:
7375 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7376 7ea7bcf6 Iustin Pop
      cstep += 1
7377 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7378 a9e0c397 Iustin Pop
7379 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
7380 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
7381 a9e0c397 Iustin Pop

7382 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
7383 a9e0c397 Iustin Pop
      - for all disks of the instance:
7384 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
7385 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
7386 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
7387 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
7388 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
7389 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
7390 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
7391 a9e0c397 Iustin Pop
          not network enabled
7392 a9e0c397 Iustin Pop
      - wait for sync across all devices
7393 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
7394 a9e0c397 Iustin Pop

7395 a9e0c397 Iustin Pop
    Failures are not very well handled.
7396 0834c866 Iustin Pop

7397 a9e0c397 Iustin Pop
    """
7398 0834c866 Iustin Pop
    steps_total = 6
7399 0834c866 Iustin Pop
7400 0834c866 Iustin Pop
    # Step: check device activation
7401 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7402 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
7403 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
7404 0834c866 Iustin Pop
7405 0834c866 Iustin Pop
    # Step: check other node consistency
7406 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7407 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7408 0834c866 Iustin Pop
7409 0834c866 Iustin Pop
    # Step: create new storage
7410 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7411 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7412 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7413 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
7414 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
7415 a9e0c397 Iustin Pop
      for new_lv in dev.children:
7416 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7417 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7418 a9e0c397 Iustin Pop
7419 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
7420 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
7421 a1578d63 Iustin Pop
    # error and the success paths
7422 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7423 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7424 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
7425 2bb5c911 Michael Hanselmann
                                        self.instance.name)
7426 099c52ad Iustin Pop
    logging.debug("Allocated minors %r", minors)
7427 2bb5c911 Michael Hanselmann
7428 2bb5c911 Michael Hanselmann
    iv_names = {}
7429 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7430 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7431 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
7432 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
7433 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
7434 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
7435 a2d59d8b Iustin Pop
      # with network, for the later activation in step 4
7436 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7437 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
7438 a2d59d8b Iustin Pop
        p_minor = o_minor1
7439 ffa1c0dc Iustin Pop
      else:
7440 1122eb25 Iustin Pop
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7441 a2d59d8b Iustin Pop
        p_minor = o_minor2
7442 a2d59d8b Iustin Pop
7443 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7444 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
7445 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7446 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
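      # Hedged example (all values invented for illustration): with
      # o_port=11000, p_minor=0 and new_minor=3 this produces
      #   new_alone_id = (pnode, new_node, None,  0, 3, secret)
      #   new_net_id   = (pnode, new_node, 11000, 0, 3, secret)
      # The None in place of the port is what lets the new drbd come up
      # without networking first; the networked id is kept in iv_names and
      # only written to the configuration after the old disks are detached.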
7447 a2d59d8b Iustin Pop
7448 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
7449 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7450 a2d59d8b Iustin Pop
                    new_net_id)
7451 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7452 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
7453 8a6c7011 Iustin Pop
                              children=dev.children,
7454 8a6c7011 Iustin Pop
                              size=dev.size)
7455 796cab27 Iustin Pop
      try:
7456 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7457 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
7458 82759cb1 Iustin Pop
      except errors.GenericError:
7459 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7460 796cab27 Iustin Pop
        raise
7461 a9e0c397 Iustin Pop
7462 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
7463 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7464 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7465 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
7466 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7467 cacfd1fd Iustin Pop
      if msg:
7468 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7469 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
7470 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
7471 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
7472 a9e0c397 Iustin Pop
7473 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7474 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7475 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
7476 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
7477 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
7478 642445d9 Iustin Pop
7479 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7480 a2d59d8b Iustin Pop
    if msg:
7481 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
7482 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7483 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
7484 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
7485 642445d9 Iustin Pop
7486 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
7487 642445d9 Iustin Pop
    # the instance to point to the new secondary
7488 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
7489 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
7490 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
7491 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7492 2bb5c911 Michael Hanselmann
7493 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
7494 a9e0c397 Iustin Pop
7495 642445d9 Iustin Pop
    # and now perform the drbd attach
7496 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7497 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
7498 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7499 4d4a651d Michael Hanselmann
                                            self.new_node],
7500 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
7501 4d4a651d Michael Hanselmann
                                           self.instance.disks,
7502 4d4a651d Michael Hanselmann
                                           self.instance.name,
7503 a2d59d8b Iustin Pop
                                           False)
7504 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
7505 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
7506 a2d59d8b Iustin Pop
      if msg:
7507 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7508 4d4a651d Michael Hanselmann
                           to_node, msg,
7509 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
7510 2bb5c911 Michael Hanselmann
                                 " status of disks"))
7511 7ea7bcf6 Iustin Pop
    cstep = 5
7512 7ea7bcf6 Iustin Pop
    if self.early_release:
7513 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7514 7ea7bcf6 Iustin Pop
      cstep += 1
7515 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7516 d5cd389c Iustin Pop
      # WARNING: we release all node locks here, do not do other RPCs
7517 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7518 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.instance.primary_node,
7519 d5cd389c Iustin Pop
                             self.target_node,
7520 d5cd389c Iustin Pop
                             self.new_node])
7521 a9e0c397 Iustin Pop
7522 2bb5c911 Michael Hanselmann
    # Wait for sync
7523 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7524 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7525 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7526 7ea7bcf6 Iustin Pop
    cstep += 1
7527 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7528 a9e0c397 Iustin Pop
7529 2bb5c911 Michael Hanselmann
    # Check all devices manually
7530 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7531 22985314 Guido Trotter
7532 2bb5c911 Michael Hanselmann
    # Step: remove old storage
7533 7ea7bcf6 Iustin Pop
    if not self.early_release:
7534 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7535 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7536 a9e0c397 Iustin Pop
7537 a8083063 Iustin Pop
7538 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
7539 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
7540 76aef8fc Michael Hanselmann

7541 76aef8fc Michael Hanselmann
  """
7542 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
7543 76aef8fc Michael Hanselmann
  REQ_BGL = False
7544 76aef8fc Michael Hanselmann
7545 76aef8fc Michael Hanselmann
  def CheckArguments(self):
7546 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7547 76aef8fc Michael Hanselmann
7548 76aef8fc Michael Hanselmann
  def ExpandNames(self):
7549 76aef8fc Michael Hanselmann
    self.needed_locks = {
7550 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
7551 76aef8fc Michael Hanselmann
      }
7552 76aef8fc Michael Hanselmann
7553 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
7554 7e9c6a78 Iustin Pop
    """Ensure faulty disks abort the opcode or at least warn."""
7555 7e9c6a78 Iustin Pop
    try:
7556 7e9c6a78 Iustin Pop
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7557 7e9c6a78 Iustin Pop
                                  node_name, True):
7558 7e9c6a78 Iustin Pop
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7559 7e9c6a78 Iustin Pop
                                   " node '%s'" % (instance.name, node_name),
7560 7e9c6a78 Iustin Pop
                                   errors.ECODE_STATE)
7561 7e9c6a78 Iustin Pop
    except errors.OpPrereqError, err:
7562 7e9c6a78 Iustin Pop
      if self.op.ignore_consistency:
7563 7e9c6a78 Iustin Pop
        self.proc.LogWarning(str(err.args[0]))
7564 7e9c6a78 Iustin Pop
      else:
7565 7e9c6a78 Iustin Pop
        raise
7566 76aef8fc Michael Hanselmann
7567 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
7568 76aef8fc Michael Hanselmann
    """Check prerequisites.
7569 76aef8fc Michael Hanselmann

7570 76aef8fc Michael Hanselmann
    """
7571 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7572 76aef8fc Michael Hanselmann
7573 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7574 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7575 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7576 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7577 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7578 76aef8fc Michael Hanselmann
7579 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7580 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7581 7e9c6a78 Iustin Pop
      if not inst.admin_up:
7582 7e9c6a78 Iustin Pop
        continue
7583 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7584 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7585 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7586 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7587 76aef8fc Michael Hanselmann
7588 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7589 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7590 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7591 76aef8fc Michael Hanselmann
7592 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7593 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7594 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7595 76aef8fc Michael Hanselmann
                                           self.op.name,
7596 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7597 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7598 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7599 76aef8fc Michael Hanselmann
7600 76aef8fc Michael Hanselmann
7601 f7e7689f Iustin Pop
class LUNodeEvacuationStrategy(NoHooksLU):
7602 f7e7689f Iustin Pop
  """Computes the node evacuation strategy.
7603 f7e7689f Iustin Pop

7604 f7e7689f Iustin Pop
  """
7605 f7e7689f Iustin Pop
  _OP_REQP = ["nodes"]
7606 f7e7689f Iustin Pop
  REQ_BGL = False
7607 f7e7689f Iustin Pop
7608 f7e7689f Iustin Pop
  def CheckArguments(self):
7609 f7e7689f Iustin Pop
    if not hasattr(self.op, "remote_node"):
7610 f7e7689f Iustin Pop
      self.op.remote_node = None
7611 f7e7689f Iustin Pop
    if not hasattr(self.op, "iallocator"):
7612 f7e7689f Iustin Pop
      self.op.iallocator = None
7613 f7e7689f Iustin Pop
    if self.op.remote_node is not None and self.op.iallocator is not None:
7614 f7e7689f Iustin Pop
      raise errors.OpPrereqError("Give either the iallocator or the new"
7615 f7e7689f Iustin Pop
                                 " secondary, not both", errors.ECODE_INVAL)
7616 f7e7689f Iustin Pop
7617 f7e7689f Iustin Pop
  def ExpandNames(self):
7618 f7e7689f Iustin Pop
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7619 f7e7689f Iustin Pop
    self.needed_locks = locks = {}
7620 f7e7689f Iustin Pop
    if self.op.remote_node is None:
7621 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7622 f7e7689f Iustin Pop
    else:
7623 f7e7689f Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7624 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7625 f7e7689f Iustin Pop
7626 f7e7689f Iustin Pop
  def CheckPrereq(self):
7627 f7e7689f Iustin Pop
    pass
7628 f7e7689f Iustin Pop
7629 f7e7689f Iustin Pop
  def Exec(self, feedback_fn):
7630 f7e7689f Iustin Pop
    if self.op.remote_node is not None:
7631 f7e7689f Iustin Pop
      instances = []
7632 f7e7689f Iustin Pop
      for node in self.op.nodes:
7633 f7e7689f Iustin Pop
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7634 f7e7689f Iustin Pop
      result = []
7635 f7e7689f Iustin Pop
      for i in instances:
7636 f7e7689f Iustin Pop
        if i.primary_node == self.op.remote_node:
7637 f7e7689f Iustin Pop
          raise errors.OpPrereqError("Node %s is the primary node of"
7638 f7e7689f Iustin Pop
                                     " instance %s, cannot use it as"
7639 f7e7689f Iustin Pop
                                     " secondary" %
7640 f7e7689f Iustin Pop
                                     (self.op.remote_node, i.name),
7641 f7e7689f Iustin Pop
                                     errors.ECODE_INVAL)
7642 f7e7689f Iustin Pop
        result.append([i.name, self.op.remote_node])
7643 f7e7689f Iustin Pop
    else:
7644 f7e7689f Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
7645 f7e7689f Iustin Pop
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7646 f7e7689f Iustin Pop
                       evac_nodes=self.op.nodes)
7647 f7e7689f Iustin Pop
      ial.Run(self.op.iallocator, validate=True)
7648 f7e7689f Iustin Pop
      if not ial.success:
7649 f7e7689f Iustin Pop
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7650 f7e7689f Iustin Pop
                                 errors.ECODE_NORES)
7651 f7e7689f Iustin Pop
      result = ial.result
7652 f7e7689f Iustin Pop
    return result
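    # Shape of the return value, sketched from the explicit remote_node
    # branch above (hostnames are placeholders): a list of
    # [instance_name, new_secondary] pairs, e.g.
    #   [["inst1.example.com", "node3.example.com"], ...]
    # In the iallocator branch, ial.result is passed through unchanged; its
    # exact element layout is defined by the MEVAC allocator protocol and is
    # not restated here.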
7653 f7e7689f Iustin Pop
7654 f7e7689f Iustin Pop
7655 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7656 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7657 8729e0d7 Iustin Pop

7658 8729e0d7 Iustin Pop
  """
7659 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7660 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7661 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7662 31e63dbf Guido Trotter
  REQ_BGL = False
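  # Hedged usage sketch (parameter names taken from _OP_REQP above; the
  # opcode class name is an assumption, not verified here):
  #   opcodes.OpGrowDisk(instance_name="inst1.example.com", disk=0,
  #                      amount=2048, wait_for_sync=True)
  # would grow disk 0 of the instance by 2048 MB and wait for the resync.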
7663 31e63dbf Guido Trotter
7664 31e63dbf Guido Trotter
  def ExpandNames(self):
7665 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7666 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7667 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7668 31e63dbf Guido Trotter
7669 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7670 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7671 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7672 8729e0d7 Iustin Pop
7673 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7674 8729e0d7 Iustin Pop
    """Build hooks env.
7675 8729e0d7 Iustin Pop

7676 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7677 8729e0d7 Iustin Pop

7678 8729e0d7 Iustin Pop
    """
7679 8729e0d7 Iustin Pop
    env = {
7680 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7681 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7682 8729e0d7 Iustin Pop
      }
7683 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7684 abd8e836 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7685 8729e0d7 Iustin Pop
    return env, nl, nl
7686 8729e0d7 Iustin Pop
7687 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7688 8729e0d7 Iustin Pop
    """Check prerequisites.
7689 8729e0d7 Iustin Pop

7690 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7691 8729e0d7 Iustin Pop

7692 8729e0d7 Iustin Pop
    """
7693 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7694 31e63dbf Guido Trotter
    assert instance is not None, \
7695 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7696 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7697 6b12959c Iustin Pop
    for node in nodenames:
7698 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7699 7527a8a4 Iustin Pop
7700 31e63dbf Guido Trotter
7701 8729e0d7 Iustin Pop
    self.instance = instance
7702 8729e0d7 Iustin Pop
7703 728489a3 Guido Trotter
    if instance.disk_template not in constants.DTS_GROWABLE:
7704 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7705 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7706 8729e0d7 Iustin Pop
7707 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7708 8729e0d7 Iustin Pop
7709 2c42c5df Guido Trotter
    if instance.disk_template != constants.DT_FILE:
7710 2c42c5df Guido Trotter
      # TODO: check the free disk space for file, when that feature will be
7711 2c42c5df Guido Trotter
      # supported
7712 2c42c5df Guido Trotter
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)
7713 8729e0d7 Iustin Pop
7714 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7715 8729e0d7 Iustin Pop
    """Execute disk grow.
7716 8729e0d7 Iustin Pop

7717 8729e0d7 Iustin Pop
    """
7718 8729e0d7 Iustin Pop
    instance = self.instance
7719 ad24e046 Iustin Pop
    disk = self.disk
7720 6b12959c Iustin Pop
    for node in instance.all_nodes:
7721 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7722 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7723 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7724 5bc556dd Michael Hanselmann
7725 5bc556dd Michael Hanselmann
      # TODO: Rewrite code to work properly
7726 5bc556dd Michael Hanselmann
      # DRBD goes into sync mode for a short amount of time after executing the
7727 5bc556dd Michael Hanselmann
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7728 5bc556dd Michael Hanselmann
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7729 5bc556dd Michael Hanselmann
      # time is a work-around.
7730 5bc556dd Michael Hanselmann
      time.sleep(5)
7731 5bc556dd Michael Hanselmann
7732 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
7733 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7734 6605411d Iustin Pop
    if self.op.wait_for_sync:
7735 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7736 6605411d Iustin Pop
      if disk_abort:
7737 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7738 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7739 8729e0d7 Iustin Pop
7740 8729e0d7 Iustin Pop
7741 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7742 a8083063 Iustin Pop
  """Query runtime instance data.
7743 a8083063 Iustin Pop

7744 a8083063 Iustin Pop
  """
7745 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7746 a987fa48 Guido Trotter
  REQ_BGL = False
7747 ae5849b5 Michael Hanselmann
7748 a987fa48 Guido Trotter
  def ExpandNames(self):
7749 a987fa48 Guido Trotter
    self.needed_locks = {}
7750 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7751 a987fa48 Guido Trotter
7752 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7753 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7754 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7755 a987fa48 Guido Trotter
7756 a987fa48 Guido Trotter
    if self.op.instances:
7757 a987fa48 Guido Trotter
      self.wanted_names = []
7758 a987fa48 Guido Trotter
      for name in self.op.instances:
7759 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
7760 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7761 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7762 a987fa48 Guido Trotter
    else:
7763 a987fa48 Guido Trotter
      self.wanted_names = None
7764 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7765 a987fa48 Guido Trotter
7766 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7767 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7768 a987fa48 Guido Trotter
7769 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7770 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7771 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7772 a8083063 Iustin Pop
7773 a8083063 Iustin Pop
  def CheckPrereq(self):
7774 a8083063 Iustin Pop
    """Check prerequisites.
7775 a8083063 Iustin Pop

7776 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7777 a8083063 Iustin Pop

7778 a8083063 Iustin Pop
    """
7779 a987fa48 Guido Trotter
    if self.wanted_names is None:
7780 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7781 a8083063 Iustin Pop
7782 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7783 a987fa48 Guido Trotter
                             in self.wanted_names]
7784 a987fa48 Guido Trotter
    return
7785 a8083063 Iustin Pop
7786 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7787 98825740 Michael Hanselmann
    """Returns the status of a block device
7788 98825740 Michael Hanselmann

7789 98825740 Michael Hanselmann
    """
7790 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7791 98825740 Michael Hanselmann
      return None
7792 98825740 Michael Hanselmann
7793 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7794 98825740 Michael Hanselmann
7795 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7796 98825740 Michael Hanselmann
    if result.offline:
7797 98825740 Michael Hanselmann
      return None
7798 98825740 Michael Hanselmann
7799 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7800 98825740 Michael Hanselmann
7801 98825740 Michael Hanselmann
    status = result.payload
7802 ddfe2228 Michael Hanselmann
    if status is None:
7803 ddfe2228 Michael Hanselmann
      return None
7804 98825740 Michael Hanselmann
7805 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7806 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7807 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
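    # Purely illustrative (field values are invented): for a drbd disk this
    # tuple could look like
    #   ("/dev/drbd0", 147, 0, 80.5, 120, True, None)
    # i.e. (dev_path, major, minor, sync_percent, estimated_time,
    # is_degraded, ldisk_status), with the sync fields only meaningful while
    # the device is resyncing.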
7808 98825740 Michael Hanselmann
7809 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7810 a8083063 Iustin Pop
    """Compute block device status.
7811 a8083063 Iustin Pop

7812 a8083063 Iustin Pop
    """
7813 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7814 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7815 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7816 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7817 a8083063 Iustin Pop
      else:
7818 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7819 a8083063 Iustin Pop
7820 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7821 98825740 Michael Hanselmann
                                              instance.name, dev)
7822 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7823 a8083063 Iustin Pop
7824 a8083063 Iustin Pop
    if dev.children:
7825 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7826 a8083063 Iustin Pop
                      for child in dev.children]
7827 a8083063 Iustin Pop
    else:
7828 a8083063 Iustin Pop
      dev_children = []
7829 a8083063 Iustin Pop
7830 a8083063 Iustin Pop
    data = {
7831 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
7832 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
7833 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
7834 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
7835 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
7836 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
7837 a8083063 Iustin Pop
      "children": dev_children,
7838 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
7839 c98162a7 Iustin Pop
      "size": dev.size,
7840 a8083063 Iustin Pop
      }
7841 a8083063 Iustin Pop
7842 a8083063 Iustin Pop
    return data
7843 a8083063 Iustin Pop
7844 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7845 a8083063 Iustin Pop
    """Gather and return data"""
7846 a8083063 Iustin Pop
    result = {}
7847 338e51e8 Iustin Pop
7848 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
7849 338e51e8 Iustin Pop
7850 a8083063 Iustin Pop
    for instance in self.wanted_instances:
7851 57821cac Iustin Pop
      if not self.op.static:
7852 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7853 57821cac Iustin Pop
                                                  instance.name,
7854 57821cac Iustin Pop
                                                  instance.hypervisor)
7855 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7856 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
7857 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
7858 57821cac Iustin Pop
          remote_state = "up"
7859 57821cac Iustin Pop
        else:
7860 57821cac Iustin Pop
          remote_state = "down"
7861 a8083063 Iustin Pop
      else:
7862 57821cac Iustin Pop
        remote_state = None
7863 0d68c45d Iustin Pop
      if instance.admin_up:
7864 a8083063 Iustin Pop
        config_state = "up"
7865 0d68c45d Iustin Pop
      else:
7866 0d68c45d Iustin Pop
        config_state = "down"
7867 a8083063 Iustin Pop
7868 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
7869 a8083063 Iustin Pop
               for device in instance.disks]
7870 a8083063 Iustin Pop
7871 a8083063 Iustin Pop
      idict = {
7872 a8083063 Iustin Pop
        "name": instance.name,
7873 a8083063 Iustin Pop
        "config_state": config_state,
7874 a8083063 Iustin Pop
        "run_state": remote_state,
7875 a8083063 Iustin Pop
        "pnode": instance.primary_node,
7876 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
7877 a8083063 Iustin Pop
        "os": instance.os,
7878 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
7879 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
7880 a8083063 Iustin Pop
        "disks": disks,
7881 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
7882 24838135 Iustin Pop
        "network_port": instance.network_port,
7883 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
7884 7736a5f2 Iustin Pop
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
7885 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
7886 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
7887 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
7888 90f72445 Iustin Pop
        "mtime": instance.mtime,
7889 90f72445 Iustin Pop
        "ctime": instance.ctime,
7890 033d58b0 Iustin Pop
        "uuid": instance.uuid,
7891 a8083063 Iustin Pop
        }
7892 a8083063 Iustin Pop
7893 a8083063 Iustin Pop
      result[instance.name] = idict
7894 a8083063 Iustin Pop
7895 a8083063 Iustin Pop
    return result
7896 a8083063 Iustin Pop
7897 a8083063 Iustin Pop
7898 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
7899 a8083063 Iustin Pop
  """Modifies an instances's parameters.
7900 a8083063 Iustin Pop

7901 a8083063 Iustin Pop
  """
7902 a8083063 Iustin Pop
  HPATH = "instance-modify"
7903 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7904 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
7905 1a5c7281 Guido Trotter
  REQ_BGL = False
7906 1a5c7281 Guido Trotter
7907 24991749 Iustin Pop
  def CheckArguments(self):
7908 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
7909 24991749 Iustin Pop
      self.op.nics = []
7910 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
7911 24991749 Iustin Pop
      self.op.disks = []
7912 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
7913 24991749 Iustin Pop
      self.op.beparams = {}
7914 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
7915 24991749 Iustin Pop
      self.op.hvparams = {}
7916 e29e9550 Iustin Pop
    if not hasattr(self.op, "disk_template"):
7917 e29e9550 Iustin Pop
      self.op.disk_template = None
7918 e29e9550 Iustin Pop
    if not hasattr(self.op, "remote_node"):
7919 e29e9550 Iustin Pop
      self.op.remote_node = None
7920 96b39bcc Iustin Pop
    if not hasattr(self.op, "os_name"):
7921 96b39bcc Iustin Pop
      self.op.os_name = None
7922 96b39bcc Iustin Pop
    if not hasattr(self.op, "force_variant"):
7923 96b39bcc Iustin Pop
      self.op.force_variant = False
7924 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
7925 e29e9550 Iustin Pop
    if not (self.op.nics or self.op.disks or self.op.disk_template or
7926 96b39bcc Iustin Pop
            self.op.hvparams or self.op.beparams or self.op.os_name):
7927 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
7928 24991749 Iustin Pop
7929 7736a5f2 Iustin Pop
    if self.op.hvparams:
7930 7736a5f2 Iustin Pop
      _CheckGlobalHvParams(self.op.hvparams)
7931 7736a5f2 Iustin Pop
7932 24991749 Iustin Pop
    # Disk validation
7933 24991749 Iustin Pop
    disk_addremove = 0
7934 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7935 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7936 24991749 Iustin Pop
        disk_addremove += 1
7937 24991749 Iustin Pop
        continue
7938 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7939 24991749 Iustin Pop
        disk_addremove += 1
7940 24991749 Iustin Pop
      else:
7941 24991749 Iustin Pop
        if not isinstance(disk_op, int):
7942 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
7943 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
7944 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
7945 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7946 8b46606c Guido Trotter
7947 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
7948 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
7949 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
7950 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
7951 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7952 24991749 Iustin Pop
        size = disk_dict.get('size', None)
7953 24991749 Iustin Pop
        if size is None:
7954 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
7955 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7956 24991749 Iustin Pop
        try:
7957 24991749 Iustin Pop
          size = int(size)
7958 691744c4 Iustin Pop
        except (TypeError, ValueError), err:
7959 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
7960 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
7961 24991749 Iustin Pop
        disk_dict['size'] = size
7962 24991749 Iustin Pop
      else:
7963 24991749 Iustin Pop
        # modification of disk
7964 24991749 Iustin Pop
        if 'size' in disk_dict:
7965 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
7966 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
7967 24991749 Iustin Pop
7968 24991749 Iustin Pop
    if disk_addremove > 1:
7969 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
7970 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
7971 24991749 Iustin Pop
7972 e29e9550 Iustin Pop
    if self.op.disks and self.op.disk_template is not None:
7973 e29e9550 Iustin Pop
      raise errors.OpPrereqError("Disk template conversion and other disk"
7974 e29e9550 Iustin Pop
                                 " changes not supported at the same time",
7975 e29e9550 Iustin Pop
                                 errors.ECODE_INVAL)
7976 e29e9550 Iustin Pop
7977 e29e9550 Iustin Pop
    if self.op.disk_template:
7978 e29e9550 Iustin Pop
      _CheckDiskTemplate(self.op.disk_template)
7979 e29e9550 Iustin Pop
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
7980 e29e9550 Iustin Pop
          self.op.remote_node is None):
7981 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
7982 e29e9550 Iustin Pop
                                   " one requires specifying a secondary node",
7983 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
7984 e29e9550 Iustin Pop
7985 24991749 Iustin Pop
    # NIC validation
7986 24991749 Iustin Pop
    nic_addremove = 0
7987 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7988 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7989 24991749 Iustin Pop
        nic_addremove += 1
7990 24991749 Iustin Pop
        continue
7991 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7992 24991749 Iustin Pop
        nic_addremove += 1
7993 24991749 Iustin Pop
      else:
7994 24991749 Iustin Pop
        if not isinstance(nic_op, int):
7995 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
7996 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
7997 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
7998 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7999 24991749 Iustin Pop
8000 24991749 Iustin Pop
      # nic_dict should be a dict
8001 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
8002 24991749 Iustin Pop
      if nic_ip is not None:
8003 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
8004 24991749 Iustin Pop
          nic_dict['ip'] = None
8005 24991749 Iustin Pop
        else:
8006 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
8007 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8008 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
8009 5c44da6a Guido Trotter
8010 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
8011 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
8012 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
8013 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8014 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
8015 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8016 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
8017 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8018 cd098c41 Guido Trotter
        nic_dict['link'] = None
8019 cd098c41 Guido Trotter
8020 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
8021 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
8022 5c44da6a Guido Trotter
        if nic_mac is None:
8023 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
8024 5c44da6a Guido Trotter
8025 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8026 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8027 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8028 82187135 René Nussbaumer
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8029 82187135 René Nussbaumer
8030 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8031 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8032 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
8033 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8034 5c44da6a Guido Trotter
8035 24991749 Iustin Pop
    if nic_addremove > 1:
8036 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
8037 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8038 24991749 Iustin Pop
8039 1a5c7281 Guido Trotter
  def ExpandNames(self):
8040 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
8041 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
8042 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8043 74409b12 Iustin Pop
8044 74409b12 Iustin Pop
  def DeclareLocks(self, level):
8045 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
8046 74409b12 Iustin Pop
      self._LockInstancesNodes()
8047 e29e9550 Iustin Pop
      if self.op.disk_template and self.op.remote_node:
8048 e29e9550 Iustin Pop
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8049 e29e9550 Iustin Pop
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8050 a8083063 Iustin Pop
8051 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8052 a8083063 Iustin Pop
    """Build hooks env.
8053 a8083063 Iustin Pop

8054 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
8055 a8083063 Iustin Pop

8056 a8083063 Iustin Pop
    """
8057 396e1b78 Michael Hanselmann
    args = dict()
8058 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
8059 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
8060 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
8061 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
8062 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8063 d8dcf3c9 Guido Trotter
    # information at all.
8064 d8dcf3c9 Guido Trotter
    if self.op.nics:
8065 d8dcf3c9 Guido Trotter
      args['nics'] = []
8066 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
8067 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
8068 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
8069 d8dcf3c9 Guido Trotter
        if idx in nic_override:
8070 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
8071 d8dcf3c9 Guido Trotter
        else:
8072 d8dcf3c9 Guido Trotter
          this_nic_override = {}
8073 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
8074 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
8075 d8dcf3c9 Guido Trotter
        else:
8076 d8dcf3c9 Guido Trotter
          ip = nic.ip
8077 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
8078 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
8079 d8dcf3c9 Guido Trotter
        else:
8080 d8dcf3c9 Guido Trotter
          mac = nic.mac
8081 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
8082 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
8083 62f0dd02 Guido Trotter
        else:
8084 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
8085 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8086 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8087 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8088 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
8089 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
8090 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
8091 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
8092 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8093 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8094 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8095 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
8096 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
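    # Sketch of the override being built (values are hypothetical): every
    # entry of args['nics'] is an (ip, mac, mode, link) tuple, for example
    #   ("192.0.2.10", "aa:00:00:35:d2:0e", "bridged", "xen-br0")
    # matching the per-NIC data normally exported by _BuildInstanceHookEnv*.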
8097 d8dcf3c9 Guido Trotter
8098 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8099 e29e9550 Iustin Pop
    if self.op.disk_template:
8100 e29e9550 Iustin Pop
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8101 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8102 a8083063 Iustin Pop
    return env, nl, nl
8103 a8083063 Iustin Pop
8104 7e950d31 Iustin Pop
  @staticmethod
8105 7e950d31 Iustin Pop
  def _GetUpdatedParams(old_params, update_dict,
8106 0329617a Guido Trotter
                        default_values, parameter_types):
8107 0329617a Guido Trotter
    """Return the new params dict for the given params.
8108 0329617a Guido Trotter

8109 0329617a Guido Trotter
    @type old_params: dict
8110 f2fd87d7 Iustin Pop
    @param old_params: old parameters
8111 0329617a Guido Trotter
    @type update_dict: dict
8112 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
8113 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
8114 f2fd87d7 Iustin Pop
                        parameter to its default value
8115 0329617a Guido Trotter
    @type default_values: dict
8116 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
8117 0329617a Guido Trotter
    @type parameter_types: dict
8118 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
8119 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
8120 0329617a Guido Trotter
    @rtype: (dict, dict)
8121 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
8122 0329617a Guido Trotter

8123 0329617a Guido Trotter
    """
8124 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
8125 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
8126 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
8127 0329617a Guido Trotter
        try:
8128 0329617a Guido Trotter
          del params_copy[key]
8129 0329617a Guido Trotter
        except KeyError:
8130 0329617a Guido Trotter
          pass
8131 0329617a Guido Trotter
      else:
8132 0329617a Guido Trotter
        params_copy[key] = val
8133 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
8134 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
8135 0329617a Guido Trotter
    return (params_copy, params_filled)
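    # Worked example (parameter names and values are hypothetical, shown
    # only to illustrate the merge semantics): with
    #   old_params     = {'kernel_path': '/custom/vmlinuz'}
    #   update_dict    = {'kernel_path': constants.VALUE_DEFAULT,
    #                     'root_path': '/dev/xvda1'}
    #   default_values = {'kernel_path': '/boot/vmlinuz',
    #                     'root_path': '/dev/sda'}
    # the result would be
    #   params_copy   = {'root_path': '/dev/xvda1'}
    #   params_filled = {'kernel_path': '/boot/vmlinuz',
    #                    'root_path': '/dev/xvda1'}
    # i.e. VALUE_DEFAULT drops the per-instance override and the filled dict
    # falls back to the supplied defaults (ForceDictType checks left aside).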
8136 0329617a Guido Trotter
8137 a8083063 Iustin Pop
  def CheckPrereq(self):
8138 a8083063 Iustin Pop
    """Check prerequisites.
8139 a8083063 Iustin Pop

8140 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
8141 a8083063 Iustin Pop

8142 a8083063 Iustin Pop
    """
8143 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
8144 a8083063 Iustin Pop
8145 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
8146 31a853d2 Iustin Pop
8147 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8148 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
8149 1a5c7281 Guido Trotter
    assert self.instance is not None, \
8150 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
8151 6b12959c Iustin Pop
    pnode = instance.primary_node
8152 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
8153 74409b12 Iustin Pop
8154 e29e9550 Iustin Pop
    if self.op.disk_template:
8155 e29e9550 Iustin Pop
      if instance.disk_template == self.op.disk_template:
8156 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Instance already has disk template %s" %
8157 e29e9550 Iustin Pop
                                   instance.disk_template, errors.ECODE_INVAL)
8158 e29e9550 Iustin Pop
8159 e29e9550 Iustin Pop
      if (instance.disk_template,
8160 e29e9550 Iustin Pop
          self.op.disk_template) not in self._DISK_CONVERSIONS:
8161 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Unsupported disk template conversion from"
8162 e29e9550 Iustin Pop
                                   " %s to %s" % (instance.disk_template,
8163 e29e9550 Iustin Pop
                                                  self.op.disk_template),
8164 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8165 e29e9550 Iustin Pop
      if self.op.disk_template in constants.DTS_NET_MIRROR:
8166 e29e9550 Iustin Pop
        _CheckNodeOnline(self, self.op.remote_node)
8167 e29e9550 Iustin Pop
        _CheckNodeNotDrained(self, self.op.remote_node)
8168 e29e9550 Iustin Pop
        disks = [{"size": d.size} for d in instance.disks]
8169 e29e9550 Iustin Pop
        required = _ComputeDiskSize(self.op.disk_template, disks)
8170 e29e9550 Iustin Pop
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8171 e29e9550 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot change disk template")
8172 e29e9550 Iustin Pop
8173 338e51e8 Iustin Pop
    # hvparams processing
8174 74409b12 Iustin Pop
    if self.op.hvparams:
8175 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
8176 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
8177 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
8178 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
8179 74409b12 Iustin Pop
      # local check
8180 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
8181 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
8182 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8183 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
8184 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
8185 338e51e8 Iustin Pop
    else:
8186 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
8187 338e51e8 Iustin Pop
8188 338e51e8 Iustin Pop
    # beparams processing
8189 338e51e8 Iustin Pop
    if self.op.beparams:
8190 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
8191 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
8192 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
8193 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
8194 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
8195 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
8196 338e51e8 Iustin Pop
    else:
8197 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
8198 74409b12 Iustin Pop
8199 cfefe007 Guido Trotter
    self.warn = []
8200 647a5d80 Iustin Pop
8201 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
8202 647a5d80 Iustin Pop
      mem_check_list = [pnode]
8203 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8204 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
8205 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
8206 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
8207 72737a7f Iustin Pop
                                                  instance.hypervisor)
8208 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8209 72737a7f Iustin Pop
                                         instance.hypervisor)
8210 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
8211 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
8212 070e998b Iustin Pop
      if msg:
8213 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
8214 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
8215 070e998b Iustin Pop
                         (pnode, msg))
8216 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
8217 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
8218 070e998b Iustin Pop
                         " free memory information" % pnode)
8219 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
8220 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
8221 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
8222 cfefe007 Guido Trotter
      else:
8223 7ad1af4a Iustin Pop
        if instance_info.payload:
8224 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
8225 cfefe007 Guido Trotter
        else:
8226 cfefe007 Guido Trotter
          # Assume instance not running
8227 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
8228 cfefe007 Guido Trotter
          # and we have no other way to check)
8229 cfefe007 Guido Trotter
          current_mem = 0
8230 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8231 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
8232 cfefe007 Guido Trotter
        if miss_mem > 0:
8233 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
8234 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
8235 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
8236 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
8237 cfefe007 Guido Trotter
8238 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8239 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
8240 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
8241 ea33068f Iustin Pop
            continue
8242 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
8243 070e998b Iustin Pop
          if msg:
8244 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
8245 070e998b Iustin Pop
                             (node, msg))
8246 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
8247 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
8248 070e998b Iustin Pop
                             " memory information" % node)
8249 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8250 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
8251 647a5d80 Iustin Pop
                             " secondary node %s" % node)
8252 5bc84f33 Alexander Schreiber
8253 24991749 Iustin Pop
    # NIC processing
8254 cd098c41 Guido Trotter
    self.nic_pnew = {}
8255 cd098c41 Guido Trotter
    self.nic_pinst = {}
8256 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8257 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8258 24991749 Iustin Pop
        if not instance.nics:
8259 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8260 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8261 24991749 Iustin Pop
        continue
8262 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
8263 24991749 Iustin Pop
        # an existing nic
8264 21bcb9aa Michael Hanselmann
        if not instance.nics:
8265 21bcb9aa Michael Hanselmann
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8266 21bcb9aa Michael Hanselmann
                                     " no NICs" % nic_op,
8267 21bcb9aa Michael Hanselmann
                                     errors.ECODE_INVAL)
8268 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
8269 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8270 24991749 Iustin Pop
                                     " are 0 to %d" %
8271 21bcb9aa Michael Hanselmann
                                     (nic_op, len(instance.nics) - 1),
8272 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8273 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
8274 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
8275 cd098c41 Guido Trotter
      else:
8276 cd098c41 Guido Trotter
        old_nic_params = {}
8277 cd098c41 Guido Trotter
        old_nic_ip = None
8278 cd098c41 Guido Trotter
8279 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
8280 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
8281 cd098c41 Guido Trotter
                                 if key in nic_dict])
8282 cd098c41 Guido Trotter
8283 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
8284 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8285 cd098c41 Guido Trotter
8286 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
8287 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
8288 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
8289 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
8290 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8291 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
8292 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
8293 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8294 cd098c41 Guido Trotter
8295 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
8296 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8297 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8298 35c0c8da Iustin Pop
        if msg:
8299 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8300 24991749 Iustin Pop
          if self.force:
8301 24991749 Iustin Pop
            self.warn.append(msg)
8302 24991749 Iustin Pop
          else:
8303 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8304 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
8305 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
8306 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
8307 cd098c41 Guido Trotter
        else:
8308 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
8309 cd098c41 Guido Trotter
        if nic_ip is None:
8310 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
8311 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
8312 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8313 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8314 5c44da6a Guido Trotter
        if nic_mac is None:
8315 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
8316 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8317 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8318 5c44da6a Guido Trotter
          # otherwise generate the mac
8319 36b66e6e Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
8320 5c44da6a Guido Trotter
        else:
8321 5c44da6a Guido Trotter
          # or validate/reserve the current one
8322 36b66e6e Guido Trotter
          try:
8323 36b66e6e Guido Trotter
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
8324 36b66e6e Guido Trotter
          except errors.ReservationError:
8325 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
8326 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
8327 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
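    # Recap of the NIC loop above: self.op.nics is a list of (nic_op, nic_dict)
    # pairs where nic_op is DDM_ADD, DDM_REMOVE or the index of an existing
    # NIC.  For each entry self.nic_pinst[nic_op] keeps only the explicit
    # overrides (what Exec will store on the instance) while
    # self.nic_pnew[nic_op] holds the same values filled with the cluster
    # defaults, which is what the mode/link checks operate on.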
8328 24991749 Iustin Pop
8329 24991749 Iustin Pop
    # DISK processing
8330 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
8331 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
8332 5c983ee5 Iustin Pop
                                 " diskless instances",
8333 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8334 1122eb25 Iustin Pop
    for disk_op, _ in self.op.disks:
8335 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8336 24991749 Iustin Pop
        if len(instance.disks) == 1:
8337 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
8338 31624382 Iustin Pop
                                     " an instance", errors.ECODE_INVAL)
8339 31624382 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot remove disks")
8340 24991749 Iustin Pop
8341 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
8342 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
8343 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
8344 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
8345 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
8346 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
8347 24991749 Iustin Pop
        # an existing disk
8348 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
8349 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
8350 24991749 Iustin Pop
                                     " are 0 to %d" %
8351 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
8352 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
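    # self.op.disks follows the same (op, dict) convention: DDM_REMOVE always
    # drops the last disk (only its feasibility is checked here, Exec does the
    # actual pop), DDM_ADD is bounded by constants.MAX_DISKS, and a numeric op
    # selects an existing disk whose mode will be changed in Exec.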
8353 24991749 Iustin Pop
8354 96b39bcc Iustin Pop
    # OS change
8355 96b39bcc Iustin Pop
    if self.op.os_name and not self.op.force:
8356 96b39bcc Iustin Pop
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8357 96b39bcc Iustin Pop
                      self.op.force_variant)
8358 96b39bcc Iustin Pop
8359 a8083063 Iustin Pop
    return
8360 a8083063 Iustin Pop
8361 e29e9550 Iustin Pop
  def _ConvertPlainToDrbd(self, feedback_fn):
8362 e29e9550 Iustin Pop
    """Converts an instance from plain to drbd.
8363 e29e9550 Iustin Pop

8364 e29e9550 Iustin Pop
    """
8365 e29e9550 Iustin Pop
    feedback_fn("Converting template to drbd")
8366 e29e9550 Iustin Pop
    instance = self.instance
8367 e29e9550 Iustin Pop
    pnode = instance.primary_node
8368 e29e9550 Iustin Pop
    snode = self.op.remote_node
8369 e29e9550 Iustin Pop
8370 e29e9550 Iustin Pop
    # create a fake disk info for _GenerateDiskTemplate
8371 e29e9550 Iustin Pop
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
8372 e29e9550 Iustin Pop
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
8373 e29e9550 Iustin Pop
                                      instance.name, pnode, [snode],
8374 e29e9550 Iustin Pop
                                      disk_info, None, None, 0)
8375 e29e9550 Iustin Pop
    info = _GetInstanceInfoText(instance)
8376 e29e9550 Iustin Pop
    feedback_fn("Creating aditional volumes...")
8377 e29e9550 Iustin Pop
    # first, create the missing data and meta devices
8378 e29e9550 Iustin Pop
    for disk in new_disks:
8379 e29e9550 Iustin Pop
      # unfortunately this is... not too nice
8380 e29e9550 Iustin Pop
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
8381 e29e9550 Iustin Pop
                            info, True)
8382 e29e9550 Iustin Pop
      for child in disk.children:
8383 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
8384 e29e9550 Iustin Pop
    # at this stage, all new LVs have been created, we can rename the
8385 e29e9550 Iustin Pop
    # old ones
8386 e29e9550 Iustin Pop
    feedback_fn("Renaming original volumes...")
8387 e29e9550 Iustin Pop
    rename_list = [(o, n.children[0].logical_id)
8388 e29e9550 Iustin Pop
                   for (o, n) in zip(instance.disks, new_disks)]
8389 e29e9550 Iustin Pop
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
8390 e29e9550 Iustin Pop
    result.Raise("Failed to rename original LVs")
8391 e29e9550 Iustin Pop
8392 e29e9550 Iustin Pop
    feedback_fn("Initializing DRBD devices...")
8393 e29e9550 Iustin Pop
    # all child devices are in place, we can now create the DRBD devices
8394 e29e9550 Iustin Pop
    for disk in new_disks:
8395 e29e9550 Iustin Pop
      for node in [pnode, snode]:
8396 e29e9550 Iustin Pop
        f_create = node == pnode
8397 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
8398 e29e9550 Iustin Pop
8399 e29e9550 Iustin Pop
    # at this point, the instance has been modified
8400 e29e9550 Iustin Pop
    instance.disk_template = constants.DT_DRBD8
8401 e29e9550 Iustin Pop
    instance.disks = new_disks
8402 e29e9550 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8403 e29e9550 Iustin Pop
8404 e29e9550 Iustin Pop
    # disks are created, waiting for sync
8405 e29e9550 Iustin Pop
    disk_abort = not _WaitForSync(self, instance)
8406 e29e9550 Iustin Pop
    if disk_abort:
8407 e29e9550 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
8408 e29e9550 Iustin Pop
                               " this instance, please cleanup manually")
8409 e29e9550 Iustin Pop
8410 2f414c48 Iustin Pop
  def _ConvertDrbdToPlain(self, feedback_fn):
8411 2f414c48 Iustin Pop
    """Converts an instance from drbd to plain.
8412 2f414c48 Iustin Pop

8413 2f414c48 Iustin Pop
    """
8414 2f414c48 Iustin Pop
    instance = self.instance
8415 2f414c48 Iustin Pop
    assert len(instance.secondary_nodes) == 1
8416 2f414c48 Iustin Pop
    pnode = instance.primary_node
8417 2f414c48 Iustin Pop
    snode = instance.secondary_nodes[0]
8418 2f414c48 Iustin Pop
    feedback_fn("Converting template to plain")
8419 2f414c48 Iustin Pop
8420 2f414c48 Iustin Pop
    old_disks = instance.disks
8421 2f414c48 Iustin Pop
    new_disks = [d.children[0] for d in old_disks]
8422 2f414c48 Iustin Pop
8423 2f414c48 Iustin Pop
    # copy over size and mode
8424 2f414c48 Iustin Pop
    for parent, child in zip(old_disks, new_disks):
8425 2f414c48 Iustin Pop
      child.size = parent.size
8426 2f414c48 Iustin Pop
      child.mode = parent.mode
8427 2f414c48 Iustin Pop
8428 2f414c48 Iustin Pop
    # update instance structure
8429 2f414c48 Iustin Pop
    instance.disks = new_disks
8430 2f414c48 Iustin Pop
    instance.disk_template = constants.DT_PLAIN
8431 2f414c48 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8432 2f414c48 Iustin Pop
8433 2f414c48 Iustin Pop
    feedback_fn("Removing volumes on the secondary node...")
8434 2f414c48 Iustin Pop
    for disk in old_disks:
8435 2f414c48 Iustin Pop
      self.cfg.SetDiskID(disk, snode)
8436 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
8437 2f414c48 Iustin Pop
      if msg:
8438 2f414c48 Iustin Pop
        self.LogWarning("Could not remove block device %s on node %s,"
8439 2f414c48 Iustin Pop
                        " continuing anyway: %s", disk.iv_name, snode, msg)
8440 2f414c48 Iustin Pop
8441 2f414c48 Iustin Pop
    feedback_fn("Removing unneeded volumes on the primary node...")
8442 2f414c48 Iustin Pop
    for idx, disk in enumerate(old_disks):
8443 2f414c48 Iustin Pop
      meta = disk.children[1]
8444 2f414c48 Iustin Pop
      self.cfg.SetDiskID(meta, pnode)
8445 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
8446 2f414c48 Iustin Pop
      if msg:
8447 2f414c48 Iustin Pop
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
8448 2f414c48 Iustin Pop
                        " continuing anyway: %s", idx, pnode, msg)
8449 2f414c48 Iustin Pop
8450 2f414c48 Iustin Pop
8451 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8452 a8083063 Iustin Pop
    """Modifies an instance.
8453 a8083063 Iustin Pop

8454 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
8455 24991749 Iustin Pop

8456 a8083063 Iustin Pop
    """
8457 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
8458 cfefe007 Guido Trotter
    # feedback_fn there.
8459 cfefe007 Guido Trotter
    for warn in self.warn:
8460 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
8461 cfefe007 Guido Trotter
8462 a8083063 Iustin Pop
    result = []
8463 a8083063 Iustin Pop
    instance = self.instance
8464 24991749 Iustin Pop
    # disk changes
8465 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8466 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8467 24991749 Iustin Pop
        # remove the last disk
8468 24991749 Iustin Pop
        device = instance.disks.pop()
8469 24991749 Iustin Pop
        device_idx = len(instance.disks)
8470 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8471 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
8472 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8473 e1bc0878 Iustin Pop
          if msg:
8474 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8475 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
8476 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
8477 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8478 24991749 Iustin Pop
        # add a new disk
8479 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
8480 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
8481 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
8482 24991749 Iustin Pop
        else:
8483 24991749 Iustin Pop
          file_driver = file_path = None
8484 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
8485 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
8486 24991749 Iustin Pop
                                         instance.disk_template,
8487 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
8488 24991749 Iustin Pop
                                         instance.secondary_nodes,
8489 24991749 Iustin Pop
                                         [disk_dict],
8490 24991749 Iustin Pop
                                         file_path,
8491 24991749 Iustin Pop
                                         file_driver,
8492 24991749 Iustin Pop
                                         disk_idx_base)[0]
8493 24991749 Iustin Pop
        instance.disks.append(new_disk)
8494 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
8495 24991749 Iustin Pop
8496 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
8497 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
8498 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
8499 24991749 Iustin Pop
        #HARDCODE
8500 428958aa Iustin Pop
        for node in instance.all_nodes:
8501 428958aa Iustin Pop
          f_create = node == instance.primary_node
8502 796cab27 Iustin Pop
          try:
8503 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
8504 428958aa Iustin Pop
                            f_create, info, f_create)
8505 1492cca7 Iustin Pop
          except errors.OpExecError, err:
8506 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
8507 428958aa Iustin Pop
                            " node %s: %s",
8508 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
8509 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8510 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
8511 24991749 Iustin Pop
      else:
8512 24991749 Iustin Pop
        # change a given disk
8513 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
8514 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8515 e29e9550 Iustin Pop
8516 e29e9550 Iustin Pop
    if self.op.disk_template:
8517 e29e9550 Iustin Pop
      r_shut = _ShutdownInstanceDisks(self, instance)
8518 e29e9550 Iustin Pop
      if not r_shut:
8519 e29e9550 Iustin Pop
        raise errors.OpExecError("Cannot shutdow instance disks, unable to"
8520 e29e9550 Iustin Pop
                                 " proceed with disk template conversion")
8521 e29e9550 Iustin Pop
      mode = (instance.disk_template, self.op.disk_template)
8522 e29e9550 Iustin Pop
      try:
8523 e29e9550 Iustin Pop
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
8524 e29e9550 Iustin Pop
      except:
8525 e29e9550 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
8526 e29e9550 Iustin Pop
        raise
8527 e29e9550 Iustin Pop
      result.append(("disk_template", self.op.disk_template))
8528 e29e9550 Iustin Pop
8529 24991749 Iustin Pop
    # NIC changes
8530 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8531 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8532 24991749 Iustin Pop
        # remove the last nic
8533 24991749 Iustin Pop
        del instance.nics[-1]
8534 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
8535 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8536 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
8537 5c44da6a Guido Trotter
        mac = nic_dict['mac']
8538 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
8539 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
8540 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8541 24991749 Iustin Pop
        instance.nics.append(new_nic)
8542 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
8543 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8544 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
8545 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8546 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8547 cd098c41 Guido Trotter
                       )))
8548 24991749 Iustin Pop
      else:
8549 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
8550 24991749 Iustin Pop
          if key in nic_dict:
8551 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
8552 beabf067 Guido Trotter
        if nic_op in self.nic_pinst:
8553 beabf067 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8554 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
8555 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
8556 24991749 Iustin Pop
8557 24991749 Iustin Pop
    # hvparams changes
8558 74409b12 Iustin Pop
    if self.op.hvparams:
8559 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
8560 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
8561 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
8562 24991749 Iustin Pop
8563 24991749 Iustin Pop
    # beparams changes
8564 338e51e8 Iustin Pop
    if self.op.beparams:
8565 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
8566 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
8567 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
8568 a8083063 Iustin Pop
8569 96b39bcc Iustin Pop
    # OS change
8570 96b39bcc Iustin Pop
    if self.op.os_name:
8571 96b39bcc Iustin Pop
      instance.os = self.op.os_name
8572 96b39bcc Iustin Pop
8573 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
8574 a8083063 Iustin Pop
8575 a8083063 Iustin Pop
    return result
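    # The return value is a list of (parameter, new value) pairs, e.g.
    # ("disk/1", "add:size=10240,mode=rw") or ("be/memory", 512) with
    # illustrative values; callers can show it as a summary of the changes
    # that were applied.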
8576 a8083063 Iustin Pop
8577 e29e9550 Iustin Pop
  _DISK_CONVERSIONS = {
8578 e29e9550 Iustin Pop
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
8579 2f414c48 Iustin Pop
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
8580 e29e9550 Iustin Pop
    }
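  # _DISK_CONVERSIONS doubles as the "is this conversion supported" check in
  # CheckPrereq and as the dispatch table in Exec, where the selected entry is
  # invoked as self._DISK_CONVERSIONS[mode](self, feedback_fn).  It has to be
  # defined after the two converter methods so that their names are already
  # bound when the class body is evaluated.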
8581 a8083063 Iustin Pop
8582 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
8583 a8083063 Iustin Pop
  """Query the exports list
8584 a8083063 Iustin Pop

8585 a8083063 Iustin Pop
  """
8586 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
8587 21a15682 Guido Trotter
  REQ_BGL = False
8588 21a15682 Guido Trotter
8589 21a15682 Guido Trotter
  def ExpandNames(self):
8590 21a15682 Guido Trotter
    self.needed_locks = {}
8591 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
8592 21a15682 Guido Trotter
    if not self.op.nodes:
8593 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8594 21a15682 Guido Trotter
    else:
8595 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
8596 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
8597 a8083063 Iustin Pop
8598 a8083063 Iustin Pop
  def CheckPrereq(self):
8599 21a15682 Guido Trotter
    """Check prerequisites.
8600 a8083063 Iustin Pop

8601 a8083063 Iustin Pop
    """
8602 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8603 a8083063 Iustin Pop
8604 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8605 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
8606 a8083063 Iustin Pop

8607 e4376078 Iustin Pop
    @rtype: dict
8608 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
8609 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
8610 e4376078 Iustin Pop
        that node.
8611 a8083063 Iustin Pop

8612 a8083063 Iustin Pop
    """
8613 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
8614 b04285f2 Guido Trotter
    result = {}
8615 b04285f2 Guido Trotter
    for node in rpcresult:
8616 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
8617 b04285f2 Guido Trotter
        result[node] = False
8618 b04285f2 Guido Trotter
      else:
8619 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
8620 b04285f2 Guido Trotter
8621 b04285f2 Guido Trotter
    return result
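    # Illustrative shape of the returned dict (hypothetical names):
    #   {"node1.example.com": ["inst1.example.com", "inst2.example.com"],
    #    "node2.example.com": False}
    # where False marks a node whose export list could not be retrieved.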
8622 a8083063 Iustin Pop
8623 a8083063 Iustin Pop
8624 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
8625 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
8626 a8083063 Iustin Pop

8627 a8083063 Iustin Pop
  """
8628 a8083063 Iustin Pop
  HPATH = "instance-export"
8629 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8630 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8631 6657590e Guido Trotter
  REQ_BGL = False
8632 6657590e Guido Trotter
8633 17c3f802 Guido Trotter
  def CheckArguments(self):
8634 17c3f802 Guido Trotter
    """Check the arguments.
8635 17c3f802 Guido Trotter

8636 17c3f802 Guido Trotter
    """
8637 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8638 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8639 17c3f802 Guido Trotter
8640 6657590e Guido Trotter
  def ExpandNames(self):
8641 6657590e Guido Trotter
    self._ExpandAndLockInstance()
8642 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
8643 6657590e Guido Trotter
    #
8644 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
8645 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
8646 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
8647 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
8648 6657590e Guido Trotter
    #    then one to remove, after
8649 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
8650 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8651 6657590e Guido Trotter
8652 6657590e Guido Trotter
  def DeclareLocks(self, level):
8653 6657590e Guido Trotter
    """Last minute lock declaration."""
8654 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
8655 a8083063 Iustin Pop
8656 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8657 a8083063 Iustin Pop
    """Build hooks env.
8658 a8083063 Iustin Pop

8659 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
8660 a8083063 Iustin Pop

8661 a8083063 Iustin Pop
    """
8662 a8083063 Iustin Pop
    env = {
8663 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
8664 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8665 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8666 a8083063 Iustin Pop
      }
8667 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8668 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8669 a8083063 Iustin Pop
          self.op.target_node]
8670 a8083063 Iustin Pop
    return env, nl, nl
8671 a8083063 Iustin Pop
8672 a8083063 Iustin Pop
  def CheckPrereq(self):
8673 a8083063 Iustin Pop
    """Check prerequisites.
8674 a8083063 Iustin Pop

8675 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
8676 a8083063 Iustin Pop

8677 a8083063 Iustin Pop
    """
8678 6657590e Guido Trotter
    instance_name = self.op.instance_name
8679 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
8680 6657590e Guido Trotter
    assert self.instance is not None, \
8681 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
8682 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
8683 a8083063 Iustin Pop
8684 cf26a87a Iustin Pop
    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8685 cf26a87a Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
8686 cf26a87a Iustin Pop
    assert self.dst_node is not None
8687 a8083063 Iustin Pop
8688 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
8689 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
8690 a8083063 Iustin Pop
8691 b6023d6c Manuel Franceschini
    # instance disk type verification
8692 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
8693 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
8694 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
8695 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
8696 b6023d6c Manuel Franceschini
8697 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8698 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
8699 a8083063 Iustin Pop

8700 a8083063 Iustin Pop
    """
8701 a8083063 Iustin Pop
    instance = self.instance
8702 a8083063 Iustin Pop
    dst_node = self.dst_node
8703 a8083063 Iustin Pop
    src_node = instance.primary_node
8704 37972df0 Michael Hanselmann
8705 a8083063 Iustin Pop
    if self.op.shutdown:
8706 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
8707 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
8708 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
8709 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
8710 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
8711 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
8712 a8083063 Iustin Pop
8713 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
8714 a8083063 Iustin Pop
8715 a8083063 Iustin Pop
    snap_disks = []
8716 a8083063 Iustin Pop
8717 998c712c Iustin Pop
    # set the disk IDs correctly since call_instance_start needs the
8718 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
8719 998c712c Iustin Pop
    for disk in instance.disks:
8720 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
8721 998c712c Iustin Pop
8722 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
8723 3e53a60b Michael Hanselmann
8724 3e53a60b Michael Hanselmann
    if activate_disks:
8725 3e53a60b Michael Hanselmann
      # Activate the instance disks if we're exporting a stopped instance
8726 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
8727 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
8728 3e53a60b Michael Hanselmann
8729 a8083063 Iustin Pop
    try:
8730 3e53a60b Michael Hanselmann
      # per-disk results
8731 3e53a60b Michael Hanselmann
      dresults = []
8732 3e53a60b Michael Hanselmann
      try:
8733 3e53a60b Michael Hanselmann
        for idx, disk in enumerate(instance.disks):
8734 3e53a60b Michael Hanselmann
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
8735 3e53a60b Michael Hanselmann
                      (idx, src_node))
8736 3e53a60b Michael Hanselmann
8737 3e53a60b Michael Hanselmann
          # result.payload will be a snapshot of an lvm leaf of the one we
8738 3e53a60b Michael Hanselmann
          # passed
8739 3e53a60b Michael Hanselmann
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
8740 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8741 3e53a60b Michael Hanselmann
          if msg:
8742 3e53a60b Michael Hanselmann
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
8743 3e53a60b Michael Hanselmann
                            idx, src_node, msg)
8744 3e53a60b Michael Hanselmann
            snap_disks.append(False)
8745 3e53a60b Michael Hanselmann
          else:
8746 3e53a60b Michael Hanselmann
            disk_id = (vgname, result.payload)
8747 3e53a60b Michael Hanselmann
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
8748 3e53a60b Michael Hanselmann
                                   logical_id=disk_id, physical_id=disk_id,
8749 3e53a60b Michael Hanselmann
                                   iv_name=disk.iv_name)
8750 3e53a60b Michael Hanselmann
            snap_disks.append(new_dev)
8751 37972df0 Michael Hanselmann
8752 3e53a60b Michael Hanselmann
      finally:
8753 3e53a60b Michael Hanselmann
        if self.op.shutdown and instance.admin_up:
8754 3e53a60b Michael Hanselmann
          feedback_fn("Starting instance %s" % instance.name)
8755 3e53a60b Michael Hanselmann
          result = self.rpc.call_instance_start(src_node, instance, None, None)
8756 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8757 3e53a60b Michael Hanselmann
          if msg:
8758 3e53a60b Michael Hanselmann
            _ShutdownInstanceDisks(self, instance)
8759 3e53a60b Michael Hanselmann
            raise errors.OpExecError("Could not start instance: %s" % msg)
8760 3e53a60b Michael Hanselmann
8761 3e53a60b Michael Hanselmann
      # TODO: check for size
8762 3e53a60b Michael Hanselmann
8763 3e53a60b Michael Hanselmann
      cluster_name = self.cfg.GetClusterName()
8764 3e53a60b Michael Hanselmann
      for idx, dev in enumerate(snap_disks):
8765 3e53a60b Michael Hanselmann
        feedback_fn("Exporting snapshot %s from %s to %s" %
8766 3e53a60b Michael Hanselmann
                    (idx, src_node, dst_node.name))
8767 3e53a60b Michael Hanselmann
        if dev:
8768 4a0e011f Iustin Pop
          # FIXME: pass debug from opcode to backend
8769 3e53a60b Michael Hanselmann
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
8770 4a0e011f Iustin Pop
                                                 instance, cluster_name,
8771 dd713605 Iustin Pop
                                                 idx, self.op.debug_level)
8772 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8773 3e53a60b Michael Hanselmann
          if msg:
8774 3e53a60b Michael Hanselmann
            self.LogWarning("Could not export disk/%s from node %s to"
8775 3e53a60b Michael Hanselmann
                            " node %s: %s", idx, src_node, dst_node.name, msg)
8776 3e53a60b Michael Hanselmann
            dresults.append(False)
8777 3e53a60b Michael Hanselmann
          else:
8778 3e53a60b Michael Hanselmann
            dresults.append(True)
8779 3e53a60b Michael Hanselmann
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
8780 3e53a60b Michael Hanselmann
          if msg:
8781 3e53a60b Michael Hanselmann
            self.LogWarning("Could not remove snapshot for disk/%d from node"
8782 3e53a60b Michael Hanselmann
                            " %s: %s", idx, src_node, msg)
8783 19d7f90a Guido Trotter
        else:
8784 084f05a5 Iustin Pop
          dresults.append(False)
8785 a8083063 Iustin Pop
8786 3e53a60b Michael Hanselmann
      feedback_fn("Finalizing export on %s" % dst_node.name)
8787 3e53a60b Michael Hanselmann
      result = self.rpc.call_finalize_export(dst_node.name, instance,
8788 3e53a60b Michael Hanselmann
                                             snap_disks)
8789 3e53a60b Michael Hanselmann
      fin_resu = True
8790 3e53a60b Michael Hanselmann
      msg = result.fail_msg
8791 3e53a60b Michael Hanselmann
      if msg:
8792 3e53a60b Michael Hanselmann
        self.LogWarning("Could not finalize export for instance %s"
8793 3e53a60b Michael Hanselmann
                        " on node %s: %s", instance.name, dst_node.name, msg)
8794 3e53a60b Michael Hanselmann
        fin_resu = False
8795 3e53a60b Michael Hanselmann
8796 3e53a60b Michael Hanselmann
    finally:
8797 3e53a60b Michael Hanselmann
      if activate_disks:
8798 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
8799 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
8800 a8083063 Iustin Pop
8801 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
8802 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
8803 a8083063 Iustin Pop
8804 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
8805 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
8806 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
8807 35fbcd11 Iustin Pop
    iname = instance.name
8808 a8083063 Iustin Pop
    if nodelist:
8809 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8810 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
8811 a8083063 Iustin Pop
      for node in exportlist:
8812 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
8813 781de953 Iustin Pop
          continue
8814 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
8815 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8816 35fbcd11 Iustin Pop
          if msg:
8817 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
8818 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
8819 084f05a5 Iustin Pop
    return fin_resu, dresults
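    # The two values returned here end up as the job result: fin_resu tells
    # whether the final call_finalize_export succeeded, and dresults contains
    # one boolean per instance disk indicating whether that disk's snapshot
    # reached the destination node.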
8820 5c947f38 Iustin Pop
8821 5c947f38 Iustin Pop
8822 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
8823 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
8824 9ac99fda Guido Trotter

8825 9ac99fda Guido Trotter
  """
8826 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
8827 3656b3af Guido Trotter
  REQ_BGL = False
8828 3656b3af Guido Trotter
8829 3656b3af Guido Trotter
  def ExpandNames(self):
8830 3656b3af Guido Trotter
    self.needed_locks = {}
8831 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
8832 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
8833 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
8834 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8835 9ac99fda Guido Trotter
8836 9ac99fda Guido Trotter
  def CheckPrereq(self):
8837 9ac99fda Guido Trotter
    """Check prerequisites.
8838 9ac99fda Guido Trotter
    """
8839 9ac99fda Guido Trotter
    pass
8840 9ac99fda Guido Trotter
8841 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
8842 9ac99fda Guido Trotter
    """Remove any export.
8843 9ac99fda Guido Trotter

8844 9ac99fda Guido Trotter
    """
8845 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
8846 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
8847 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
8848 9ac99fda Guido Trotter
    fqdn_warn = False
8849 9ac99fda Guido Trotter
    if not instance_name:
8850 9ac99fda Guido Trotter
      fqdn_warn = True
8851 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
8852 9ac99fda Guido Trotter
8853 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
8854 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
8855 9ac99fda Guido Trotter
    found = False
8856 9ac99fda Guido Trotter
    for node in exportlist:
8857 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
8858 1b7bfbb7 Iustin Pop
      if msg:
8859 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
8860 781de953 Iustin Pop
        continue
8861 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
8862 9ac99fda Guido Trotter
        found = True
8863 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
8864 4c4e4e1e Iustin Pop
        msg = result.fail_msg
8865 35fbcd11 Iustin Pop
        if msg:
8866 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
8867 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
8868 9ac99fda Guido Trotter
8869 9ac99fda Guido Trotter
    if fqdn_warn and not found:
8870 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
8871 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
8872 9ac99fda Guido Trotter
                  " Domain Name.")
8873 9ac99fda Guido Trotter
8874 9ac99fda Guido Trotter
8875 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
8876 5c947f38 Iustin Pop
  """Generic tags LU.
8877 5c947f38 Iustin Pop

8878 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
8879 5c947f38 Iustin Pop

8880 5c947f38 Iustin Pop
  """
8881 5c947f38 Iustin Pop
8882 8646adce Guido Trotter
  def ExpandNames(self):
8883 8646adce Guido Trotter
    self.needed_locks = {}
8884 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
8885 cf26a87a Iustin Pop
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
8886 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
8887 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
8888 cf26a87a Iustin Pop
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
8889 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
8890 8646adce Guido Trotter
8891 8646adce Guido Trotter
  def CheckPrereq(self):
8892 8646adce Guido Trotter
    """Check prerequisites.
8893 8646adce Guido Trotter

8894 8646adce Guido Trotter
    """
8895 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
8896 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
8897 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
8898 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
8899 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
8900 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
8901 5c947f38 Iustin Pop
    else:
8902 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
8903 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
8904 5c947f38 Iustin Pop
8905 5c947f38 Iustin Pop
8906 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
8907 5c947f38 Iustin Pop
  """Returns the tags of a given object.
8908 5c947f38 Iustin Pop

8909 5c947f38 Iustin Pop
  """
8910 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
8911 8646adce Guido Trotter
  REQ_BGL = False
8912 5c947f38 Iustin Pop
8913 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8914 5c947f38 Iustin Pop
    """Returns the tag list.
8915 5c947f38 Iustin Pop

8916 5c947f38 Iustin Pop
    """
8917 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
8918 5c947f38 Iustin Pop
8919 5c947f38 Iustin Pop
8920 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
8921 73415719 Iustin Pop
  """Searches the tags for a given pattern.
8922 73415719 Iustin Pop

8923 73415719 Iustin Pop
  """
8924 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
8925 8646adce Guido Trotter
  REQ_BGL = False
8926 8646adce Guido Trotter
8927 8646adce Guido Trotter
  def ExpandNames(self):
8928 8646adce Guido Trotter
    self.needed_locks = {}
8929 73415719 Iustin Pop
8930 73415719 Iustin Pop
  def CheckPrereq(self):
8931 73415719 Iustin Pop
    """Check prerequisites.
8932 73415719 Iustin Pop

8933 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
8934 73415719 Iustin Pop

8935 73415719 Iustin Pop
    """
8936 73415719 Iustin Pop
    try:
8937 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
8938 73415719 Iustin Pop
    except re.error, err:
8939 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
8940 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
8941 73415719 Iustin Pop
8942 73415719 Iustin Pop
  def Exec(self, feedback_fn):
8943 73415719 Iustin Pop
    """Returns the tag list.
8944 73415719 Iustin Pop

8945 73415719 Iustin Pop
    """
8946 73415719 Iustin Pop
    cfg = self.cfg
8947 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
8948 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
8949 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
8950 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
8951 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
8952 73415719 Iustin Pop
    results = []
8953 73415719 Iustin Pop
    for path, target in tgts:
8954 73415719 Iustin Pop
      for tag in target.GetTags():
8955 73415719 Iustin Pop
        if self.re.search(tag):
8956 73415719 Iustin Pop
          results.append((path, tag))
8957 73415719 Iustin Pop
    return results
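    # Illustrative result for a pattern such as "^web" (names made up):
    #   [("/cluster", "webfarm"),
    #    ("/instances/inst1.example.com", "webserver"),
    #    ("/nodes/node1.example.com", "web-dmz")]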
8958 73415719 Iustin Pop
8959 73415719 Iustin Pop
8960 f27302fa Iustin Pop
class LUAddTags(TagsLU):
8961 5c947f38 Iustin Pop
  """Sets a tag on a given object.
8962 5c947f38 Iustin Pop

8963 5c947f38 Iustin Pop
  """
8964 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8965 8646adce Guido Trotter
  REQ_BGL = False
8966 5c947f38 Iustin Pop
8967 5c947f38 Iustin Pop
  def CheckPrereq(self):
8968 5c947f38 Iustin Pop
    """Check prerequisites.
8969 5c947f38 Iustin Pop

8970 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
8971 5c947f38 Iustin Pop

8972 5c947f38 Iustin Pop
    """
8973 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8974 f27302fa Iustin Pop
    for tag in self.op.tags:
8975 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8976 5c947f38 Iustin Pop
8977 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8978 5c947f38 Iustin Pop
    """Sets the tag.
8979 5c947f38 Iustin Pop

8980 5c947f38 Iustin Pop
    """
8981 5c947f38 Iustin Pop
    try:
8982 f27302fa Iustin Pop
      for tag in self.op.tags:
8983 f27302fa Iustin Pop
        self.target.AddTag(tag)
8984 5c947f38 Iustin Pop
    except errors.TagError, err:
8985 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8986 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8987 5c947f38 Iustin Pop
8988 5c947f38 Iustin Pop
8989 f27302fa Iustin Pop
class LUDelTags(TagsLU):
8990 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
8991 5c947f38 Iustin Pop

8992 5c947f38 Iustin Pop
  """
8993 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8994 8646adce Guido Trotter
  REQ_BGL = False
8995 5c947f38 Iustin Pop
8996 5c947f38 Iustin Pop
  def CheckPrereq(self):
8997 5c947f38 Iustin Pop
    """Check prerequisites.
8998 5c947f38 Iustin Pop

8999 5c947f38 Iustin Pop
    This checks that we have the given tag.
9000 5c947f38 Iustin Pop

9001 5c947f38 Iustin Pop
    """
9002 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
9003 f27302fa Iustin Pop
    for tag in self.op.tags:
9004 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
9005 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
9006 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
9007 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
9008 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
9009 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
9010 f27302fa Iustin Pop
      diff_names.sort()
9011 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
9012 5c983ee5 Iustin Pop
                                 (",".join(diff_names)), errors.ECODE_NOENT)
9013 5c947f38 Iustin Pop
9014 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9015 5c947f38 Iustin Pop
    """Remove the tag from the object.
9016 5c947f38 Iustin Pop

9017 5c947f38 Iustin Pop
    """
9018 f27302fa Iustin Pop
    for tag in self.op.tags:
9019 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
9020 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
9021 06009e27 Iustin Pop
9022 0eed6e61 Guido Trotter
9023 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
9024 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
9025 06009e27 Iustin Pop

9026 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
9027 06009e27 Iustin Pop
  time.
9028 06009e27 Iustin Pop

9029 06009e27 Iustin Pop
  """
9030 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
9031 fbe9022f Guido Trotter
  REQ_BGL = False
9032 06009e27 Iustin Pop
9033 fbe9022f Guido Trotter
  def ExpandNames(self):
9034 fbe9022f Guido Trotter
    """Expand names and set required locks.
9035 06009e27 Iustin Pop

9036 fbe9022f Guido Trotter
    This expands the node list, if any.
9037 06009e27 Iustin Pop

9038 06009e27 Iustin Pop
    """
9039 fbe9022f Guido Trotter
    self.needed_locks = {}
9040 06009e27 Iustin Pop
    if self.op.on_nodes:
9041 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
9042 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
9043 fbe9022f Guido Trotter
      # more information.
9044 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
9045 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
9046 fbe9022f Guido Trotter
9047 fbe9022f Guido Trotter
  def CheckPrereq(self):
9048 fbe9022f Guido Trotter
    """Check prerequisites.
9049 fbe9022f Guido Trotter

9050 fbe9022f Guido Trotter
    """
9051 06009e27 Iustin Pop
9052 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
9053 06009e27 Iustin Pop
    """Do the actual sleep.
9054 06009e27 Iustin Pop

9055 06009e27 Iustin Pop
    """
9056 06009e27 Iustin Pop
    if self.op.on_master:
9057 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
9058 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
9059 06009e27 Iustin Pop
    if self.op.on_nodes:
9060 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
9061 06009e27 Iustin Pop
      for node, node_result in result.items():
9062 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
9063 d61df03e Iustin Pop
9064 d61df03e Iustin Pop
9065 d1c2dd75 Iustin Pop
class IAllocator(object):
9066 d1c2dd75 Iustin Pop
  """IAllocator framework.
9067 d61df03e Iustin Pop

9068 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
9069 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
9070 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
9071 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
9072 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
9073 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
9074 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
9075 d1c2dd75 Iustin Pop
      easy usage
9076 d61df03e Iustin Pop

9077 d61df03e Iustin Pop
  """
9078 7260cfbe Iustin Pop
  # pylint: disable-msg=R0902
9079 7260cfbe Iustin Pop
  # lots of instance attributes
9080 29859cb7 Iustin Pop
  _ALLO_KEYS = [
9081 8d3f86a0 Iustin Pop
    "name", "mem_size", "disks", "disk_template",
9082 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
9083 d1c2dd75 Iustin Pop
    ]
9084 29859cb7 Iustin Pop
  _RELO_KEYS = [
9085 8d3f86a0 Iustin Pop
    "name", "relocate_from",
9086 29859cb7 Iustin Pop
    ]
9087 7f60a422 Iustin Pop
  _EVAC_KEYS = [
9088 7f60a422 Iustin Pop
    "evac_nodes",
9089 7f60a422 Iustin Pop
    ]
9090 d1c2dd75 Iustin Pop
9091 8d3f86a0 Iustin Pop
  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

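  # For reference, a trimmed, invented sketch of what _ComputeClusterData
  # leaves in self.in_data (key names match the code above; all values are
  # made up and real clusters have many more nodes and instances):
  #
  #   {
  #     "version": constants.IALLOCATOR_VERSION,
  #     "cluster_name": "cluster.example.com",
  #     "cluster_tags": [],
  #     "enabled_hypervisors": ["xen-pvm"],
  #     "nodes": {
  #       "node1.example.com": {
  #         "tags": [], "primary_ip": "192.0.2.1", "secondary_ip": "192.0.2.1",
  #         "offline": False, "drained": False, "master_candidate": True,
  #         "total_memory": 4096, "reserved_memory": 512, "free_memory": 3072,
  #         "total_disk": 102400, "free_disk": 51200, "total_cpus": 4,
  #         "i_pri_memory": 512, "i_pri_up_memory": 512,
  #         },
  #       },
  #     "instances": {
  #       "instance1.example.com": {
  #         "tags": [], "admin_up": True, "vcpus": 1, "memory": 512,
  #         "os": "debian-image", "nodes": ["node1.example.com"],
  #         "nics": [{"mac": "aa:00:00:11:22:33", "ip": None,
  #                   "mode": "bridged", "link": "xen-br0",
  #                   "bridge": "xen-br0"}],
  #         "disks": [{"size": 1024, "mode": "w"}],
  #         "disk_template": "plain", "hypervisor": "xen-pvm",
  #         "disk_space_total": 1024,
  #         },
  #       },
  #     }
  #
  # The mode-specific "request" member is added afterwards by _BuildInputData.
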
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

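  # A compact, invented example of the request this method returns for a
  # mirrored template ("type" is added later by _BuildInputData and
  # "disk_space_total" comes from _ComputeDiskSize):
  #
  #   {"name": "instance1.example.com", "disk_template": "drbd",
  #    "tags": [], "os": "debian-image", "vcpus": 1, "memory": 512,
  #    "disks": [{"size": 1024, "mode": "w"}], "disk_space_total": 1152,
  #    "nics": [{"mac": "auto", "ip": None, "bridge": None}],
  #    "required_nodes": 2}
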
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

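  # The call_fn hook in Run() is mainly a test seam: a caller can inject a
  # stub in place of the real RPC.  A minimal sketch, assuming a hypothetical
  # FakeRpcResult class that mimics the usual RPC result interface (a no-op
  # Raise() plus a payload attribute holding the script output as text):
  #
  #   def fake_runner(node, name, in_text):
  #     return FakeRpcResult('{"success": true, "info": "", "result": []}')
  #
  #   ial.Run("dummy-allocator", call_fn=fake_runner)
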
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict


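# For reference, the smallest reply that satisfies _ValidateResult above is a
# JSON object carrying the three mandatory keys, e.g. (node name invented):
#
#   {"success": true, "info": "allocation successful",
#    "result": ["node2.example.com"]}
#
# Replies from older scripts that still use "nodes" instead of "result" are
# rewritten on the fly by the backwards-compatibility branch above.
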
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the test direction and mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
                                     " parameter", errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
                                   errors.ECODE_INVAL)
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

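  # The parameter shapes accepted by the checks above, with invented values:
  #
  #   nics:  [{"mac": "aa:00:00:11:22:33", "ip": None, "bridge": "xen-br0"}]
  #   disks: [{"size": 1024, "mode": "w"}]    # "mode" must be 'r' or 'w'
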
  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Uncaught mode %s in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result
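

# Illustrative sketch only: driving the multi-evacuation mode directly through
# the IAllocator framework needs nothing beyond the node list.  The node name
# and the allocator script name "hail" below are invented placeholders for
# whatever is configured on the master.
#
#   ial = IAllocator(cfg, rpc, constants.IALLOCATOR_MODE_MEVAC,
#                    evac_nodes=["node3.example.com"])
#   ial.Run("hail")
#   # on success, ial.result holds the relocation list proposed by the script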