root / lib / cmdlib.py @ 3953242f


1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 c70d2d9b Iustin Pop
# pylint: disable-msg=W0201
25 c70d2d9b Iustin Pop
26 c70d2d9b Iustin Pop
# W0201 since most LU attributes are defined in CheckPrereq or similar
27 c70d2d9b Iustin Pop
# functions
28 a8083063 Iustin Pop
29 a8083063 Iustin Pop
import os
30 a8083063 Iustin Pop
import os.path
31 a8083063 Iustin Pop
import time
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 ffa1c0dc Iustin Pop
import logging
35 74409b12 Iustin Pop
import copy
36 b98bf262 Michael Hanselmann
import OpenSSL
37 a8083063 Iustin Pop
38 a8083063 Iustin Pop
from ganeti import ssh
39 a8083063 Iustin Pop
from ganeti import utils
40 a8083063 Iustin Pop
from ganeti import errors
41 a8083063 Iustin Pop
from ganeti import hypervisor
42 6048c986 Guido Trotter
from ganeti import locking
43 a8083063 Iustin Pop
from ganeti import constants
44 a8083063 Iustin Pop
from ganeti import objects
45 8d14b30d Iustin Pop
from ganeti import serializer
46 112f18a5 Iustin Pop
from ganeti import ssconf
47 d61df03e Iustin Pop
48 d61df03e Iustin Pop
49 a8083063 Iustin Pop
class LogicalUnit(object):
50 396e1b78 Michael Hanselmann
  """Logical Unit base class.
51 a8083063 Iustin Pop

52 a8083063 Iustin Pop
  Subclasses must follow these rules:
53 d465bdc8 Guido Trotter
    - implement ExpandNames
54 6fd35c4d Michael Hanselmann
    - implement CheckPrereq (except when tasklets are used)
55 6fd35c4d Michael Hanselmann
    - implement Exec (except when tasklets are used)
56 a8083063 Iustin Pop
    - implement BuildHooksEnv
57 a8083063 Iustin Pop
    - redefine HPATH and HTYPE
58 05f86716 Guido Trotter
    - optionally redefine their run requirements:
59 7e55040e Guido Trotter
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60 05f86716 Guido Trotter

61 05f86716 Guido Trotter
  Note that all commands require root permissions.
62 a8083063 Iustin Pop

63 20777413 Iustin Pop
  @ivar dry_run_result: the value (if any) that will be returned to the caller
64 20777413 Iustin Pop
      in dry-run mode (signalled by opcode dry_run parameter)
65 20777413 Iustin Pop

66 a8083063 Iustin Pop
  """
67 a8083063 Iustin Pop
  HPATH = None
68 a8083063 Iustin Pop
  HTYPE = None
69 a8083063 Iustin Pop
  _OP_REQP = []
70 7e55040e Guido Trotter
  REQ_BGL = True
71 a8083063 Iustin Pop
72 72737a7f Iustin Pop
  def __init__(self, processor, op, context, rpc):
73 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
74 a8083063 Iustin Pop

75 5bbd3f7f Michael Hanselmann
    This needs to be overridden in derived classes in order to check op
76 a8083063 Iustin Pop
    validity.
77 a8083063 Iustin Pop

78 a8083063 Iustin Pop
    """
79 5bfac263 Iustin Pop
    self.proc = processor
80 a8083063 Iustin Pop
    self.op = op
81 77b657a3 Guido Trotter
    self.cfg = context.cfg
82 77b657a3 Guido Trotter
    self.context = context
83 72737a7f Iustin Pop
    self.rpc = rpc
84 ca2a79e1 Guido Trotter
    # Dicts used to declare locking needs to mcpu
85 d465bdc8 Guido Trotter
    self.needed_locks = None
86 6683bba2 Guido Trotter
    self.acquired_locks = {}
87 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
88 ca2a79e1 Guido Trotter
    self.add_locks = {}
89 ca2a79e1 Guido Trotter
    self.remove_locks = {}
90 c4a2fee1 Guido Trotter
    # Used to force good behavior when calling helper functions
91 c4a2fee1 Guido Trotter
    self.recalculate_locks = {}
92 c92b310a Michael Hanselmann
    self.__ssh = None
93 86d9d3bb Iustin Pop
    # logging
94 fe267188 Iustin Pop
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
95 fe267188 Iustin Pop
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
96 d984846d Iustin Pop
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
97 20777413 Iustin Pop
    # support for dry-run
98 20777413 Iustin Pop
    self.dry_run_result = None
99 ee844e20 Iustin Pop
    # support for generic debug attribute
100 ee844e20 Iustin Pop
    if (not hasattr(self.op, "debug_level") or
101 ee844e20 Iustin Pop
        not isinstance(self.op.debug_level, int)):
102 ee844e20 Iustin Pop
      self.op.debug_level = 0
103 c92b310a Michael Hanselmann
104 6fd35c4d Michael Hanselmann
    # Tasklets
105 3a012b41 Michael Hanselmann
    self.tasklets = None
106 6fd35c4d Michael Hanselmann
107 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
108 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
109 a8083063 Iustin Pop
      if attr_val is None:
110 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
111 5c983ee5 Iustin Pop
                                   attr_name, errors.ECODE_INVAL)
112 6fd35c4d Michael Hanselmann
113 4be4691d Iustin Pop
    self.CheckArguments()
114 a8083063 Iustin Pop
115 c92b310a Michael Hanselmann
  def __GetSSH(self):
116 c92b310a Michael Hanselmann
    """Returns the SshRunner object
117 c92b310a Michael Hanselmann

118 c92b310a Michael Hanselmann
    """
119 c92b310a Michael Hanselmann
    if not self.__ssh:
120 6b0469d2 Iustin Pop
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
121 c92b310a Michael Hanselmann
    return self.__ssh
122 c92b310a Michael Hanselmann
123 c92b310a Michael Hanselmann
  ssh = property(fget=__GetSSH)
124 c92b310a Michael Hanselmann
125 4be4691d Iustin Pop
  def CheckArguments(self):
126 4be4691d Iustin Pop
    """Check syntactic validity for the opcode arguments.
127 4be4691d Iustin Pop

128 4be4691d Iustin Pop
    This method is for doing a simple syntactic check and ensuring
129 4be4691d Iustin Pop
    validity of opcode parameters, without any cluster-related
130 4be4691d Iustin Pop
    checks. While the same can be accomplished in ExpandNames and/or
131 4be4691d Iustin Pop
    CheckPrereq, doing these separate is better because:
132 4be4691d Iustin Pop

133 4be4691d Iustin Pop
      - ExpandNames is left as purely a lock-related function
134 5bbd3f7f Michael Hanselmann
      - CheckPrereq is run after we have acquired locks (and possible
135 4be4691d Iustin Pop
        waited for them)
136 4be4691d Iustin Pop

137 4be4691d Iustin Pop
    The function is allowed to change the self.op attribute so that
138 4be4691d Iustin Pop
    later methods need no longer worry about missing parameters.
139 4be4691d Iustin Pop

140 4be4691d Iustin Pop
    """
141 4be4691d Iustin Pop
    pass
142 4be4691d Iustin Pop
143 d465bdc8 Guido Trotter
  def ExpandNames(self):
144 d465bdc8 Guido Trotter
    """Expand names for this LU.
145 d465bdc8 Guido Trotter

146 d465bdc8 Guido Trotter
    This method is called before starting to execute the opcode, and it should
147 d465bdc8 Guido Trotter
    update all the parameters of the opcode to their canonical form (e.g. a
148 d465bdc8 Guido Trotter
    short node name must be fully expanded after this method has successfully
149 d465bdc8 Guido Trotter
    completed). This way locking, hooks, logging, ecc. can work correctly.
150 d465bdc8 Guido Trotter

151 d465bdc8 Guido Trotter
    LUs which implement this method must also populate the self.needed_locks
152 d465bdc8 Guido Trotter
    member, as a dict with lock levels as keys, and a list of needed lock names
153 d465bdc8 Guido Trotter
    as values. Rules:
154 e4376078 Iustin Pop

155 e4376078 Iustin Pop
      - use an empty dict if you don't need any lock
156 e4376078 Iustin Pop
      - if you don't need any lock at a particular level omit that level
157 e4376078 Iustin Pop
      - don't put anything for the BGL level
158 e4376078 Iustin Pop
      - if you want all locks at a level use locking.ALL_SET as a value
159 d465bdc8 Guido Trotter

160 3977a4c1 Guido Trotter
    If you need to share locks (rather than acquire them exclusively) at one
161 3977a4c1 Guido Trotter
    level you can modify self.share_locks, setting a true value (usually 1) for
162 3977a4c1 Guido Trotter
    that level. By default locks are not shared.
163 3977a4c1 Guido Trotter

164 6fd35c4d Michael Hanselmann
    This function can also define a list of tasklets, which then will be
165 6fd35c4d Michael Hanselmann
    executed in order instead of the usual LU-level CheckPrereq and Exec
166 6fd35c4d Michael Hanselmann
    functions, if those are not defined by the LU.
167 6fd35c4d Michael Hanselmann

168 e4376078 Iustin Pop
    Examples::
169 e4376078 Iustin Pop

170 e4376078 Iustin Pop
      # Acquire all nodes and one instance
171 e4376078 Iustin Pop
      self.needed_locks = {
172 e4376078 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
173 e4376078 Iustin Pop
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
174 e4376078 Iustin Pop
      }
175 e4376078 Iustin Pop
      # Acquire just two nodes
176 e4376078 Iustin Pop
      self.needed_locks = {
177 e4376078 Iustin Pop
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
178 e4376078 Iustin Pop
      }
179 e4376078 Iustin Pop
      # Acquire no locks
180 e4376078 Iustin Pop
      self.needed_locks = {} # No, you can't leave it to the default value None
181 d465bdc8 Guido Trotter

182 d465bdc8 Guido Trotter
    """
183 d465bdc8 Guido Trotter
    # The implementation of this method is mandatory only if the new LU is
184 d465bdc8 Guido Trotter
    # concurrent, so that old LUs don't need to be changed all at the same
185 d465bdc8 Guido Trotter
    # time.
186 d465bdc8 Guido Trotter
    if self.REQ_BGL:
187 d465bdc8 Guido Trotter
      self.needed_locks = {} # Exclusive LUs don't need locks.
188 d465bdc8 Guido Trotter
    else:
189 d465bdc8 Guido Trotter
      raise NotImplementedError
190 d465bdc8 Guido Trotter
191 fb8dcb62 Guido Trotter
  def DeclareLocks(self, level):
192 fb8dcb62 Guido Trotter
    """Declare LU locking needs for a level
193 fb8dcb62 Guido Trotter

194 fb8dcb62 Guido Trotter
    While most LUs can just declare their locking needs at ExpandNames time,
195 fb8dcb62 Guido Trotter
    sometimes locks can only be calculated after the previous ones have been
196 fb8dcb62 Guido Trotter
    acquired. This function is called just before acquiring locks at a
197 fb8dcb62 Guido Trotter
    particular level, but after acquiring the ones at lower levels, and permits
198 fb8dcb62 Guido Trotter
    such calculations. It can be used to modify self.needed_locks, and by
199 fb8dcb62 Guido Trotter
    default it does nothing.
200 fb8dcb62 Guido Trotter

201 fb8dcb62 Guido Trotter
    This function is only called if you have something already set in
202 fb8dcb62 Guido Trotter
    self.needed_locks for the level.
203 fb8dcb62 Guido Trotter

204 fb8dcb62 Guido Trotter
    @param level: Locking level which is going to be locked
205 fb8dcb62 Guido Trotter
    @type level: member of ganeti.locking.LEVELS
206 fb8dcb62 Guido Trotter

207 fb8dcb62 Guido Trotter
    """
208 fb8dcb62 Guido Trotter
209 a8083063 Iustin Pop
  def CheckPrereq(self):
210 a8083063 Iustin Pop
    """Check prerequisites for this LU.
211 a8083063 Iustin Pop

212 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
213 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
214 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
215 a8083063 Iustin Pop
    allowed.
216 a8083063 Iustin Pop

217 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
218 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
219 a8083063 Iustin Pop

220 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
221 d465bdc8 Guido Trotter
    their canonical form if it hasn't been done by ExpandNames before.
222 a8083063 Iustin Pop

223 a8083063 Iustin Pop
    """
224 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
225 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
226 abae1b2b Michael Hanselmann
        logging.debug("Checking prerequisites for tasklet %s/%s",
227 abae1b2b Michael Hanselmann
                      idx + 1, len(self.tasklets))
228 6fd35c4d Michael Hanselmann
        tl.CheckPrereq()
229 6fd35c4d Michael Hanselmann
    else:
230 6fd35c4d Michael Hanselmann
      raise NotImplementedError
231 a8083063 Iustin Pop
232 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
233 a8083063 Iustin Pop
    """Execute the LU.
234 a8083063 Iustin Pop

235 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
236 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
237 a8083063 Iustin Pop
    code, or expected.
238 a8083063 Iustin Pop

239 a8083063 Iustin Pop
    """
240 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
241 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
242 abae1b2b Michael Hanselmann
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
243 6fd35c4d Michael Hanselmann
        tl.Exec(feedback_fn)
244 6fd35c4d Michael Hanselmann
    else:
245 6fd35c4d Michael Hanselmann
      raise NotImplementedError
246 a8083063 Iustin Pop
247 a8083063 Iustin Pop
  def BuildHooksEnv(self):
248 a8083063 Iustin Pop
    """Build hooks environment for this LU.
249 a8083063 Iustin Pop

250 a8083063 Iustin Pop
    This method should return a three-element tuple consisting of: a dict
251 a8083063 Iustin Pop
    containing the environment that will be used for running the
252 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
253 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
254 a8083063 Iustin Pop
    the hook should run after the execution.
255 a8083063 Iustin Pop

256 a8083063 Iustin Pop
    The keys of the dict must not have 'GANETI_' prefixed as this will
257 a8083063 Iustin Pop
    be handled in the hooks runner. Also note additional keys will be
258 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
259 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
260 a8083063 Iustin Pop

261 8a3fe350 Guido Trotter
    No nodes should be returned as an empty list (and not None).
262 a8083063 Iustin Pop

263 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
264 a8083063 Iustin Pop
    not be called.
265 a8083063 Iustin Pop

266 a8083063 Iustin Pop
    """
267 a8083063 Iustin Pop
    raise NotImplementedError
268 a8083063 Iustin Pop
269 1fce5219 Guido Trotter
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
270 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
271 1fce5219 Guido Trotter

272 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
273 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
274 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
275 1fce5219 Guido Trotter
    previous result is passed back unchanged but any LU can define it if it
276 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
277 1fce5219 Guido Trotter

278 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
279 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
280 e4376078 Iustin Pop
    @param hook_results: the results of the multi-node hooks rpc call
281 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
282 e4376078 Iustin Pop
    @param lu_result: the previous Exec result this LU had, or None
283 e4376078 Iustin Pop
        in the PRE phase
284 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
285 e4376078 Iustin Pop
        and hook results
286 1fce5219 Guido Trotter

287 1fce5219 Guido Trotter
    """
288 2d54e29c Iustin Pop
    # API must be kept, thus we ignore the unused-argument and
289 2d54e29c Iustin Pop
    # could-be-a-function warnings
290 2d54e29c Iustin Pop
    # pylint: disable-msg=W0613,R0201
291 1fce5219 Guido Trotter
    return lu_result
292 1fce5219 Guido Trotter
293 43905206 Guido Trotter
  def _ExpandAndLockInstance(self):
294 43905206 Guido Trotter
    """Helper function to expand and lock an instance.
295 43905206 Guido Trotter

296 43905206 Guido Trotter
    Many LUs that work on an instance take its name in self.op.instance_name
297 43905206 Guido Trotter
    and need to expand it and then declare the expanded name for locking. This
298 43905206 Guido Trotter
    function does it, and then updates self.op.instance_name to the expanded
299 43905206 Guido Trotter
    name. It also initializes needed_locks as a dict, if this hasn't been done
300 43905206 Guido Trotter
    before.
301 43905206 Guido Trotter

302 43905206 Guido Trotter
    """
303 43905206 Guido Trotter
    if self.needed_locks is None:
304 43905206 Guido Trotter
      self.needed_locks = {}
305 43905206 Guido Trotter
    else:
306 43905206 Guido Trotter
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
307 43905206 Guido Trotter
        "_ExpandAndLockInstance called with instance-level locks set"
308 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
309 cf26a87a Iustin Pop
                                                self.op.instance_name)
310 cf26a87a Iustin Pop
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
311 43905206 Guido Trotter
312 a82ce292 Guido Trotter
  def _LockInstancesNodes(self, primary_only=False):
313 c4a2fee1 Guido Trotter
    """Helper function to declare instances' nodes for locking.
314 c4a2fee1 Guido Trotter

315 c4a2fee1 Guido Trotter
    This function should be called after locking one or more instances to lock
316 c4a2fee1 Guido Trotter
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
317 c4a2fee1 Guido Trotter
    with all primary or secondary nodes for instances already locked and
318 c4a2fee1 Guido Trotter
    present in self.needed_locks[locking.LEVEL_INSTANCE].
319 c4a2fee1 Guido Trotter

320 c4a2fee1 Guido Trotter
    It should be called from DeclareLocks, and for safety only works if
321 c4a2fee1 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] is set.
322 c4a2fee1 Guido Trotter

323 c4a2fee1 Guido Trotter
    In the future it may grow parameters to just lock some instances' nodes, or
324 c4a2fee1 Guido Trotter
    to just lock primary or secondary nodes, if needed.
325 c4a2fee1 Guido Trotter

326 e4376078 Iustin Pop
    It should be called in DeclareLocks in a way similar to::
327 c4a2fee1 Guido Trotter

328 e4376078 Iustin Pop
      if level == locking.LEVEL_NODE:
329 e4376078 Iustin Pop
        self._LockInstancesNodes()
330 c4a2fee1 Guido Trotter

331 a82ce292 Guido Trotter
    @type primary_only: boolean
332 a82ce292 Guido Trotter
    @param primary_only: only lock primary nodes of locked instances
333 a82ce292 Guido Trotter

334 c4a2fee1 Guido Trotter
    """
335 c4a2fee1 Guido Trotter
    assert locking.LEVEL_NODE in self.recalculate_locks, \
336 c4a2fee1 Guido Trotter
      "_LockInstancesNodes helper function called with no nodes to recalculate"
337 c4a2fee1 Guido Trotter
338 c4a2fee1 Guido Trotter
    # TODO: check if we've really been called with the instance locks held
339 c4a2fee1 Guido Trotter
340 c4a2fee1 Guido Trotter
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
341 c4a2fee1 Guido Trotter
    # future we might want to have different behaviors depending on the value
342 c4a2fee1 Guido Trotter
    # of self.recalculate_locks[locking.LEVEL_NODE]
343 c4a2fee1 Guido Trotter
    wanted_nodes = []
344 6683bba2 Guido Trotter
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
345 c4a2fee1 Guido Trotter
      instance = self.context.cfg.GetInstanceInfo(instance_name)
346 c4a2fee1 Guido Trotter
      wanted_nodes.append(instance.primary_node)
347 a82ce292 Guido Trotter
      if not primary_only:
348 a82ce292 Guido Trotter
        wanted_nodes.extend(instance.secondary_nodes)
349 9513b6ab Guido Trotter
350 9513b6ab Guido Trotter
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
351 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
352 9513b6ab Guido Trotter
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
353 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
354 c4a2fee1 Guido Trotter
355 c4a2fee1 Guido Trotter
    del self.recalculate_locks[locking.LEVEL_NODE]
356 c4a2fee1 Guido Trotter
357 a8083063 Iustin Pop
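
# Illustrative sketch (not part of the original module): a minimal concurrent
# LU following the conventions documented in the LogicalUnit docstrings above.
# It expands and locks one instance, recalculates the node-level locks via
# _LockInstancesNodes, and returns the three-element hooks tuple from
# BuildHooksEnv.  The class name and opcode handling are assumptions made up
# for this example only.
class LUExampleNoop(LogicalUnit):
  """Example-only LU showing the locking and hooks boilerplate."""
  HPATH = "instance-example-noop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, "Locked instance vanished from config"
    _CheckNodeOnline(self, self.instance.primary_node)

  def BuildHooksEnv(self):
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def Exec(self, feedback_fn):
    feedback_fn("Nothing to do for instance %s" % self.instance.name)
    return True
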
358 fe267188 Iustin Pop
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
359 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
360 a8083063 Iustin Pop

361 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
362 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
363 a8083063 Iustin Pop

364 a8083063 Iustin Pop
  """
365 a8083063 Iustin Pop
  HPATH = None
366 a8083063 Iustin Pop
  HTYPE = None
367 a8083063 Iustin Pop
368 fc8a6b8f Iustin Pop
  def BuildHooksEnv(self):
369 fc8a6b8f Iustin Pop
    """Empty BuildHooksEnv for NoHooksLu.
370 fc8a6b8f Iustin Pop

371 fc8a6b8f Iustin Pop
    This just raises an error.
372 fc8a6b8f Iustin Pop

373 fc8a6b8f Iustin Pop
    """
374 fc8a6b8f Iustin Pop
    assert False, "BuildHooksEnv called for NoHooksLUs"
375 fc8a6b8f Iustin Pop
376 a8083063 Iustin Pop
377 9a6800e1 Michael Hanselmann
class Tasklet:
378 9a6800e1 Michael Hanselmann
  """Tasklet base class.
379 9a6800e1 Michael Hanselmann

380 9a6800e1 Michael Hanselmann
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
381 9a6800e1 Michael Hanselmann
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
382 9a6800e1 Michael Hanselmann
  tasklets know nothing about locks.
383 9a6800e1 Michael Hanselmann

384 9a6800e1 Michael Hanselmann
  Subclasses must follow these rules:
385 9a6800e1 Michael Hanselmann
    - Implement CheckPrereq
386 9a6800e1 Michael Hanselmann
    - Implement Exec
387 9a6800e1 Michael Hanselmann

388 9a6800e1 Michael Hanselmann
  """
389 464243a7 Michael Hanselmann
  def __init__(self, lu):
390 464243a7 Michael Hanselmann
    self.lu = lu
391 464243a7 Michael Hanselmann
392 464243a7 Michael Hanselmann
    # Shortcuts
393 464243a7 Michael Hanselmann
    self.cfg = lu.cfg
394 464243a7 Michael Hanselmann
    self.rpc = lu.rpc
395 464243a7 Michael Hanselmann
396 9a6800e1 Michael Hanselmann
  def CheckPrereq(self):
397 9a6800e1 Michael Hanselmann
    """Check prerequisites for this tasklets.
398 9a6800e1 Michael Hanselmann

399 9a6800e1 Michael Hanselmann
    This method should check whether the prerequisites for the execution of
400 9a6800e1 Michael Hanselmann
    this tasklet are fulfilled. It can do internode communication, but it
401 9a6800e1 Michael Hanselmann
    should be idempotent - no cluster or system changes are allowed.
402 9a6800e1 Michael Hanselmann

403 9a6800e1 Michael Hanselmann
    The method should raise errors.OpPrereqError in case something is not
404 9a6800e1 Michael Hanselmann
    fulfilled. Its return value is ignored.
405 9a6800e1 Michael Hanselmann

406 9a6800e1 Michael Hanselmann
    This method should also update all parameters to their canonical form if it
407 9a6800e1 Michael Hanselmann
    hasn't been done before.
408 9a6800e1 Michael Hanselmann

409 9a6800e1 Michael Hanselmann
    """
410 9a6800e1 Michael Hanselmann
    raise NotImplementedError
411 9a6800e1 Michael Hanselmann
412 9a6800e1 Michael Hanselmann
  def Exec(self, feedback_fn):
413 9a6800e1 Michael Hanselmann
    """Execute the tasklet.
414 9a6800e1 Michael Hanselmann

415 9a6800e1 Michael Hanselmann
    This method should implement the actual work. It should raise
416 9a6800e1 Michael Hanselmann
    errors.OpExecError for failures that are somewhat dealt with in code, or
417 9a6800e1 Michael Hanselmann
    expected.
418 9a6800e1 Michael Hanselmann

419 9a6800e1 Michael Hanselmann
    """
420 9a6800e1 Michael Hanselmann
    raise NotImplementedError
421 9a6800e1 Michael Hanselmann
422 9a6800e1 Michael Hanselmann
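
# Illustrative sketch (not part of the original module): a trivial tasklet and
# an LU built entirely from tasklets.  Because ExpandNames sets self.tasklets,
# the base LogicalUnit.CheckPrereq and Exec defined above iterate over the
# list, so this LU needs neither method.  All names are invented for the
# example.
class _ExampleMessageTasklet(Tasklet):
  """Tasklet that only emits a feedback message."""
  def __init__(self, lu, message):
    Tasklet.__init__(self, lu)
    self.message = message

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    feedback_fn(self.message)


class LUExampleTasklets(NoHooksLU):
  """Example-only LU consisting entirely of tasklets."""
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.tasklets = [_ExampleMessageTasklet(self, "step one"),
                     _ExampleMessageTasklet(self, "step two")]
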
423 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
424 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
425 83120a01 Michael Hanselmann

426 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
427 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
428 e4376078 Iustin Pop
  @type nodes: list
429 e4376078 Iustin Pop
  @param nodes: list of node names or None for all nodes
430 e4376078 Iustin Pop
  @rtype: list
431 e4376078 Iustin Pop
  @return: the list of nodes, sorted
432 083a91c9 Iustin Pop
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
433 83120a01 Michael Hanselmann

434 83120a01 Michael Hanselmann
  """
435 3312b702 Iustin Pop
  if not isinstance(nodes, list):
436 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
437 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
438 dcb93971 Michael Hanselmann
439 ea47808a Guido Trotter
  if not nodes:
440 ea47808a Guido Trotter
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
441 ea47808a Guido Trotter
      " non-empty list of nodes whose name is to be expanded.")
442 dcb93971 Michael Hanselmann
443 61dabca4 Iustin Pop
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
444 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
445 3312b702 Iustin Pop
446 3312b702 Iustin Pop
447 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
448 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
449 3312b702 Iustin Pop

450 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
451 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
452 e4376078 Iustin Pop
  @type instances: list
453 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
454 e4376078 Iustin Pop
  @rtype: list
455 e4376078 Iustin Pop
  @return: the list of instances, sorted
456 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
457 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
458 3312b702 Iustin Pop

459 3312b702 Iustin Pop
  """
460 3312b702 Iustin Pop
  if not isinstance(instances, list):
461 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'",
462 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
463 3312b702 Iustin Pop
464 3312b702 Iustin Pop
  if instances:
465 cf26a87a Iustin Pop
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
466 3312b702 Iustin Pop
  else:
467 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
468 a7f5dc98 Iustin Pop
  return wanted
469 dcb93971 Michael Hanselmann
470 dcb93971 Michael Hanselmann
471 dcb93971 Michael Hanselmann
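
# Illustrative sketch (not part of the original module): how the two helpers
# above behave.  _FakeNameConfig stands in for the cluster config; all names
# are invented for this example.
class _FakeNameConfig(object):
  """Example-only stand-in for the parts of the config used above."""
  def ExpandNodeName(self, name):
    return {"node1": "node1.example.com",
            "node2": "node2.example.com"}.get(name)

  def GetInstanceList(self):
    return ["inst10.example.com", "inst2.example.com"]


class _FakeNameLU(object):
  """Example-only stand-in for an LU carrying a cfg attribute."""
  cfg = _FakeNameConfig()

# _GetWantedNodes(_FakeNameLU(), ["node2", "node1"]) returns the expanded
# names NiceSort()ed: ["node1.example.com", "node2.example.com"]; an unknown
# name raises OpPrereqError and an empty list raises ProgrammerError.
# _GetWantedInstances(_FakeNameLU(), []) falls back to every configured
# instance, NiceSort()ed: ["inst2.example.com", "inst10.example.com"].
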
def _CheckOutputFields(static, dynamic, selected):
472 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
473 83120a01 Michael Hanselmann

474 a2d2e1a7 Iustin Pop
  @type static: L{utils.FieldSet}
475 31bf511f Iustin Pop
  @param static: static fields set
476 a2d2e1a7 Iustin Pop
  @type dynamic: L{utils.FieldSet}
477 31bf511f Iustin Pop
  @param dynamic: dynamic fields set
478 83120a01 Michael Hanselmann

479 83120a01 Michael Hanselmann
  """
480 a2d2e1a7 Iustin Pop
  f = utils.FieldSet()
481 31bf511f Iustin Pop
  f.Extend(static)
482 31bf511f Iustin Pop
  f.Extend(dynamic)
483 dcb93971 Michael Hanselmann
484 31bf511f Iustin Pop
  delta = f.NonMatching(selected)
485 31bf511f Iustin Pop
  if delta:
486 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
487 5c983ee5 Iustin Pop
                               % ",".join(delta), errors.ECODE_INVAL)
488 dcb93971 Michael Hanselmann
489 dcb93971 Michael Hanselmann
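
# Illustrative usage sketch (assumption, not original code): validating a
# user-supplied field list against the static and dynamic sets, as the query
# LUs do with the helper above.  The field names are invented.
#
#   static_fields = utils.FieldSet("name", "pinst_cnt")
#   dynamic_fields = utils.FieldSet("dfree", "dtotal")
#   _CheckOutputFields(static=static_fields, dynamic=dynamic_fields,
#                      selected=["name", "dfree"])    # accepted
#   _CheckOutputFields(static=static_fields, dynamic=dynamic_fields,
#                      selected=["name", "bogus"])    # raises OpPrereqError
#                                                     # naming "bogus"
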
490 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
491 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
492 a5961235 Iustin Pop

493 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
494 a5961235 Iustin Pop
  or None (but that it always exists).
495 a5961235 Iustin Pop

496 a5961235 Iustin Pop
  """
497 a5961235 Iustin Pop
  val = getattr(op, name, None)
498 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
499 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
500 5c983ee5 Iustin Pop
                               (name, str(val)), errors.ECODE_INVAL)
501 a5961235 Iustin Pop
  setattr(op, name, val)
502 a5961235 Iustin Pop
503 a5961235 Iustin Pop
504 7736a5f2 Iustin Pop
def _CheckGlobalHvParams(params):
505 7736a5f2 Iustin Pop
  """Validates that given hypervisor params are not global ones.
506 7736a5f2 Iustin Pop

507 7736a5f2 Iustin Pop
  This will ensure that instances don't get customised versions of
508 7736a5f2 Iustin Pop
  global params.
509 7736a5f2 Iustin Pop

510 7736a5f2 Iustin Pop
  """
511 7736a5f2 Iustin Pop
  used_globals = constants.HVC_GLOBALS.intersection(params)
512 7736a5f2 Iustin Pop
  if used_globals:
513 7736a5f2 Iustin Pop
    msg = ("The following hypervisor parameters are global and cannot"
514 7736a5f2 Iustin Pop
           " be customized at instance level, please modify them at"
515 1f864b60 Iustin Pop
           " cluster level: %s" % utils.CommaJoin(used_globals))
516 7736a5f2 Iustin Pop
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
517 7736a5f2 Iustin Pop
518 7736a5f2 Iustin Pop
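
# Illustrative usage sketch (assumption, not original code): an instance
# creation path would reject hypervisor overrides that belong to the cluster,
# e.g. if "migration_port" were listed in constants.HVC_GLOBALS then
#
#   _CheckGlobalHvParams({"migration_port": 8102})
#
# would raise OpPrereqError naming the offending key, while parameters outside
# HVC_GLOBALS pass through untouched.
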
519 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
520 a5961235 Iustin Pop
  """Ensure that a given node is online.
521 a5961235 Iustin Pop

522 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
523 a5961235 Iustin Pop
  @param node: the node to check
524 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
525 a5961235 Iustin Pop

526 a5961235 Iustin Pop
  """
527 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
528 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node,
529 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
530 a5961235 Iustin Pop
531 a5961235 Iustin Pop
532 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
533 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
534 733a2b6a Iustin Pop

535 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
536 733a2b6a Iustin Pop
  @param node: the node to check
537 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
538 733a2b6a Iustin Pop

539 733a2b6a Iustin Pop
  """
540 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
541 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node,
542 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
543 733a2b6a Iustin Pop
544 733a2b6a Iustin Pop
545 231cd901 Iustin Pop
def _CheckNodeHasOS(lu, node, os_name, force_variant):
546 231cd901 Iustin Pop
  """Ensure that a node supports a given OS.
547 231cd901 Iustin Pop

548 231cd901 Iustin Pop
  @param lu: the LU on behalf of which we make the check
549 231cd901 Iustin Pop
  @param node: the node to check
550 231cd901 Iustin Pop
  @param os_name: the OS to query about
551 231cd901 Iustin Pop
  @param force_variant: whether to ignore variant errors
552 231cd901 Iustin Pop
  @raise errors.OpPrereqError: if the node does not support the OS
553 231cd901 Iustin Pop

554 231cd901 Iustin Pop
  """
555 231cd901 Iustin Pop
  result = lu.rpc.call_os_get(node, os_name)
556 231cd901 Iustin Pop
  result.Raise("OS '%s' not in supported OS list for node %s" %
557 231cd901 Iustin Pop
               (os_name, node),
558 231cd901 Iustin Pop
               prereq=True, ecode=errors.ECODE_INVAL)
559 231cd901 Iustin Pop
  if not force_variant:
560 231cd901 Iustin Pop
    _CheckOSVariant(result.payload, os_name)
561 231cd901 Iustin Pop
562 231cd901 Iustin Pop
563 5d55819e Iustin Pop
def _CheckDiskTemplate(template):
564 5d55819e Iustin Pop
  """Ensure a given disk template is valid.
565 5d55819e Iustin Pop

566 5d55819e Iustin Pop
  """
567 5d55819e Iustin Pop
  if template not in constants.DISK_TEMPLATES:
568 5d55819e Iustin Pop
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
569 5d55819e Iustin Pop
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
570 5d55819e Iustin Pop
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
571 5d55819e Iustin Pop
572 5d55819e Iustin Pop
573 31624382 Iustin Pop
def _CheckInstanceDown(lu, instance, reason):
574 31624382 Iustin Pop
  """Ensure that an instance is not running."""
575 31624382 Iustin Pop
  if instance.admin_up:
576 31624382 Iustin Pop
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
577 31624382 Iustin Pop
                               (instance.name, reason), errors.ECODE_STATE)
578 31624382 Iustin Pop
579 31624382 Iustin Pop
  pnode = instance.primary_node
580 31624382 Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
581 31624382 Iustin Pop
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
582 31624382 Iustin Pop
              prereq=True, ecode=errors.ECODE_ENVIRON)
583 31624382 Iustin Pop
584 31624382 Iustin Pop
  if instance.name in ins_l.payload:
585 31624382 Iustin Pop
    raise errors.OpPrereqError("Instance %s is running, %s" %
586 31624382 Iustin Pop
                               (instance.name, reason), errors.ECODE_STATE)
587 31624382 Iustin Pop
588 31624382 Iustin Pop
589 cf26a87a Iustin Pop
def _ExpandItemName(fn, name, kind):
590 cf26a87a Iustin Pop
  """Expand an item name.
591 cf26a87a Iustin Pop

592 cf26a87a Iustin Pop
  @param fn: the function to use for expansion
593 cf26a87a Iustin Pop
  @param name: requested item name
594 cf26a87a Iustin Pop
  @param kind: text description ('Node' or 'Instance')
595 cf26a87a Iustin Pop
  @return: the resolved (full) name
596 cf26a87a Iustin Pop
  @raise errors.OpPrereqError: if the item is not found
597 cf26a87a Iustin Pop

598 cf26a87a Iustin Pop
  """
599 cf26a87a Iustin Pop
  full_name = fn(name)
600 cf26a87a Iustin Pop
  if full_name is None:
601 cf26a87a Iustin Pop
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
602 cf26a87a Iustin Pop
                               errors.ECODE_NOENT)
603 cf26a87a Iustin Pop
  return full_name
604 cf26a87a Iustin Pop
605 cf26a87a Iustin Pop
606 cf26a87a Iustin Pop
def _ExpandNodeName(cfg, name):
607 cf26a87a Iustin Pop
  """Wrapper over L{_ExpandItemName} for nodes."""
608 cf26a87a Iustin Pop
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
609 cf26a87a Iustin Pop
610 cf26a87a Iustin Pop
611 cf26a87a Iustin Pop
def _ExpandInstanceName(cfg, name):
612 cf26a87a Iustin Pop
  """Wrapper over L{_ExpandItemName} for instance."""
613 cf26a87a Iustin Pop
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
614 cf26a87a Iustin Pop
615 cf26a87a Iustin Pop
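
# Illustrative sketch (not part of the original module): the expansion helpers
# above just wrap a resolver callable and turn a failed lookup into
# OpPrereqError.  _ExampleResolver stands in for cfg.ExpandNodeName or
# cfg.ExpandInstanceName.
def _ExampleResolver(name):
  """Example-only resolver mapping short names to full names."""
  return {"node1": "node1.example.com"}.get(name)

# _ExpandItemName(_ExampleResolver, "node1", "Node") returns
# "node1.example.com"; _ExpandItemName(_ExampleResolver, "ghost", "Node")
# raises OpPrereqError("Node 'ghost' not known") with errors.ECODE_NOENT.
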
616 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
617 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
618 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
619 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
620 e4376078 Iustin Pop

621 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
622 e4376078 Iustin Pop

623 e4376078 Iustin Pop
  @type name: string
624 e4376078 Iustin Pop
  @param name: the name of the instance
625 e4376078 Iustin Pop
  @type primary_node: string
626 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
627 e4376078 Iustin Pop
  @type secondary_nodes: list
628 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
629 e4376078 Iustin Pop
  @type os_type: string
630 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
631 0d68c45d Iustin Pop
  @type status: boolean
632 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
633 e4376078 Iustin Pop
  @type memory: string
634 e4376078 Iustin Pop
  @param memory: the memory size of the instance
635 e4376078 Iustin Pop
  @type vcpus: string
636 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
637 e4376078 Iustin Pop
  @type nics: list
638 5e3d3eb3 Guido Trotter
  @param nics: list of tuples (ip, mac, mode, link) representing
639 5e3d3eb3 Guido Trotter
      the NICs the instance has
640 2c2690c9 Iustin Pop
  @type disk_template: string
641 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
642 2c2690c9 Iustin Pop
  @type disks: list
643 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
644 67fc3042 Iustin Pop
  @type bep: dict
645 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
646 67fc3042 Iustin Pop
  @type hvp: dict
647 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
648 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
649 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
650 e4376078 Iustin Pop
  @rtype: dict
651 e4376078 Iustin Pop
  @return: the hook environment for this instance
652 ecb215b5 Michael Hanselmann

653 396e1b78 Michael Hanselmann
  """
654 0d68c45d Iustin Pop
  if status:
655 0d68c45d Iustin Pop
    str_status = "up"
656 0d68c45d Iustin Pop
  else:
657 0d68c45d Iustin Pop
    str_status = "down"
658 396e1b78 Michael Hanselmann
  env = {
659 0e137c28 Iustin Pop
    "OP_TARGET": name,
660 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
661 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
662 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
663 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
664 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
665 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
666 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
667 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
668 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
669 396e1b78 Michael Hanselmann
  }
670 396e1b78 Michael Hanselmann
671 396e1b78 Michael Hanselmann
  if nics:
672 396e1b78 Michael Hanselmann
    nic_count = len(nics)
673 62f0dd02 Guido Trotter
    for idx, (ip, mac, mode, link) in enumerate(nics):
674 396e1b78 Michael Hanselmann
      if ip is None:
675 396e1b78 Michael Hanselmann
        ip = ""
676 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
677 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
678 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_MODE" % idx] = mode
679 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_LINK" % idx] = link
680 62f0dd02 Guido Trotter
      if mode == constants.NIC_MODE_BRIDGED:
681 62f0dd02 Guido Trotter
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
682 396e1b78 Michael Hanselmann
  else:
683 396e1b78 Michael Hanselmann
    nic_count = 0
684 396e1b78 Michael Hanselmann
685 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
686 396e1b78 Michael Hanselmann
687 2c2690c9 Iustin Pop
  if disks:
688 2c2690c9 Iustin Pop
    disk_count = len(disks)
689 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
690 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
691 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
692 2c2690c9 Iustin Pop
  else:
693 2c2690c9 Iustin Pop
    disk_count = 0
694 2c2690c9 Iustin Pop
695 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
696 2c2690c9 Iustin Pop
697 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
698 67fc3042 Iustin Pop
    for key, value in source.items():
699 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
700 67fc3042 Iustin Pop
701 396e1b78 Michael Hanselmann
  return env
702 396e1b78 Michael Hanselmann
703 96acbc09 Michael Hanselmann
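
# Illustrative sketch (not part of the original module): the environment
# produced for a one-NIC, one-disk instance.  All values below are invented
# for the example.
#
#   env = _BuildInstanceHookEnv("inst1.example.com", "node1.example.com",
#                               ["node2.example.com"], "debian-etch", True,
#                               128, 1,
#                               [("198.51.100.10", "aa:00:00:11:22:33",
#                                 constants.NIC_MODE_BRIDGED, "xen-br0")],
#                               constants.DT_DRBD8, [(10240, "rw")],
#                               {"auto_balance": True}, {}, "xen-pvm")
#
# would contain, among others:
#   INSTANCE_NAME=inst1.example.com   INSTANCE_STATUS=up
#   INSTANCE_NIC_COUNT=1              INSTANCE_NIC0_BRIDGE=xen-br0
#   INSTANCE_DISK_COUNT=1             INSTANCE_DISK0_SIZE=10240
#   INSTANCE_BE_auto_balance=True
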
704 f9b10246 Guido Trotter
def _NICListToTuple(lu, nics):
705 62f0dd02 Guido Trotter
  """Build a list of nic information tuples.
706 62f0dd02 Guido Trotter

707 f9b10246 Guido Trotter
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
708 f9b10246 Guido Trotter
  value in LUQueryInstanceData.
709 62f0dd02 Guido Trotter

710 62f0dd02 Guido Trotter
  @type lu:  L{LogicalUnit}
711 62f0dd02 Guido Trotter
  @param lu: the logical unit on whose behalf we execute
712 62f0dd02 Guido Trotter
  @type nics: list of L{objects.NIC}
713 62f0dd02 Guido Trotter
  @param nics: list of nics to convert to hooks tuples
714 62f0dd02 Guido Trotter

715 62f0dd02 Guido Trotter
  """
716 62f0dd02 Guido Trotter
  hooks_nics = []
717 62f0dd02 Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
718 62f0dd02 Guido Trotter
  for nic in nics:
719 62f0dd02 Guido Trotter
    ip = nic.ip
720 62f0dd02 Guido Trotter
    mac = nic.mac
721 62f0dd02 Guido Trotter
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
722 62f0dd02 Guido Trotter
    mode = filled_params[constants.NIC_MODE]
723 62f0dd02 Guido Trotter
    link = filled_params[constants.NIC_LINK]
724 62f0dd02 Guido Trotter
    hooks_nics.append((ip, mac, mode, link))
725 62f0dd02 Guido Trotter
  return hooks_nics
726 396e1b78 Michael Hanselmann
727 96acbc09 Michael Hanselmann
728 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
729 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
730 ecb215b5 Michael Hanselmann

731 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
732 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
733 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
734 e4376078 Iustin Pop
  @param instance: the instance for which we should build the
735 e4376078 Iustin Pop
      environment
736 e4376078 Iustin Pop
  @type override: dict
737 e4376078 Iustin Pop
  @param override: dictionary with key/values that will override
738 e4376078 Iustin Pop
      our values
739 e4376078 Iustin Pop
  @rtype: dict
740 e4376078 Iustin Pop
  @return: the hook environment dictionary
741 e4376078 Iustin Pop

742 ecb215b5 Michael Hanselmann
  """
743 67fc3042 Iustin Pop
  cluster = lu.cfg.GetClusterInfo()
744 67fc3042 Iustin Pop
  bep = cluster.FillBE(instance)
745 67fc3042 Iustin Pop
  hvp = cluster.FillHV(instance)
746 396e1b78 Michael Hanselmann
  args = {
747 396e1b78 Michael Hanselmann
    'name': instance.name,
748 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
749 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
750 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
751 0d68c45d Iustin Pop
    'status': instance.admin_up,
752 338e51e8 Iustin Pop
    'memory': bep[constants.BE_MEMORY],
753 338e51e8 Iustin Pop
    'vcpus': bep[constants.BE_VCPUS],
754 f9b10246 Guido Trotter
    'nics': _NICListToTuple(lu, instance.nics),
755 2c2690c9 Iustin Pop
    'disk_template': instance.disk_template,
756 2c2690c9 Iustin Pop
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
757 67fc3042 Iustin Pop
    'bep': bep,
758 67fc3042 Iustin Pop
    'hvp': hvp,
759 b0c63e2b Iustin Pop
    'hypervisor_name': instance.hypervisor,
760 396e1b78 Michael Hanselmann
  }
761 396e1b78 Michael Hanselmann
  if override:
762 396e1b78 Michael Hanselmann
    args.update(override)
763 7260cfbe Iustin Pop
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
764 396e1b78 Michael Hanselmann
765 396e1b78 Michael Hanselmann
766 44485f49 Guido Trotter
def _AdjustCandidatePool(lu, exceptions):
767 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
768 ec0292f1 Iustin Pop

769 ec0292f1 Iustin Pop
  """
770 44485f49 Guido Trotter
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
771 ec0292f1 Iustin Pop
  if mod_list:
772 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
773 1f864b60 Iustin Pop
               utils.CommaJoin(node.name for node in mod_list))
774 ec0292f1 Iustin Pop
    for name in mod_list:
775 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
776 44485f49 Guido Trotter
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
777 ec0292f1 Iustin Pop
  if mc_now > mc_max:
778 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
779 ec0292f1 Iustin Pop
               (mc_now, mc_max))
780 ec0292f1 Iustin Pop
781 ec0292f1 Iustin Pop
782 6d7e1f20 Guido Trotter
def _DecideSelfPromotion(lu, exceptions=None):
783 6d7e1f20 Guido Trotter
  """Decide whether I should promote myself as a master candidate.
784 6d7e1f20 Guido Trotter

785 6d7e1f20 Guido Trotter
  """
786 6d7e1f20 Guido Trotter
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
787 6d7e1f20 Guido Trotter
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
788 6d7e1f20 Guido Trotter
  # the new node will increase mc_max by one, so:
789 6d7e1f20 Guido Trotter
  mc_should = min(mc_should + 1, cp_size)
790 6d7e1f20 Guido Trotter
  return mc_now < mc_should
791 6d7e1f20 Guido Trotter
792 6d7e1f20 Guido Trotter
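
# Worked example (values invented): with candidate_pool_size = 10 and
# GetMasterCandidateStats reporting mc_now = 3, mc_should = 3, the node being
# added raises the target to min(3 + 1, 10) = 4, so 3 < 4 and the function
# returns True (promote).  With candidate_pool_size = 3 and
# mc_now = mc_should = 3, min(3 + 1, 3) = 3 and 3 < 3 is False, so the new
# node stays a regular node.
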
793 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
794 b165e77e Guido Trotter
                               profile=constants.PP_DEFAULT):
795 b165e77e Guido Trotter
  """Check that the brigdes needed by a list of nics exist.
796 b165e77e Guido Trotter

797 b165e77e Guido Trotter
  """
798 b165e77e Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
799 b165e77e Guido Trotter
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
800 b165e77e Guido Trotter
                for nic in target_nics]
801 b165e77e Guido Trotter
  brlist = [params[constants.NIC_LINK] for params in paramslist
802 b165e77e Guido Trotter
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
803 b165e77e Guido Trotter
  if brlist:
804 b165e77e Guido Trotter
    result = lu.rpc.call_bridges_exist(target_node, brlist)
805 4c4e4e1e Iustin Pop
    result.Raise("Error checking bridges on destination node '%s'" %
806 045dd6d9 Iustin Pop
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
807 b165e77e Guido Trotter
808 b165e77e Guido Trotter
809 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
810 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
811 bf6929a2 Alexander Schreiber

812 bf6929a2 Alexander Schreiber
  """
813 b165e77e Guido Trotter
  if node is None:
814 29921401 Iustin Pop
    node = instance.primary_node
815 b165e77e Guido Trotter
  _CheckNicsBridgesExist(lu, instance.nics, node)
816 bf6929a2 Alexander Schreiber
817 bf6929a2 Alexander Schreiber
818 c6f1af07 Iustin Pop
def _CheckOSVariant(os_obj, name):
819 f2c05717 Guido Trotter
  """Check whether an OS name conforms to the os variants specification.
820 f2c05717 Guido Trotter

821 c6f1af07 Iustin Pop
  @type os_obj: L{objects.OS}
822 c6f1af07 Iustin Pop
  @param os_obj: OS object to check
823 f2c05717 Guido Trotter
  @type name: string
824 f2c05717 Guido Trotter
  @param name: OS name passed by the user, to check for validity
825 f2c05717 Guido Trotter

826 f2c05717 Guido Trotter
  """
827 c6f1af07 Iustin Pop
  if not os_obj.supported_variants:
828 f2c05717 Guido Trotter
    return
829 f2c05717 Guido Trotter
  try:
830 f2c05717 Guido Trotter
    variant = name.split("+", 1)[1]
831 f2c05717 Guido Trotter
  except IndexError:
832 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("OS name must include a variant",
833 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
834 f2c05717 Guido Trotter
835 c6f1af07 Iustin Pop
  if variant not in os_obj.supported_variants:
836 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
837 f2c05717 Guido Trotter
838 f2c05717 Guido Trotter
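
# Illustrative sketch (not part of the original module): a stand-in OS object
# showing how the "+variant" suffix is validated by _CheckOSVariant above.
class _ExampleVariantOS(object):
  """Example-only object mimicking objects.OS for variant checks."""
  supported_variants = ["lenny", "squeeze"]

# _CheckOSVariant(_ExampleVariantOS(), "debian+squeeze") passes;
# _CheckOSVariant(_ExampleVariantOS(), "debian") raises "OS name must include
# a variant"; _CheckOSVariant(_ExampleVariantOS(), "debian+sid") raises
# "Unsupported OS variant".
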
839 5ba9701d Michael Hanselmann
def _GetNodeInstancesInner(cfg, fn):
840 5ba9701d Michael Hanselmann
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
841 5ba9701d Michael Hanselmann
842 5ba9701d Michael Hanselmann
843 e9721add Michael Hanselmann
def _GetNodeInstances(cfg, node_name):
844 e9721add Michael Hanselmann
  """Returns a list of all primary and secondary instances on a node.
845 e9721add Michael Hanselmann

846 e9721add Michael Hanselmann
  """
847 e9721add Michael Hanselmann
848 e9721add Michael Hanselmann
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
849 e9721add Michael Hanselmann
850 e9721add Michael Hanselmann
851 80cb875c Michael Hanselmann
def _GetNodePrimaryInstances(cfg, node_name):
852 80cb875c Michael Hanselmann
  """Returns primary instances on a node.
853 80cb875c Michael Hanselmann

854 80cb875c Michael Hanselmann
  """
855 5ba9701d Michael Hanselmann
  return _GetNodeInstancesInner(cfg,
856 5ba9701d Michael Hanselmann
                                lambda inst: node_name == inst.primary_node)
857 80cb875c Michael Hanselmann
858 80cb875c Michael Hanselmann
859 692738fc Michael Hanselmann
def _GetNodeSecondaryInstances(cfg, node_name):
860 692738fc Michael Hanselmann
  """Returns secondary instances on a node.
861 692738fc Michael Hanselmann

862 692738fc Michael Hanselmann
  """
863 5ba9701d Michael Hanselmann
  return _GetNodeInstancesInner(cfg,
864 5ba9701d Michael Hanselmann
                                lambda inst: node_name in inst.secondary_nodes)
865 692738fc Michael Hanselmann
866 692738fc Michael Hanselmann
867 efb8da02 Michael Hanselmann
def _GetStorageTypeArgs(cfg, storage_type):
868 efb8da02 Michael Hanselmann
  """Returns the arguments for a storage type.
869 efb8da02 Michael Hanselmann

870 efb8da02 Michael Hanselmann
  """
871 efb8da02 Michael Hanselmann
  # Special case for file storage
872 efb8da02 Michael Hanselmann
  if storage_type == constants.ST_FILE:
873 a4d138b7 Michael Hanselmann
    # storage.FileStorage wants a list of storage directories
874 a4d138b7 Michael Hanselmann
    return [[cfg.GetFileStorageDir()]]
875 efb8da02 Michael Hanselmann
876 efb8da02 Michael Hanselmann
  return []
877 efb8da02 Michael Hanselmann
878 efb8da02 Michael Hanselmann
879 2d9005d8 Michael Hanselmann
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
880 2d9005d8 Michael Hanselmann
  faulty = []
881 2d9005d8 Michael Hanselmann
882 2d9005d8 Michael Hanselmann
  for dev in instance.disks:
883 2d9005d8 Michael Hanselmann
    cfg.SetDiskID(dev, node_name)
884 2d9005d8 Michael Hanselmann
885 2d9005d8 Michael Hanselmann
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
886 2d9005d8 Michael Hanselmann
  result.Raise("Failed to get disk status from node %s" % node_name,
887 045dd6d9 Iustin Pop
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
888 2d9005d8 Michael Hanselmann
889 2d9005d8 Michael Hanselmann
  for idx, bdev_status in enumerate(result.payload):
890 2d9005d8 Michael Hanselmann
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
891 2d9005d8 Michael Hanselmann
      faulty.append(idx)
892 2d9005d8 Michael Hanselmann
893 2d9005d8 Michael Hanselmann
  return faulty
894 2d9005d8 Michael Hanselmann
895 2d9005d8 Michael Hanselmann
896 b98bf262 Michael Hanselmann
def _FormatTimestamp(secs):
897 b98bf262 Michael Hanselmann
  """Formats a Unix timestamp with the local timezone.
898 b98bf262 Michael Hanselmann

899 b98bf262 Michael Hanselmann
  """
900 b98bf262 Michael Hanselmann
  return time.strftime("%F %T %Z", time.gmtime(secs))
901 b98bf262 Michael Hanselmann
902 b98bf262 Michael Hanselmann
903 b5f5fae9 Luca Bigliardi
class LUPostInitCluster(LogicalUnit):
904 b5f5fae9 Luca Bigliardi
  """Logical unit for running hooks after cluster initialization.
905 b5f5fae9 Luca Bigliardi

906 b5f5fae9 Luca Bigliardi
  """
907 b5f5fae9 Luca Bigliardi
  HPATH = "cluster-init"
908 b5f5fae9 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
909 b5f5fae9 Luca Bigliardi
  _OP_REQP = []
910 b5f5fae9 Luca Bigliardi
911 b5f5fae9 Luca Bigliardi
  def BuildHooksEnv(self):
912 b5f5fae9 Luca Bigliardi
    """Build hooks env.
913 b5f5fae9 Luca Bigliardi

914 b5f5fae9 Luca Bigliardi
    """
915 b5f5fae9 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
916 b5f5fae9 Luca Bigliardi
    mn = self.cfg.GetMasterNode()
917 b5f5fae9 Luca Bigliardi
    return env, [], [mn]
918 b5f5fae9 Luca Bigliardi
919 b5f5fae9 Luca Bigliardi
  def CheckPrereq(self):
920 b5f5fae9 Luca Bigliardi
    """No prerequisites to check.
921 b5f5fae9 Luca Bigliardi

922 b5f5fae9 Luca Bigliardi
    """
923 b5f5fae9 Luca Bigliardi
    return True
924 b5f5fae9 Luca Bigliardi
925 b5f5fae9 Luca Bigliardi
  def Exec(self, feedback_fn):
926 b5f5fae9 Luca Bigliardi
    """Nothing to do.
927 b5f5fae9 Luca Bigliardi

928 b5f5fae9 Luca Bigliardi
    """
929 b5f5fae9 Luca Bigliardi
    return True
930 b5f5fae9 Luca Bigliardi
931 b5f5fae9 Luca Bigliardi
932 b2c750a4 Luca Bigliardi
class LUDestroyCluster(LogicalUnit):
933 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
934 a8083063 Iustin Pop

935 a8083063 Iustin Pop
  """
936 b2c750a4 Luca Bigliardi
  HPATH = "cluster-destroy"
937 b2c750a4 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
938 a8083063 Iustin Pop
  _OP_REQP = []
939 a8083063 Iustin Pop
940 b2c750a4 Luca Bigliardi
  def BuildHooksEnv(self):
941 b2c750a4 Luca Bigliardi
    """Build hooks env.
942 b2c750a4 Luca Bigliardi

943 b2c750a4 Luca Bigliardi
    """
944 b2c750a4 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
945 b2c750a4 Luca Bigliardi
    return env, [], []
946 b2c750a4 Luca Bigliardi
947 a8083063 Iustin Pop
  def CheckPrereq(self):
948 a8083063 Iustin Pop
    """Check prerequisites.
949 a8083063 Iustin Pop

950 a8083063 Iustin Pop
    This checks whether the cluster is empty.
951 a8083063 Iustin Pop

952 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
953 a8083063 Iustin Pop

954 a8083063 Iustin Pop
    """
955 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
956 a8083063 Iustin Pop
957 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
958 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
959 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
960 5c983ee5 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1),
961 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
962 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
963 db915bd1 Michael Hanselmann
    if instancelist:
964 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
965 5c983ee5 Iustin Pop
                                 " this cluster." % len(instancelist),
966 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
969 a8083063 Iustin Pop
    """Destroys the cluster.
970 a8083063 Iustin Pop

971 a8083063 Iustin Pop
    """
972 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
973 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
974 3141ad3b Luca Bigliardi
975 3141ad3b Luca Bigliardi
    # Run post hooks on master node before it's removed
976 3141ad3b Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
977 3141ad3b Luca Bigliardi
    try:
978 3141ad3b Luca Bigliardi
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
979 3141ad3b Luca Bigliardi
    except:
980 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
981 3141ad3b Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % master)
982 3141ad3b Luca Bigliardi
983 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
984 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
985 b989b9d9 Ken Wehr
986 b989b9d9 Ken Wehr
    if modify_ssh_setup:
987 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
988 b989b9d9 Ken Wehr
      utils.CreateBackup(priv_key)
989 b989b9d9 Ken Wehr
      utils.CreateBackup(pub_key)
990 b989b9d9 Ken Wehr
991 140aa4a8 Iustin Pop
    return master
992 a8083063 Iustin Pop
993 a8083063 Iustin Pop
994 b98bf262 Michael Hanselmann
def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
995 b98bf262 Michael Hanselmann
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
996 b98bf262 Michael Hanselmann
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
997 b98bf262 Michael Hanselmann
  """Verifies certificate details for LUVerifyCluster.
998 b98bf262 Michael Hanselmann

999 b98bf262 Michael Hanselmann
  """
1000 b98bf262 Michael Hanselmann
  if expired:
1001 b98bf262 Michael Hanselmann
    msg = "Certificate %s is expired" % filename
1002 b98bf262 Michael Hanselmann
1003 b98bf262 Michael Hanselmann
    if not_before is not None and not_after is not None:
1004 b98bf262 Michael Hanselmann
      msg += (" (valid from %s to %s)" %
1005 b98bf262 Michael Hanselmann
              (_FormatTimestamp(not_before),
1006 b98bf262 Michael Hanselmann
               _FormatTimestamp(not_after)))
1007 b98bf262 Michael Hanselmann
    elif not_before is not None:
1008 b98bf262 Michael Hanselmann
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
1009 b98bf262 Michael Hanselmann
    elif not_after is not None:
1010 b98bf262 Michael Hanselmann
      msg += " (valid until %s)" % _FormatTimestamp(not_after)
1011 b98bf262 Michael Hanselmann
1012 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR, msg)
1013 b98bf262 Michael Hanselmann
1014 b98bf262 Michael Hanselmann
  elif not_before is not None and not_before > now:
1015 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_WARNING,
1016 b98bf262 Michael Hanselmann
            "Certificate %s not yet valid (valid from %s)" %
1017 b98bf262 Michael Hanselmann
            (filename, _FormatTimestamp(not_before)))
1018 b98bf262 Michael Hanselmann
1019 b98bf262 Michael Hanselmann
  elif not_after is not None:
1020 b98bf262 Michael Hanselmann
    remaining_days = int((not_after - now) / (24 * 3600))
1021 b98bf262 Michael Hanselmann
1022 b98bf262 Michael Hanselmann
    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
1023 b98bf262 Michael Hanselmann
1024 b98bf262 Michael Hanselmann
    if remaining_days <= error_days:
1025 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_ERROR, msg)
1026 b98bf262 Michael Hanselmann
1027 b98bf262 Michael Hanselmann
    if remaining_days <= warn_days:
1028 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_WARNING, msg)
1029 b98bf262 Michael Hanselmann
1030 b98bf262 Michael Hanselmann
  return (None, None)
1031 b98bf262 Michael Hanselmann
1032 b98bf262 Michael Hanselmann
1033 b98bf262 Michael Hanselmann
def _VerifyCertificate(filename):
1034 b98bf262 Michael Hanselmann
  """Verifies a certificate for LUVerifyCluster.
1035 b98bf262 Michael Hanselmann

1036 b98bf262 Michael Hanselmann
  @type filename: string
1037 b98bf262 Michael Hanselmann
  @param filename: Path to PEM file
1038 b98bf262 Michael Hanselmann

1039 b98bf262 Michael Hanselmann
  """
1040 b98bf262 Michael Hanselmann
  try:
1041 b98bf262 Michael Hanselmann
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1042 b98bf262 Michael Hanselmann
                                           utils.ReadFile(filename))
1043 b98bf262 Michael Hanselmann
  except Exception, err: # pylint: disable-msg=W0703
1044 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR,
1045 b98bf262 Michael Hanselmann
            "Failed to load X509 certificate %s: %s" % (filename, err))
1046 b98bf262 Michael Hanselmann
1047 b98bf262 Michael Hanselmann
  # Depending on the pyOpenSSL version, this can just return (None, None)
1048 b98bf262 Michael Hanselmann
  (not_before, not_after) = utils.GetX509CertValidity(cert)
1049 b98bf262 Michael Hanselmann
1050 b98bf262 Michael Hanselmann
  return _VerifyCertificateInner(filename, cert.has_expired(),
1051 b98bf262 Michael Hanselmann
                                 not_before, not_after, time.time())
1052 b98bf262 Michael Hanselmann
1053 b98bf262 Michael Hanselmann
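# Illustrative summary (not part of the original module) of how the two
# certificate helpers above map onto verification results, in the order the
# checks are made:
#   - certificate already expired        -> (ETYPE_ERROR, "... is expired ...")
#   - not yet valid (not_before > now)   -> (ETYPE_WARNING, "... not yet valid ...")
#   - expires within error_days          -> (ETYPE_ERROR, "... expires in N days")
#   - expires within warn_days           -> (ETYPE_WARNING, "... expires in N days")
#   - otherwise                          -> (None, None)
# The day thresholds default to constants.SSL_CERT_EXPIRATION_ERROR and
# constants.SSL_CERT_EXPIRATION_WARN, as shown in the signature above.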
1054 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
1055 a8083063 Iustin Pop
  """Verifies the cluster status.
1056 a8083063 Iustin Pop

1057 a8083063 Iustin Pop
  """
1058 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
1059 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
1060 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
1061 d4b9d97f Guido Trotter
  REQ_BGL = False
1062 d4b9d97f Guido Trotter
1063 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
1064 7c874ee1 Iustin Pop
  TNODE = "node"
1065 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
1066 7c874ee1 Iustin Pop
1067 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1068 b98bf262 Michael Hanselmann
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1069 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1070 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1071 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1072 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1074 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1075 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
1076 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1077 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1078 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
1079 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
1080 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
1081 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
1082 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1083 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1084 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
1085 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
1086 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
1087 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
1088 313b2dd4 Michael Hanselmann
  ENODETIME = (TNODE, "ENODETIME")
1089 7c874ee1 Iustin Pop
1090 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
1091 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
1092 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
1093 a0c9776a Iustin Pop
1094 02c521e4 Iustin Pop
  class NodeImage(object):
1095 02c521e4 Iustin Pop
    """A class representing the logical and physical status of a node.
1096 02c521e4 Iustin Pop

1097 02c521e4 Iustin Pop
    @ivar volumes: a structure as returned from
1098 3a488770 Iustin Pop
        L{ganeti.backend.GetVolumeList} (runtime)
1099 02c521e4 Iustin Pop
    @ivar instances: a list of running instances (runtime)
1100 02c521e4 Iustin Pop
    @ivar pinst: list of configured primary instances (config)
1101 02c521e4 Iustin Pop
    @ivar sinst: list of configured secondary instances (config)
1102 02c521e4 Iustin Pop
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
1103 02c521e4 Iustin Pop
        of this node (config)
1104 02c521e4 Iustin Pop
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1105 02c521e4 Iustin Pop
    @ivar dfree: free disk, as reported by the node (runtime)
1106 02c521e4 Iustin Pop
    @ivar offline: the offline status (config)
1107 02c521e4 Iustin Pop
    @type rpc_fail: boolean
1108 02c521e4 Iustin Pop
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
1109 02c521e4 Iustin Pop
        not whether the individual keys were correct) (runtime)
1110 02c521e4 Iustin Pop
    @type lvm_fail: boolean
1111 02c521e4 Iustin Pop
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1112 02c521e4 Iustin Pop
    @type hyp_fail: boolean
1113 02c521e4 Iustin Pop
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1114 02c521e4 Iustin Pop
    @type ghost: boolean
1115 02c521e4 Iustin Pop
    @ivar ghost: whether this is a known node or not (config)
1116 02c521e4 Iustin Pop

1117 02c521e4 Iustin Pop
    """
1118 02c521e4 Iustin Pop
    def __init__(self, offline=False):
1119 02c521e4 Iustin Pop
      self.volumes = {}
1120 02c521e4 Iustin Pop
      self.instances = []
1121 02c521e4 Iustin Pop
      self.pinst = []
1122 02c521e4 Iustin Pop
      self.sinst = []
1123 02c521e4 Iustin Pop
      self.sbp = {}
1124 02c521e4 Iustin Pop
      self.mfree = 0
1125 02c521e4 Iustin Pop
      self.dfree = 0
1126 02c521e4 Iustin Pop
      self.offline = offline
1127 02c521e4 Iustin Pop
      self.rpc_fail = False
1128 02c521e4 Iustin Pop
      self.lvm_fail = False
1129 02c521e4 Iustin Pop
      self.hyp_fail = False
1130 02c521e4 Iustin Pop
      self.ghost = False
1131 02c521e4 Iustin Pop
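  # Illustrative note (not part of the original module): Exec() below creates
  # one NodeImage per configured node via self.NodeImage(offline=node.offline),
  # fills in the config-derived fields (pinst, sinst, sbp) while walking the
  # instance list, and later lets the _UpdateNode*() helpers populate the
  # runtime fields (volumes, instances, mfree, dfree) from the verify RPC.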
1132 d4b9d97f Guido Trotter
  def ExpandNames(self):
1133 d4b9d97f Guido Trotter
    self.needed_locks = {
1134 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1135 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1136 d4b9d97f Guido Trotter
    }
1137 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1138 a8083063 Iustin Pop
1139 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
1140 7c874ee1 Iustin Pop
    """Format an error message.
1141 7c874ee1 Iustin Pop

1142 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
1143 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
1144 7c874ee1 Iustin Pop

1145 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
1146 7c874ee1 Iustin Pop

1147 7c874ee1 Iustin Pop
    """
1148 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1149 7c874ee1 Iustin Pop
    itype, etxt = ecode
1150 7c874ee1 Iustin Pop
    # first complete the msg
1151 7c874ee1 Iustin Pop
    if args:
1152 7c874ee1 Iustin Pop
      msg = msg % args
1153 7c874ee1 Iustin Pop
    # then format the whole message
1154 7c874ee1 Iustin Pop
    if self.op.error_codes:
1155 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1156 7c874ee1 Iustin Pop
    else:
1157 7c874ee1 Iustin Pop
      if item:
1158 7c874ee1 Iustin Pop
        item = " " + item
1159 7c874ee1 Iustin Pop
      else:
1160 7c874ee1 Iustin Pop
        item = ""
1161 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1162 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
1163 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
1164 7c874ee1 Iustin Pop
1165 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
1166 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
1167 a0c9776a Iustin Pop

1168 a0c9776a Iustin Pop
    """
1169 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
1170 a0c9776a Iustin Pop
    if cond:
1171 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
1172 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
1173 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1174 a0c9776a Iustin Pop
      self.bad = self.bad or cond
1175 a0c9776a Iustin Pop
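  # Illustrative example (not part of the original module): for the same
  # problem, _Error above produces either a machine-parseable or a plain
  # message depending on the opcode's error_codes flag, e.g. with
  # self.ENODELVM == (TNODE, "ENODELVM") and item "node1":
  #   error_codes=True:  "ERROR:ENODELVM:node:node1:unable to check volume groups"
  #   error_codes=False: "ERROR: node node1: unable to check volume groups"
  # (both are then reported through feedback_fn with a leading "  - ").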
1176 02c521e4 Iustin Pop
  def _VerifyNode(self, ninfo, nresult):
1177 a8083063 Iustin Pop
    """Run multiple tests against a node.
1178 a8083063 Iustin Pop

1179 112f18a5 Iustin Pop
    Test list:
1180 e4376078 Iustin Pop

1181 a8083063 Iustin Pop
      - compares ganeti version
1182 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
1183 a8083063 Iustin Pop
      - checks config file checksum
1184 a8083063 Iustin Pop
      - checks ssh to other nodes
1185 a8083063 Iustin Pop

1186 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1187 02c521e4 Iustin Pop
    @param ninfo: the node to check
1188 02c521e4 Iustin Pop
    @param nresult: the results from the node
1189 02c521e4 Iustin Pop
    @rtype: boolean
1190 02c521e4 Iustin Pop
    @return: whether overall this call was successful (and we can expect
1191 02c521e4 Iustin Pop
         reasonable values in the response)
1192 098c0958 Michael Hanselmann

1193 a8083063 Iustin Pop
    """
1194 02c521e4 Iustin Pop
    node = ninfo.name
1195 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1196 25361b9a Iustin Pop
1197 02c521e4 Iustin Pop
    # main result, nresult should be a non-empty dict
1198 02c521e4 Iustin Pop
    test = not nresult or not isinstance(nresult, dict)
1199 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1200 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1201 a0c9776a Iustin Pop
    if test:
1202 02c521e4 Iustin Pop
      return False
1203 25361b9a Iustin Pop
1204 a8083063 Iustin Pop
    # compares ganeti version
1205 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1206 02c521e4 Iustin Pop
    remote_version = nresult.get("version", None)
1207 a0c9776a Iustin Pop
    test = not (remote_version and
1208 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1209 a0c9776a Iustin Pop
                len(remote_version) == 2)
1210 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1211 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1212 a0c9776a Iustin Pop
    if test:
1213 02c521e4 Iustin Pop
      return False
1214 a0c9776a Iustin Pop
1215 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1216 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1217 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1218 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1219 a0c9776a Iustin Pop
    if test:
1220 02c521e4 Iustin Pop
      return False
1221 a8083063 Iustin Pop
1222 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1223 a8083063 Iustin Pop
1224 e9ce0a64 Iustin Pop
    # full package version
1225 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1226 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1227 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1228 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1229 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1230 e9ce0a64 Iustin Pop
1231 02c521e4 Iustin Pop
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1232 02c521e4 Iustin Pop
    if isinstance(hyp_result, dict):
1233 02c521e4 Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1234 02c521e4 Iustin Pop
        test = hv_result is not None
1235 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1236 02c521e4 Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1237 a8083063 Iustin Pop
1238 a8083063 Iustin Pop
1239 02c521e4 Iustin Pop
    test = nresult.get(constants.NV_NODESETUP,
1240 02c521e4 Iustin Pop
                           ["Missing NODESETUP results"])
1241 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1242 02c521e4 Iustin Pop
             "; ".join(test))
1243 02c521e4 Iustin Pop
1244 02c521e4 Iustin Pop
    return True
1245 02c521e4 Iustin Pop
1246 02c521e4 Iustin Pop
  def _VerifyNodeTime(self, ninfo, nresult,
1247 02c521e4 Iustin Pop
                      nvinfo_starttime, nvinfo_endtime):
1248 02c521e4 Iustin Pop
    """Check the node time.
1249 02c521e4 Iustin Pop

1250 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1251 02c521e4 Iustin Pop
    @param ninfo: the node to check
1252 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1253 02c521e4 Iustin Pop
    @param nvinfo_starttime: the start time of the RPC call
1254 02c521e4 Iustin Pop
    @param nvinfo_endtime: the end time of the RPC call
1255 02c521e4 Iustin Pop

1256 02c521e4 Iustin Pop
    """
1257 02c521e4 Iustin Pop
    node = ninfo.name
1258 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1259 02c521e4 Iustin Pop
1260 02c521e4 Iustin Pop
    ntime = nresult.get(constants.NV_TIME, None)
1261 02c521e4 Iustin Pop
    try:
1262 02c521e4 Iustin Pop
      ntime_merged = utils.MergeTime(ntime)
1263 02c521e4 Iustin Pop
    except (ValueError, TypeError):
1264 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1265 02c521e4 Iustin Pop
      return
1266 02c521e4 Iustin Pop
1267 02c521e4 Iustin Pop
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1268 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1269 02c521e4 Iustin Pop
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1270 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1271 02c521e4 Iustin Pop
    else:
1272 02c521e4 Iustin Pop
      ntime_diff = None
1273 02c521e4 Iustin Pop
1274 02c521e4 Iustin Pop
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1275 02c521e4 Iustin Pop
             "Node time diverges by at least %s from master node time",
1276 02c521e4 Iustin Pop
             ntime_diff)
1277 02c521e4 Iustin Pop
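  # Worked example (not part of the original module), assuming a maximum
  # allowed skew of constants.NODE_MAX_CLOCK_SKEW seconds: a node whose
  # merged time lies 200 seconds before nvinfo_starttime (and outside the
  # allowed skew) is reported with ntime_diff == "200.0s", while any value
  # inside [nvinfo_starttime - skew, nvinfo_endtime + skew] passes silently.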
1278 02c521e4 Iustin Pop
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1279 02c521e4 Iustin Pop
    """Check the node time.
1280 02c521e4 Iustin Pop

1281 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1282 02c521e4 Iustin Pop
    @param ninfo: the node to check
1283 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1284 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1285 02c521e4 Iustin Pop

1286 02c521e4 Iustin Pop
    """
1287 02c521e4 Iustin Pop
    if vg_name is None:
1288 02c521e4 Iustin Pop
      return
1289 02c521e4 Iustin Pop
1290 02c521e4 Iustin Pop
    node = ninfo.name
1291 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1292 02c521e4 Iustin Pop
1293 02c521e4 Iustin Pop
    # checks vg existence and size > 20G
1294 02c521e4 Iustin Pop
    vglist = nresult.get(constants.NV_VGLIST, None)
1295 02c521e4 Iustin Pop
    test = not vglist
1296 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1297 02c521e4 Iustin Pop
    if not test:
1298 02c521e4 Iustin Pop
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1299 02c521e4 Iustin Pop
                                            constants.MIN_VG_SIZE)
1300 02c521e4 Iustin Pop
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1301 02c521e4 Iustin Pop
1302 02c521e4 Iustin Pop
    # check pv names
1303 02c521e4 Iustin Pop
    pvlist = nresult.get(constants.NV_PVLIST, None)
1304 02c521e4 Iustin Pop
    test = pvlist is None
1305 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1306 a0c9776a Iustin Pop
    if not test:
1307 02c521e4 Iustin Pop
      # check that ':' is not present in PV names, since it's a
1308 02c521e4 Iustin Pop
      # special character for lvcreate (denotes the range of PEs to
1309 02c521e4 Iustin Pop
      # use on the PV)
1310 02c521e4 Iustin Pop
      for _, pvname, owner_vg in pvlist:
1311 02c521e4 Iustin Pop
        test = ":" in pvname
1312 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1313 02c521e4 Iustin Pop
                 " '%s' of VG '%s'", pvname, owner_vg)
1314 02c521e4 Iustin Pop
1315 02c521e4 Iustin Pop
  def _VerifyNodeNetwork(self, ninfo, nresult):
1316 02c521e4 Iustin Pop
    """Check the node time.
1317 02c521e4 Iustin Pop

1318 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1319 02c521e4 Iustin Pop
    @param ninfo: the node to check
1320 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1321 02c521e4 Iustin Pop

1322 02c521e4 Iustin Pop
    """
1323 02c521e4 Iustin Pop
    node = ninfo.name
1324 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1325 02c521e4 Iustin Pop
1326 02c521e4 Iustin Pop
    test = constants.NV_NODELIST not in nresult
1327 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1328 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1329 a0c9776a Iustin Pop
    if not test:
1330 02c521e4 Iustin Pop
      if nresult[constants.NV_NODELIST]:
1331 02c521e4 Iustin Pop
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1332 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1333 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1334 25361b9a Iustin Pop
1335 02c521e4 Iustin Pop
    test = constants.NV_NODENETTEST not in nresult
1336 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1337 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1338 a0c9776a Iustin Pop
    if not test:
1339 02c521e4 Iustin Pop
      if nresult[constants.NV_NODENETTEST]:
1340 02c521e4 Iustin Pop
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1341 7c874ee1 Iustin Pop
        for anode in nlist:
1342 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1343 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1344 02c521e4 Iustin Pop
                   anode, nresult[constants.NV_NODENETTEST][anode])
1345 a8083063 Iustin Pop
1346 02c521e4 Iustin Pop
  def _VerifyInstance(self, instance, instanceconfig, node_image):
1347 a8083063 Iustin Pop
    """Verify an instance.
1348 a8083063 Iustin Pop

1349 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1350 a8083063 Iustin Pop
    available on the instance's node.
1351 a8083063 Iustin Pop

1352 a8083063 Iustin Pop
    """
1353 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1354 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1355 a8083063 Iustin Pop
1356 a8083063 Iustin Pop
    node_vol_should = {}
1357 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1358 a8083063 Iustin Pop
1359 a8083063 Iustin Pop
    for node in node_vol_should:
1360 02c521e4 Iustin Pop
      n_img = node_image[node]
1361 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1362 02c521e4 Iustin Pop
        # ignore missing volumes on offline or broken nodes
1363 0a66c968 Iustin Pop
        continue
1364 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1365 02c521e4 Iustin Pop
        test = volume not in n_img.volumes
1366 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1367 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1368 a8083063 Iustin Pop
1369 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1370 02c521e4 Iustin Pop
      pri_img = node_image[node_current]
1371 02c521e4 Iustin Pop
      test = instance not in pri_img.instances and not pri_img.offline
1372 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1373 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1374 a0c9776a Iustin Pop
               node_current)
1375 a8083063 Iustin Pop
1376 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1377 a8083063 Iustin Pop
      if node != node_current:
1378 02c521e4 Iustin Pop
        test = instance in n_img.instances
1379 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1380 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1381 a8083063 Iustin Pop
1382 02c521e4 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
1383 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1384 a8083063 Iustin Pop

1385 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1386 a8083063 Iustin Pop
    reported as unknown.
1387 a8083063 Iustin Pop

1388 a8083063 Iustin Pop
    """
1389 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1390 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1391 02c521e4 Iustin Pop
        # skip non-healthy nodes
1392 02c521e4 Iustin Pop
        continue
1393 02c521e4 Iustin Pop
      for volume in n_img.volumes:
1394 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1395 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1396 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1397 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1398 a8083063 Iustin Pop
1399 02c521e4 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_image):
1400 a8083063 Iustin Pop
    """Verify the list of running instances.
1401 a8083063 Iustin Pop

1402 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1403 a8083063 Iustin Pop

1404 a8083063 Iustin Pop
    """
1405 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1406 02c521e4 Iustin Pop
      for o_inst in n_img.instances:
1407 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1408 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1409 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1410 a8083063 Iustin Pop
1411 02c521e4 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1412 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1413 2b3b6ddd Guido Trotter

1414 02c521e4 Iustin Pop
    Check that if one single node dies we can still start all the
1415 02c521e4 Iustin Pop
    instances it was primary for.
1416 2b3b6ddd Guido Trotter

1417 2b3b6ddd Guido Trotter
    """
1418 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1419 02c521e4 Iustin Pop
      # This code checks that every node which is now listed as
1420 02c521e4 Iustin Pop
      # secondary has enough memory to host all instances it is
1421 02c521e4 Iustin Pop
      # supposed to host, should a single other node in the cluster fail.
1422 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1423 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1424 02c521e4 Iustin Pop
      # WARNING: we currently take into account down instances as well
1425 02c521e4 Iustin Pop
      # as up ones, considering that even if they're down someone
1426 02c521e4 Iustin Pop
      # might want to start them even in the event of a node failure.
1427 02c521e4 Iustin Pop
      for prinode, instances in n_img.sbp.items():
1428 2b3b6ddd Guido Trotter
        needed_mem = 0
1429 2b3b6ddd Guido Trotter
        for instance in instances:
1430 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1431 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1432 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1433 02c521e4 Iustin Pop
        test = n_img.mfree < needed_mem
1434 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1435 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1436 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1437 2b3b6ddd Guido Trotter
1438 02c521e4 Iustin Pop
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1439 02c521e4 Iustin Pop
                       master_files):
1440 02c521e4 Iustin Pop
    """Verifies and computes the node required file checksums.
1441 02c521e4 Iustin Pop

1442 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1443 02c521e4 Iustin Pop
    @param ninfo: the node to check
1444 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1445 02c521e4 Iustin Pop
    @param file_list: required list of files
1446 02c521e4 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
1447 02c521e4 Iustin Pop
    @param master_files: list of files that only masters should have
1448 02c521e4 Iustin Pop

1449 02c521e4 Iustin Pop
    """
1450 02c521e4 Iustin Pop
    node = ninfo.name
1451 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1452 02c521e4 Iustin Pop
1453 02c521e4 Iustin Pop
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1454 02c521e4 Iustin Pop
    test = not isinstance(remote_cksum, dict)
1455 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1456 02c521e4 Iustin Pop
             "node hasn't returned file checksum data")
1457 02c521e4 Iustin Pop
    if test:
1458 02c521e4 Iustin Pop
      return
1459 02c521e4 Iustin Pop
1460 02c521e4 Iustin Pop
    for file_name in file_list:
1461 02c521e4 Iustin Pop
      node_is_mc = ninfo.master_candidate
1462 02c521e4 Iustin Pop
      must_have = (file_name not in master_files) or node_is_mc
1463 02c521e4 Iustin Pop
      # missing
1464 02c521e4 Iustin Pop
      test1 = file_name not in remote_cksum
1465 02c521e4 Iustin Pop
      # invalid checksum
1466 02c521e4 Iustin Pop
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1467 02c521e4 Iustin Pop
      # existing and good
1468 02c521e4 Iustin Pop
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1469 02c521e4 Iustin Pop
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1470 02c521e4 Iustin Pop
               "file '%s' missing", file_name)
1471 02c521e4 Iustin Pop
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1472 02c521e4 Iustin Pop
               "file '%s' has wrong checksum", file_name)
1473 02c521e4 Iustin Pop
      # not candidate and this is not a must-have file
1474 02c521e4 Iustin Pop
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1475 02c521e4 Iustin Pop
               "file '%s' should not exist on non master"
1476 02c521e4 Iustin Pop
               " candidates (and the file is outdated)", file_name)
1477 02c521e4 Iustin Pop
      # all good, except non-master/non-must have combination
1478 02c521e4 Iustin Pop
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1479 02c521e4 Iustin Pop
               "file '%s' should not exist"
1480 02c521e4 Iustin Pop
               " on non master candidates", file_name)
1481 02c521e4 Iustin Pop
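  # Illustrative summary (not part of the original module) of the per-file
  # outcomes checked above, depending on whether the node must have the file:
  #   missing (test1)       and must_have     -> "file '%s' missing"
  #   bad checksum (test2)  and must_have     -> "file '%s' has wrong checksum"
  #   bad checksum (test2)  and not must_have -> outdated copy on a non-candidate
  #   good copy (test3)     and not must_have -> file should not exist there at all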
1482 02c521e4 Iustin Pop
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
1483 02c521e4 Iustin Pop
    """Verifies and the node DRBD status.
1484 02c521e4 Iustin Pop

1485 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1486 02c521e4 Iustin Pop
    @param ninfo: the node to check
1487 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1488 02c521e4 Iustin Pop
    @param instanceinfo: the dict of instances
1489 02c521e4 Iustin Pop
    @param drbd_map: the DRBD map as returned by
1490 02c521e4 Iustin Pop
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1491 02c521e4 Iustin Pop

1492 02c521e4 Iustin Pop
    """
1493 02c521e4 Iustin Pop
    node = ninfo.name
1494 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1495 02c521e4 Iustin Pop
1496 02c521e4 Iustin Pop
    # compute the DRBD minors
1497 02c521e4 Iustin Pop
    node_drbd = {}
1498 02c521e4 Iustin Pop
    for minor, instance in drbd_map[node].items():
1499 02c521e4 Iustin Pop
      test = instance not in instanceinfo
1500 02c521e4 Iustin Pop
      _ErrorIf(test, self.ECLUSTERCFG, None,
1501 02c521e4 Iustin Pop
               "ghost instance '%s' in temporary DRBD map", instance)
1502 02c521e4 Iustin Pop
        # ghost instance should not be running, but otherwise we
1503 02c521e4 Iustin Pop
        # don't give double warnings (both ghost instance and
1504 02c521e4 Iustin Pop
        # unallocated minor in use)
1505 02c521e4 Iustin Pop
      if test:
1506 02c521e4 Iustin Pop
        node_drbd[minor] = (instance, False)
1507 02c521e4 Iustin Pop
      else:
1508 02c521e4 Iustin Pop
        instance = instanceinfo[instance]
1509 02c521e4 Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
1510 02c521e4 Iustin Pop
1511 02c521e4 Iustin Pop
    # and now check them
1512 02c521e4 Iustin Pop
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1513 02c521e4 Iustin Pop
    test = not isinstance(used_minors, (tuple, list))
1514 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEDRBD, node,
1515 02c521e4 Iustin Pop
             "cannot parse drbd status file: %s", str(used_minors))
1516 02c521e4 Iustin Pop
    if test:
1517 02c521e4 Iustin Pop
      # we cannot check drbd status
1518 02c521e4 Iustin Pop
      return
1519 02c521e4 Iustin Pop
1520 02c521e4 Iustin Pop
    for minor, (iname, must_exist) in node_drbd.items():
1521 02c521e4 Iustin Pop
      test = minor not in used_minors and must_exist
1522 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1523 02c521e4 Iustin Pop
               "drbd minor %d of instance %s is not active", minor, iname)
1524 02c521e4 Iustin Pop
    for minor in used_minors:
1525 02c521e4 Iustin Pop
      test = minor not in node_drbd
1526 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1527 02c521e4 Iustin Pop
               "unallocated drbd minor %d is in use", minor)
1528 02c521e4 Iustin Pop
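  # Illustrative example (not part of the original module): drbd_map, as
  # returned by ComputeDRBDMap, maps node -> {minor: instance name}; with
  # drbd_map[node] == {0: "inst1", 1: "inst2"} the code above expects minors
  # 0 and 1 to be listed in the node's NV_DRBDLIST result for instances that
  # are admin_up, and flags any extra in-use minor as unallocated.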
1529 02c521e4 Iustin Pop
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1530 02c521e4 Iustin Pop
    """Verifies and updates the node volume data.
1531 02c521e4 Iustin Pop

1532 02c521e4 Iustin Pop
    This function will update a L{NodeImage}'s internal structures
1533 02c521e4 Iustin Pop
    with data from the remote call.
1534 02c521e4 Iustin Pop

1535 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1536 02c521e4 Iustin Pop
    @param ninfo: the node to check
1537 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1538 02c521e4 Iustin Pop
    @param nimg: the node image object
1539 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1540 02c521e4 Iustin Pop

1541 02c521e4 Iustin Pop
    """
1542 02c521e4 Iustin Pop
    node = ninfo.name
1543 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1544 02c521e4 Iustin Pop
1545 02c521e4 Iustin Pop
    nimg.lvm_fail = True
1546 02c521e4 Iustin Pop
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1547 02c521e4 Iustin Pop
    if vg_name is None:
1548 02c521e4 Iustin Pop
      pass
1549 02c521e4 Iustin Pop
    elif isinstance(lvdata, basestring):
1550 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1551 02c521e4 Iustin Pop
               utils.SafeEncode(lvdata))
1552 02c521e4 Iustin Pop
    elif not isinstance(lvdata, dict):
1553 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1554 02c521e4 Iustin Pop
    else:
1555 02c521e4 Iustin Pop
      nimg.volumes = lvdata
1556 02c521e4 Iustin Pop
      nimg.lvm_fail = False
1557 02c521e4 Iustin Pop
1558 02c521e4 Iustin Pop
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1559 02c521e4 Iustin Pop
    """Verifies and updates the node instance list.
1560 02c521e4 Iustin Pop

1561 02c521e4 Iustin Pop
    If the listing was successful, then updates this node's instance
1562 02c521e4 Iustin Pop
    list. Otherwise, it marks the RPC call as failed for the instance
1563 02c521e4 Iustin Pop
    list key.
1564 02c521e4 Iustin Pop

1565 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1566 02c521e4 Iustin Pop
    @param ninfo: the node to check
1567 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1568 02c521e4 Iustin Pop
    @param nimg: the node image object
1569 02c521e4 Iustin Pop

1570 02c521e4 Iustin Pop
    """
1571 02c521e4 Iustin Pop
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1572 02c521e4 Iustin Pop
    test = not isinstance(idata, list)
1573 02c521e4 Iustin Pop
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1574 02c521e4 Iustin Pop
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1575 02c521e4 Iustin Pop
    if test:
1576 02c521e4 Iustin Pop
      nimg.hyp_fail = True
1577 02c521e4 Iustin Pop
    else:
1578 02c521e4 Iustin Pop
      nimg.instances = idata
1579 02c521e4 Iustin Pop
1580 02c521e4 Iustin Pop
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1581 02c521e4 Iustin Pop
    """Verifies and computes a node information map
1582 02c521e4 Iustin Pop

1583 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1584 02c521e4 Iustin Pop
    @param ninfo: the node to check
1585 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1586 02c521e4 Iustin Pop
    @param nimg: the node image object
1587 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1588 02c521e4 Iustin Pop

1589 02c521e4 Iustin Pop
    """
1590 02c521e4 Iustin Pop
    node = ninfo.name
1591 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1592 02c521e4 Iustin Pop
1593 02c521e4 Iustin Pop
    # try to read free memory (from the hypervisor)
1594 02c521e4 Iustin Pop
    hv_info = nresult.get(constants.NV_HVINFO, None)
1595 02c521e4 Iustin Pop
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1596 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1597 02c521e4 Iustin Pop
    if not test:
1598 02c521e4 Iustin Pop
      try:
1599 02c521e4 Iustin Pop
        nimg.mfree = int(hv_info["memory_free"])
1600 02c521e4 Iustin Pop
      except (ValueError, TypeError):
1601 02c521e4 Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1602 02c521e4 Iustin Pop
                 "node returned invalid nodeinfo, check hypervisor")
1603 02c521e4 Iustin Pop
1604 02c521e4 Iustin Pop
    # FIXME: devise a free space model for file based instances as well
1605 02c521e4 Iustin Pop
    if vg_name is not None:
1606 02c521e4 Iustin Pop
      test = (constants.NV_VGLIST not in nresult or
1607 02c521e4 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST])
1608 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODELVM, node,
1609 02c521e4 Iustin Pop
               "node didn't return data for the volume group '%s'"
1610 02c521e4 Iustin Pop
               " - it is either missing or broken", vg_name)
1611 02c521e4 Iustin Pop
      if not test:
1612 02c521e4 Iustin Pop
        try:
1613 02c521e4 Iustin Pop
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1614 02c521e4 Iustin Pop
        except (ValueError, TypeError):
1615 02c521e4 Iustin Pop
          _ErrorIf(True, self.ENODERPC, node,
1616 02c521e4 Iustin Pop
                   "node returned invalid LVM info, check LVM status")
1617 02c521e4 Iustin Pop
1618 a8083063 Iustin Pop
  def CheckPrereq(self):
1619 a8083063 Iustin Pop
    """Check prerequisites.
1620 a8083063 Iustin Pop

1621 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1622 e54c4c5e Guido Trotter
    all its members are valid.
1623 a8083063 Iustin Pop

1624 a8083063 Iustin Pop
    """
1625 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1626 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1627 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1628 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1629 a8083063 Iustin Pop
1630 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1631 d8fff41c Guido Trotter
    """Build hooks env.
1632 d8fff41c Guido Trotter

1633 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks just ran in the post phase and their failure makes
1634 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
1635 d8fff41c Guido Trotter

1636 d8fff41c Guido Trotter
    """
1637 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1638 35e994e9 Iustin Pop
    env = {
1639 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1640 35e994e9 Iustin Pop
      }
1641 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1642 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1643 35e994e9 Iustin Pop
1644 d8fff41c Guido Trotter
    return env, [], all_nodes
1645 d8fff41c Guido Trotter
1646 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1647 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1648 a8083063 Iustin Pop

1649 a8083063 Iustin Pop
    """
1650 a0c9776a Iustin Pop
    self.bad = False
1651 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1652 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1653 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1654 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1655 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1656 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1657 a8083063 Iustin Pop
1658 b98bf262 Michael Hanselmann
    # Check the cluster certificates
1659 b98bf262 Michael Hanselmann
    for cert_filename in constants.ALL_CERT_FILES:
1660 b98bf262 Michael Hanselmann
      (errcode, msg) = _VerifyCertificate(cert_filename)
1661 b98bf262 Michael Hanselmann
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1662 b98bf262 Michael Hanselmann
1663 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1664 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1665 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1666 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1667 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1668 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1669 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1670 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1671 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1672 02c521e4 Iustin Pop
    n_offline = 0 # Count of offline nodes
1673 02c521e4 Iustin Pop
    n_drained = 0 # Count of nodes being drained
1674 02c521e4 Iustin Pop
    node_vol_should = {}
1675 a8083063 Iustin Pop
1676 a8083063 Iustin Pop
    # FIXME: verify OS list
1677 a8083063 Iustin Pop
    # do local checksums
1678 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1679 112f18a5 Iustin Pop
1680 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1681 d3100055 Michael Hanselmann
    file_names.extend(constants.ALL_CERT_FILES)
1682 112f18a5 Iustin Pop
    file_names.extend(master_files)
1683 112f18a5 Iustin Pop
1684 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1685 a8083063 Iustin Pop
1686 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1687 a8083063 Iustin Pop
    node_verify_param = {
1688 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1689 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1690 82e37788 Iustin Pop
                              if not node.offline],
1691 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1692 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1693 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1694 82e37788 Iustin Pop
                                 if not node.offline],
1695 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1696 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1697 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1698 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1699 313b2dd4 Michael Hanselmann
      constants.NV_TIME: None,
1700 a8083063 Iustin Pop
      }
1701 313b2dd4 Michael Hanselmann
1702 cc9e1230 Guido Trotter
    if vg_name is not None:
1703 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1704 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1705 d091393e Iustin Pop
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1706 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1707 313b2dd4 Michael Hanselmann
1708 02c521e4 Iustin Pop
    # Build our expected cluster state
1709 02c521e4 Iustin Pop
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
1710 02c521e4 Iustin Pop
                      for node in nodeinfo)
1711 02c521e4 Iustin Pop
1712 02c521e4 Iustin Pop
    for instance in instancelist:
1713 02c521e4 Iustin Pop
      inst_config = instanceinfo[instance]
1714 02c521e4 Iustin Pop
1715 02c521e4 Iustin Pop
      for nname in inst_config.all_nodes:
1716 02c521e4 Iustin Pop
        if nname not in node_image:
1717 02c521e4 Iustin Pop
          # ghost node
1718 02c521e4 Iustin Pop
          gnode = self.NodeImage()
1719 02c521e4 Iustin Pop
          gnode.ghost = True
1720 02c521e4 Iustin Pop
          node_image[nname] = gnode
1721 02c521e4 Iustin Pop
1722 02c521e4 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1723 02c521e4 Iustin Pop
1724 02c521e4 Iustin Pop
      pnode = inst_config.primary_node
1725 02c521e4 Iustin Pop
      node_image[pnode].pinst.append(instance)
1726 02c521e4 Iustin Pop
1727 02c521e4 Iustin Pop
      for snode in inst_config.secondary_nodes:
1728 02c521e4 Iustin Pop
        nimg = node_image[snode]
1729 02c521e4 Iustin Pop
        nimg.sinst.append(instance)
1730 02c521e4 Iustin Pop
        if pnode not in nimg.sbp:
1731 02c521e4 Iustin Pop
          nimg.sbp[pnode] = []
1732 02c521e4 Iustin Pop
        nimg.sbp[pnode].append(instance)
1733 02c521e4 Iustin Pop
1734 02c521e4 Iustin Pop
    # At this point, we have the in-memory data structures complete,
1735 02c521e4 Iustin Pop
    # except for the runtime information, which we'll gather next
1736 02c521e4 Iustin Pop
1737 313b2dd4 Michael Hanselmann
    # Due to the way our RPC system works, exact response times cannot be
1738 313b2dd4 Michael Hanselmann
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1739 313b2dd4 Michael Hanselmann
    # time before and after executing the request, we can at least have a time
1740 313b2dd4 Michael Hanselmann
    # window.
1741 313b2dd4 Michael Hanselmann
    nvinfo_starttime = time.time()
1742 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1743 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1744 313b2dd4 Michael Hanselmann
    nvinfo_endtime = time.time()
1745 a8083063 Iustin Pop
1746 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1747 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1748 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1749 6d2e83d5 Iustin Pop
1750 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1751 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1752 112f18a5 Iustin Pop
      node = node_i.name
1753 02c521e4 Iustin Pop
      nimg = node_image[node]
1754 25361b9a Iustin Pop
1755 0a66c968 Iustin Pop
      if node_i.offline:
1756 7c874ee1 Iustin Pop
        if verbose:
1757 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1758 02c521e4 Iustin Pop
        n_offline += 1
1759 0a66c968 Iustin Pop
        continue
1760 0a66c968 Iustin Pop
1761 112f18a5 Iustin Pop
      if node == master_node:
1762 25361b9a Iustin Pop
        ntype = "master"
1763 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1764 25361b9a Iustin Pop
        ntype = "master candidate"
1765 22f0f71d Iustin Pop
      elif node_i.drained:
1766 22f0f71d Iustin Pop
        ntype = "drained"
1767 02c521e4 Iustin Pop
        n_drained += 1
1768 112f18a5 Iustin Pop
      else:
1769 25361b9a Iustin Pop
        ntype = "regular"
1770 7c874ee1 Iustin Pop
      if verbose:
1771 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1772 25361b9a Iustin Pop
1773 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1774 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1775 6f68a739 Iustin Pop
      if msg:
1776 02c521e4 Iustin Pop
        nimg.rpc_fail = True
1777 25361b9a Iustin Pop
        continue
1778 25361b9a Iustin Pop
1779 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1780 a8083063 Iustin Pop
1781 02c521e4 Iustin Pop
      nimg.call_ok = self._VerifyNode(node_i, nresult)
1782 02c521e4 Iustin Pop
      self._VerifyNodeNetwork(node_i, nresult)
1783 02c521e4 Iustin Pop
      self._VerifyNodeLVM(node_i, nresult, vg_name)
1784 02c521e4 Iustin Pop
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
1785 02c521e4 Iustin Pop
                            master_files)
1786 02c521e4 Iustin Pop
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
1787 02c521e4 Iustin Pop
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
1788 a8083063 Iustin Pop
1789 02c521e4 Iustin Pop
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
1790 02c521e4 Iustin Pop
      self._UpdateNodeInstances(node_i, nresult, nimg)
1791 02c521e4 Iustin Pop
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
1792 a8083063 Iustin Pop
1793 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1794 a8083063 Iustin Pop
    for instance in instancelist:
1795 7c874ee1 Iustin Pop
      if verbose:
1796 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1797 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1798 02c521e4 Iustin Pop
      self._VerifyInstance(instance, inst_config, node_image)
1799 832261fd Iustin Pop
      inst_nodes_offline = []
1800 a8083063 Iustin Pop
1801 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1802 02c521e4 Iustin Pop
      pnode_img = node_image[pnode]
1803 02c521e4 Iustin Pop
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1804 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1805 a0c9776a Iustin Pop
               " primary node failed", instance)
1806 93e4c50b Guido Trotter
1807 02c521e4 Iustin Pop
      if pnode_img.offline:
1808 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1809 832261fd Iustin Pop
1810 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1811 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1812 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1813 93e4c50b Guido Trotter
      # supported either.
1814 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1815 02c521e4 Iustin Pop
      if not inst_config.secondary_nodes:
1816 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1817 02c521e4 Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
1818 02c521e4 Iustin Pop
               instance, "instance has multiple secondary nodes: %s",
1819 02c521e4 Iustin Pop
               utils.CommaJoin(inst_config.secondary_nodes),
1820 02c521e4 Iustin Pop
               code=self.ETYPE_WARNING)
1821 93e4c50b Guido Trotter
1822 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1823 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1824 3924700f Iustin Pop
1825 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1826 02c521e4 Iustin Pop
        s_img = node_image[snode]
1827 02c521e4 Iustin Pop
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
1828 02c521e4 Iustin Pop
                 "instance %s, connection to secondary node failed", instance)
1829 02c521e4 Iustin Pop
1830 02c521e4 Iustin Pop
        if s_img.offline:
1831 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1832 832261fd Iustin Pop
1833 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1834 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1835 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1836 1f864b60 Iustin Pop
               utils.CommaJoin(inst_nodes_offline))
1837 02c521e4 Iustin Pop
      # ... or ghost nodes
1838 02c521e4 Iustin Pop
      for node in inst_config.all_nodes:
1839 02c521e4 Iustin Pop
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
1840 02c521e4 Iustin Pop
                 "instance lives on ghost node %s", node)
1841 93e4c50b Guido Trotter
1842 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1843 02c521e4 Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_image)
1844 a8083063 Iustin Pop
1845 02c521e4 Iustin Pop
    feedback_fn("* Verifying oprhan instances")
1846 02c521e4 Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_image)
1847 a8083063 Iustin Pop
1848 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1849 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1850 02c521e4 Iustin Pop
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
1851 2b3b6ddd Guido Trotter
1852 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1853 2b3b6ddd Guido Trotter
    if i_non_redundant:
1854 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1855 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1856 2b3b6ddd Guido Trotter
1857 3924700f Iustin Pop
    if i_non_a_balanced:
1858 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1859 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1860 3924700f Iustin Pop
1861 0a66c968 Iustin Pop
    if n_offline:
1862 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
1863 0a66c968 Iustin Pop
1864 22f0f71d Iustin Pop
    if n_drained:
1865 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
1866 22f0f71d Iustin Pop
1867 a0c9776a Iustin Pop
    return not self.bad
1868 a8083063 Iustin Pop
1869 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1870 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1871 e4376078 Iustin Pop

1872 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1873 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1874 d8fff41c Guido Trotter

1875 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1876 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1877 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1878 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
1879 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1880 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1881 e4376078 Iustin Pop
        and hook results
1882 d8fff41c Guido Trotter

1883 d8fff41c Guido Trotter
    """
1884 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1885 38206f3c Iustin Pop
    # their results
1886 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1887 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1888 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1889 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1890 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1891 7c874ee1 Iustin Pop
1892 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1893 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1894 7c874ee1 Iustin Pop
        msg = res.fail_msg
1895 a0c9776a Iustin Pop
        test = msg and not res.offline
1896 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1897 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1898 dd9e9f9c Michael Hanselmann
        if res.offline or msg:
1899 dd9e9f9c Michael Hanselmann
          # No need to investigate payload if node is offline or gave an error.
1900 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1901 a0c9776a Iustin Pop
          # overrides self.bad
1902 7c874ee1 Iustin Pop
          lu_result = 1
1903 7c874ee1 Iustin Pop
          continue
1904 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1905 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1906 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1907 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1908 a0c9776a Iustin Pop
          if test:
1909 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1910 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1911 6d7b472a Iustin Pop
            lu_result = 0
1912 d8fff41c Guido Trotter
1913 d8fff41c Guido Trotter
      return lu_result
1914 d8fff41c Guido Trotter
1915 a8083063 Iustin Pop
1916 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1917 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1918 2c95a8d4 Iustin Pop

1919 2c95a8d4 Iustin Pop
  """
1920 2c95a8d4 Iustin Pop
  _OP_REQP = []
1921 d4b9d97f Guido Trotter
  REQ_BGL = False
1922 d4b9d97f Guido Trotter
1923 d4b9d97f Guido Trotter
  def ExpandNames(self):
1924 d4b9d97f Guido Trotter
    self.needed_locks = {
1925 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1926 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1927 d4b9d97f Guido Trotter
    }
1928 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1929 2c95a8d4 Iustin Pop
1930 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1931 2c95a8d4 Iustin Pop
    """Check prerequisites.
1932 2c95a8d4 Iustin Pop

1933 2c95a8d4 Iustin Pop
    This has no prerequisites.
1934 2c95a8d4 Iustin Pop

1935 2c95a8d4 Iustin Pop
    """
1936 2c95a8d4 Iustin Pop
    pass
1937 2c95a8d4 Iustin Pop
1938 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1939 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1940 2c95a8d4 Iustin Pop

1941 29d376ec Iustin Pop
    @rtype: tuple of three items
1942 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1943 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1944 29d376ec Iustin Pop
        missing volumes)
1945 29d376ec Iustin Pop

1946 2c95a8d4 Iustin Pop
    """
1947 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1948 2c95a8d4 Iustin Pop
1949 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1950 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1951 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1952 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1953 2c95a8d4 Iustin Pop
1954 2c95a8d4 Iustin Pop
    nv_dict = {}
1955 2c95a8d4 Iustin Pop
    for inst in instances:
1956 2c95a8d4 Iustin Pop
      inst_lvs = {}
1957 0d68c45d Iustin Pop
      if (not inst.admin_up or
1958 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1959 2c95a8d4 Iustin Pop
        continue
1960 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1961 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1962 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1963 2c95a8d4 Iustin Pop
        for vol in vol_list:
1964 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1965 2c95a8d4 Iustin Pop
1966 2c95a8d4 Iustin Pop
    if not nv_dict:
1967 2c95a8d4 Iustin Pop
      return result
1968 2c95a8d4 Iustin Pop
1969 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1970 2c95a8d4 Iustin Pop
1971 2c95a8d4 Iustin Pop
    for node in nodes:
1972 2c95a8d4 Iustin Pop
      # node_volume
1973 29d376ec Iustin Pop
      node_res = node_lvs[node]
1974 29d376ec Iustin Pop
      if node_res.offline:
1975 ea9ddc07 Iustin Pop
        continue
1976 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1977 29d376ec Iustin Pop
      if msg:
1978 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1979 29d376ec Iustin Pop
        res_nodes[node] = msg
1980 2c95a8d4 Iustin Pop
        continue
1981 2c95a8d4 Iustin Pop
1982 29d376ec Iustin Pop
      lvs = node_res.payload
1983 1122eb25 Iustin Pop
      for lv_name, (_, _, lv_online) in lvs.items():
1984 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1985 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1986 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1987 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1988 2c95a8d4 Iustin Pop
1989 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1990 b63ed789 Iustin Pop
    # data better
1991 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1992 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1993 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1994 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1995 b63ed789 Iustin Pop
1996 2c95a8d4 Iustin Pop
    return result
1997 2c95a8d4 Iustin Pop
1998 2c95a8d4 Iustin Pop
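# Illustrative sketch (not part of the original module): LUVerifyDisks.Exec
# above inverts the per-instance {node: [volume, ...]} mapping into a flat
# {(node, volume): instance} dictionary so each LV reported by a node can be
# looked up directly. The helper below reproduces that inversion with plain
# dictionaries and hypothetical names; it is not called by any LU.
def _ExampleInvertLvMapping(lvs_by_instance):
  """Turn {instance: {node: [vol, ...]}} into {(node, vol): instance}."""
  nv_dict = {}
  for iname, node_vols in lvs_by_instance.items():
    for node, vol_list in node_vols.items():
      for vol in vol_list:
        nv_dict[(node, vol)] = iname
  return nv_dict

# Example (hypothetical data):
#   _ExampleInvertLvMapping({"inst1": {"node1": ["xenvg/disk0"]}})
#   -> {("node1", "xenvg/disk0"): "inst1"}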
1999 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
2000 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
2001 60975797 Iustin Pop

2002 60975797 Iustin Pop
  """
2003 60975797 Iustin Pop
  _OP_REQP = ["instances"]
2004 60975797 Iustin Pop
  REQ_BGL = False
2005 60975797 Iustin Pop
2006 60975797 Iustin Pop
  def ExpandNames(self):
2007 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
2008 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
2009 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2010 60975797 Iustin Pop
2011 60975797 Iustin Pop
    if self.op.instances:
2012 60975797 Iustin Pop
      self.wanted_names = []
2013 60975797 Iustin Pop
      for name in self.op.instances:
2014 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
2015 60975797 Iustin Pop
        self.wanted_names.append(full_name)
2016 60975797 Iustin Pop
      self.needed_locks = {
2017 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
2018 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
2019 60975797 Iustin Pop
        }
2020 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2021 60975797 Iustin Pop
    else:
2022 60975797 Iustin Pop
      self.wanted_names = None
2023 60975797 Iustin Pop
      self.needed_locks = {
2024 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
2025 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2026 60975797 Iustin Pop
        }
2027 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2028 60975797 Iustin Pop
2029 60975797 Iustin Pop
  def DeclareLocks(self, level):
2030 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2031 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
2032 60975797 Iustin Pop
2033 60975797 Iustin Pop
  def CheckPrereq(self):
2034 60975797 Iustin Pop
    """Check prerequisites.
2035 60975797 Iustin Pop

2036 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
2037 60975797 Iustin Pop

2038 60975797 Iustin Pop
    """
2039 60975797 Iustin Pop
    if self.wanted_names is None:
2040 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2041 60975797 Iustin Pop
2042 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2043 60975797 Iustin Pop
                             in self.wanted_names]
2044 60975797 Iustin Pop
2045 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
2046 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
2047 b775c337 Iustin Pop

2048 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
2049 b775c337 Iustin Pop
    children have smaller disk size.
2050 b775c337 Iustin Pop

2051 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
2052 b775c337 Iustin Pop

2053 b775c337 Iustin Pop
    """
2054 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
2055 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
2056 b775c337 Iustin Pop
      fchild = disk.children[0]
2057 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
2058 b775c337 Iustin Pop
      if mismatch:
2059 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2060 b775c337 Iustin Pop
                     fchild.size, disk.size)
2061 b775c337 Iustin Pop
        fchild.size = disk.size
2062 b775c337 Iustin Pop
2063 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
2064 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
2065 b775c337 Iustin Pop
    else:
2066 b775c337 Iustin Pop
      return False
2067 b775c337 Iustin Pop
2068 60975797 Iustin Pop
  def Exec(self, feedback_fn):
2069 60975797 Iustin Pop
    """Verify the size of cluster disks.
2070 60975797 Iustin Pop

2071 60975797 Iustin Pop
    """
2072 60975797 Iustin Pop
    # TODO: check child disks too
2073 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
2074 60975797 Iustin Pop
    per_node_disks = {}
2075 60975797 Iustin Pop
    for instance in self.wanted_instances:
2076 60975797 Iustin Pop
      pnode = instance.primary_node
2077 60975797 Iustin Pop
      if pnode not in per_node_disks:
2078 60975797 Iustin Pop
        per_node_disks[pnode] = []
2079 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
2080 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
2081 60975797 Iustin Pop
2082 60975797 Iustin Pop
    changed = []
2083 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
2084 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
2085 4d9e6835 Iustin Pop
      for dsk in newl:
2086 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
2087 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
2088 3cebe102 Michael Hanselmann
      if result.fail_msg:
2089 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
2090 60975797 Iustin Pop
                        " %s, ignoring", node)
2091 60975797 Iustin Pop
        continue
2092 60975797 Iustin Pop
      if len(result.payload) != len(dskl):
2093 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
2094 60975797 Iustin Pop
                        node)
2095 60975797 Iustin Pop
        continue
2096 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
2097 60975797 Iustin Pop
        if size is None:
2098 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
2099 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
2100 60975797 Iustin Pop
          continue
2101 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
2102 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
2103 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
2104 60975797 Iustin Pop
          continue
2105 60975797 Iustin Pop
        size = size >> 20
2106 60975797 Iustin Pop
        if size != disk.size:
2107 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2108 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
2109 60975797 Iustin Pop
                       instance.name, disk.size, size)
2110 60975797 Iustin Pop
          disk.size = size
2111 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2112 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
2113 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
2114 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2115 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
2116 60975797 Iustin Pop
    return changed
2117 60975797 Iustin Pop
2118 60975797 Iustin Pop
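# Illustrative sketch (not part of the original module): the Exec method of
# LURepairDiskSizes above receives sizes from blockdev_getsizes in bytes and
# compares them, after shifting right by 20 bits (bytes -> MiB), with the
# size recorded in the configuration (kept in MiB). The helper below shows
# that comparison in isolation; the values are hypothetical.
def _ExampleDiskSizeCorrection(reported_bytes, recorded_mib):
  """Return the corrected size in MiB, or None if no correction is needed."""
  reported_mib = reported_bytes >> 20  # same conversion as in the LU
  if reported_mib != recorded_mib:
    return reported_mib
  return None

# Example: a 10 GiB device reported in bytes matches a recorded 10240 MiB:
#   _ExampleDiskSizeCorrection(10 * 1024 * 1024 * 1024, 10240) -> None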
2119 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
2120 07bd8a51 Iustin Pop
  """Rename the cluster.
2121 07bd8a51 Iustin Pop

2122 07bd8a51 Iustin Pop
  """
2123 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
2124 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
2125 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
2126 07bd8a51 Iustin Pop
2127 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
2128 07bd8a51 Iustin Pop
    """Build hooks env.
2129 07bd8a51 Iustin Pop

2130 07bd8a51 Iustin Pop
    """
2131 07bd8a51 Iustin Pop
    env = {
2132 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2133 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
2134 07bd8a51 Iustin Pop
      }
2135 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2136 47a72f18 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2137 47a72f18 Iustin Pop
    return env, [mn], all_nodes
2138 07bd8a51 Iustin Pop
2139 07bd8a51 Iustin Pop
  def CheckPrereq(self):
2140 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
2141 07bd8a51 Iustin Pop

2142 07bd8a51 Iustin Pop
    """
2143 104f4ca1 Iustin Pop
    hostname = utils.GetHostInfo(self.op.name)
2144 07bd8a51 Iustin Pop
2145 bcf043c9 Iustin Pop
    new_name = hostname.name
2146 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
2147 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
2148 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
2149 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
2150 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2151 5c983ee5 Iustin Pop
                                 " cluster has changed",
2152 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2153 07bd8a51 Iustin Pop
    if new_ip != old_ip:
2154 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2155 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2156 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
2157 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
2158 07bd8a51 Iustin Pop
2159 07bd8a51 Iustin Pop
    self.op.name = new_name
2160 07bd8a51 Iustin Pop
2161 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
2162 07bd8a51 Iustin Pop
    """Rename the cluster.
2163 07bd8a51 Iustin Pop

2164 07bd8a51 Iustin Pop
    """
2165 07bd8a51 Iustin Pop
    clustername = self.op.name
2166 07bd8a51 Iustin Pop
    ip = self.ip
2167 07bd8a51 Iustin Pop
2168 07bd8a51 Iustin Pop
    # shutdown the master IP
2169 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
2170 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
2171 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
2172 07bd8a51 Iustin Pop
2173 07bd8a51 Iustin Pop
    try:
2174 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2175 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
2176 55cf7d83 Iustin Pop
      cluster.master_ip = ip
2177 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
2178 ec85e3d5 Iustin Pop
2179 ec85e3d5 Iustin Pop
      # update the known hosts file
2180 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2181 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
2182 ec85e3d5 Iustin Pop
      try:
2183 ec85e3d5 Iustin Pop
        node_list.remove(master)
2184 ec85e3d5 Iustin Pop
      except ValueError:
2185 ec85e3d5 Iustin Pop
        pass
2186 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
2187 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
2188 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2189 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2190 6f7d4e75 Iustin Pop
        if msg:
2191 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2192 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2193 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
2194 ec85e3d5 Iustin Pop
2195 07bd8a51 Iustin Pop
    finally:
2196 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
2197 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2198 b726aff0 Iustin Pop
      if msg:
2199 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
2200 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
2201 07bd8a51 Iustin Pop
2202 07bd8a51 Iustin Pop
2203 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
2204 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
2205 8084f9f6 Manuel Franceschini

2206 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
2207 e4376078 Iustin Pop
  @param disk: the disk to check
2208 5bbd3f7f Michael Hanselmann
  @rtype: boolean
2209 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
2210 8084f9f6 Manuel Franceschini

2211 8084f9f6 Manuel Franceschini
  """
2212 8084f9f6 Manuel Franceschini
  if disk.children:
2213 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
2214 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
2215 8084f9f6 Manuel Franceschini
        return True
2216 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
2217 8084f9f6 Manuel Franceschini
2218 8084f9f6 Manuel Franceschini
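# Illustrative sketch (not part of the original module): how
# _RecursiveCheckIfLVMBased descends a disk tree. Real disks are
# L{objects.Disk} instances; the stand-in class below is purely hypothetical
# and only mimics the two attributes the check relies on (dev_type and
# children).
class _ExampleDisk(object):
  def __init__(self, dev_type, children=None):
    self.dev_type = dev_type
    self.children = children or []

# A DRBD8 device backed by LV children would be reported as LVM-based,
# because the recursion reaches an LD_LV leaf:
#   drbd = _ExampleDisk(constants.LD_DRBD8,
#                       [_ExampleDisk(constants.LD_LV),
#                        _ExampleDisk(constants.LD_LV)])
#   _RecursiveCheckIfLVMBased(drbd)  # -> True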
2219 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
2220 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
2221 8084f9f6 Manuel Franceschini

2222 8084f9f6 Manuel Franceschini
  """
2223 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
2224 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
2225 8084f9f6 Manuel Franceschini
  _OP_REQP = []
2226 c53279cf Guido Trotter
  REQ_BGL = False
2227 c53279cf Guido Trotter
2228 3994f455 Iustin Pop
  def CheckArguments(self):
2229 4b7735f9 Iustin Pop
    """Check parameters
2230 4b7735f9 Iustin Pop

2231 4b7735f9 Iustin Pop
    """
2232 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
2233 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
2234 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2235 4b7735f9 Iustin Pop
      try:
2236 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
2237 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
2238 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
2239 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
2240 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
2241 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
2242 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2243 3953242f Iustin Pop
    _CheckBooleanOpField(self.op, "maintain_node_health")
2244 4b7735f9 Iustin Pop
2245 c53279cf Guido Trotter
  def ExpandNames(self):
2246 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
2247 c53279cf Guido Trotter
    # all nodes to be modified.
2248 c53279cf Guido Trotter
    self.needed_locks = {
2249 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
2250 c53279cf Guido Trotter
    }
2251 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2252 8084f9f6 Manuel Franceschini
2253 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
2254 8084f9f6 Manuel Franceschini
    """Build hooks env.
2255 8084f9f6 Manuel Franceschini

2256 8084f9f6 Manuel Franceschini
    """
2257 8084f9f6 Manuel Franceschini
    env = {
2258 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2259 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
2260 8084f9f6 Manuel Franceschini
      }
2261 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2262 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
2263 8084f9f6 Manuel Franceschini
2264 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
2265 8084f9f6 Manuel Franceschini
    """Check prerequisites.
2266 8084f9f6 Manuel Franceschini

2267 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
2268 5f83e263 Iustin Pop
    if the given volume group is valid.
2269 8084f9f6 Manuel Franceschini

2270 8084f9f6 Manuel Franceschini
    """
2271 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
2272 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
2273 8084f9f6 Manuel Franceschini
      for inst in instances:
2274 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
2275 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
2276 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
2277 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
2278 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
2279 8084f9f6 Manuel Franceschini
2280 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2281 779c15bb Iustin Pop
2282 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
2283 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
2284 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
2285 8084f9f6 Manuel Franceschini
      for node in node_list:
2286 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
2287 e480923b Iustin Pop
        if msg:
2288 781de953 Iustin Pop
          # ignoring down node
2289 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
2290 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
2291 781de953 Iustin Pop
          continue
2292 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2293 781de953 Iustin Pop
                                              self.op.vg_name,
2294 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
2295 8084f9f6 Manuel Franceschini
        if vgstatus:
2296 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
2297 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2298 8084f9f6 Manuel Franceschini
2299 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
2300 5af3da74 Guido Trotter
    # validate params changes
2301 779c15bb Iustin Pop
    if self.op.beparams:
2302 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2303 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
2304 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2305 779c15bb Iustin Pop
2306 5af3da74 Guido Trotter
    if self.op.nicparams:
2307 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2308 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
2309 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2310 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2311 90b704a1 Guido Trotter
      nic_errors = []
2312 90b704a1 Guido Trotter
2313 90b704a1 Guido Trotter
      # check all instances for consistency
2314 90b704a1 Guido Trotter
      for instance in self.cfg.GetAllInstancesInfo().values():
2315 90b704a1 Guido Trotter
        for nic_idx, nic in enumerate(instance.nics):
2316 90b704a1 Guido Trotter
          params_copy = copy.deepcopy(nic.nicparams)
2317 90b704a1 Guido Trotter
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2318 90b704a1 Guido Trotter
2319 90b704a1 Guido Trotter
          # check parameter syntax
2320 90b704a1 Guido Trotter
          try:
2321 90b704a1 Guido Trotter
            objects.NIC.CheckParameterSyntax(params_filled)
2322 90b704a1 Guido Trotter
          except errors.ConfigurationError, err:
2323 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: %s" %
2324 90b704a1 Guido Trotter
                              (instance.name, nic_idx, err))
2325 90b704a1 Guido Trotter
2326 90b704a1 Guido Trotter
          # if we're moving instances to routed, check that they have an ip
2327 90b704a1 Guido Trotter
          target_mode = params_filled[constants.NIC_MODE]
2328 90b704a1 Guido Trotter
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2329 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2330 90b704a1 Guido Trotter
                              (instance.name, nic_idx))
2331 90b704a1 Guido Trotter
      if nic_errors:
2332 90b704a1 Guido Trotter
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2333 90b704a1 Guido Trotter
                                   "\n".join(nic_errors))
2334 5af3da74 Guido Trotter
2335 779c15bb Iustin Pop
    # hypervisor list/parameters
2336 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
2337 779c15bb Iustin Pop
    if self.op.hvparams:
2338 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
2339 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2340 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2341 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
2342 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
2343 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
2344 779c15bb Iustin Pop
        else:
2345 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
2346 779c15bb Iustin Pop
2347 17463d22 Renรฉ Nussbaumer
    # os hypervisor parameters
2348 17463d22 Renรฉ Nussbaumer
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2349 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2350 17463d22 Renรฉ Nussbaumer
      if not isinstance(self.op.os_hvp, dict):
2351 17463d22 Renรฉ Nussbaumer
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2352 17463d22 Renรฉ Nussbaumer
                                   errors.ECODE_INVAL)
2353 17463d22 Renรฉ Nussbaumer
      for os_name, hvs in self.op.os_hvp.items():
2354 17463d22 Renรฉ Nussbaumer
        if not isinstance(hvs, dict):
2355 17463d22 Renรฉ Nussbaumer
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2356 17463d22 Renรฉ Nussbaumer
                                      " input"), errors.ECODE_INVAL)
2357 17463d22 Renรฉ Nussbaumer
        if os_name not in self.new_os_hvp:
2358 17463d22 Renรฉ Nussbaumer
          self.new_os_hvp[os_name] = hvs
2359 17463d22 Renรฉ Nussbaumer
        else:
2360 17463d22 Renรฉ Nussbaumer
          for hv_name, hv_dict in hvs.items():
2361 17463d22 Renรฉ Nussbaumer
            if hv_name not in self.new_os_hvp[os_name]:
2362 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name] = hv_dict
2363 17463d22 Renรฉ Nussbaumer
            else:
2364 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2365 17463d22 Renรฉ Nussbaumer
2366 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2367 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
2368 b119bccb Guido Trotter
      if not self.hv_list:
2369 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2370 5c983ee5 Iustin Pop
                                   " least one member",
2371 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2372 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2373 b119bccb Guido Trotter
      if invalid_hvs:
2374 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2375 ab3e6da8 Iustin Pop
                                   " entries: %s" %
2376 ab3e6da8 Iustin Pop
                                   utils.CommaJoin(invalid_hvs),
2377 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2378 779c15bb Iustin Pop
    else:
2379 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
2380 779c15bb Iustin Pop
2381 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2382 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
2383 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
2384 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2385 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
2386 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
2387 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
2388 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2389 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2390 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
2391 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
2392 779c15bb Iustin Pop
2393 cced4c39 Iustin Pop
    if self.op.os_hvp:
2394 cced4c39 Iustin Pop
      # no need to check any newly-enabled hypervisors, since the
2395 cced4c39 Iustin Pop
      # defaults have already been checked in the above code-block
2396 cced4c39 Iustin Pop
      for os_name, os_hvp in self.new_os_hvp.items():
2397 cced4c39 Iustin Pop
        for hv_name, hv_params in os_hvp.items():
2398 cced4c39 Iustin Pop
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2399 cced4c39 Iustin Pop
          # we need to fill in the new os_hvp on top of the actual hv_p
2400 cced4c39 Iustin Pop
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2401 cced4c39 Iustin Pop
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2402 cced4c39 Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2403 cced4c39 Iustin Pop
          hv_class.CheckParameterSyntax(new_osp)
2404 cced4c39 Iustin Pop
          _CheckHVParams(self, node_list, hv_name, new_osp)
2405 cced4c39 Iustin Pop
2406 cced4c39 Iustin Pop
2407 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
2408 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
2409 8084f9f6 Manuel Franceschini

2410 8084f9f6 Manuel Franceschini
    """
2411 779c15bb Iustin Pop
    if self.op.vg_name is not None:
2412 b2482333 Guido Trotter
      new_volume = self.op.vg_name
2413 b2482333 Guido Trotter
      if not new_volume:
2414 b2482333 Guido Trotter
        new_volume = None
2415 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
2416 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
2417 779c15bb Iustin Pop
      else:
2418 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
2419 779c15bb Iustin Pop
                    " state, not changing")
2420 779c15bb Iustin Pop
    if self.op.hvparams:
2421 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2422 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2423 17463d22 Renรฉ Nussbaumer
      self.cluster.os_hvp = self.new_os_hvp
2424 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2425 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2426 779c15bb Iustin Pop
    if self.op.beparams:
2427 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2428 5af3da74 Guido Trotter
    if self.op.nicparams:
2429 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2430 5af3da74 Guido Trotter
2431 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2432 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2433 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2434 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2435 4b7735f9 Iustin Pop
2436 3953242f Iustin Pop
    if self.op.maintain_node_health is not None:
2437 3953242f Iustin Pop
      self.cluster.maintain_node_health = self.op.maintain_node_health
2438 3953242f Iustin Pop
2439 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2440 8084f9f6 Manuel Franceschini
2441 8084f9f6 Manuel Franceschini
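# Illustrative sketch (not part of the original module): LUSetClusterParams
# above builds its new parameter dictionaries by layering the submitted
# values on top of the current cluster defaults with objects.FillDict.
# Assuming FillDict returns a copy of the defaults updated with the
# explicitly given keys, the plain-dict equivalent looks like this; the
# parameter names are hypothetical.
def _ExampleFillDefaults(defaults, custom):
  """Return a copy of defaults with the keys from custom overriding them."""
  merged = defaults.copy()
  merged.update(custom)
  return merged

# Example:
#   _ExampleFillDefaults({"memory": 128, "vcpus": 1}, {"memory": 512})
#   -> {"memory": 512, "vcpus": 1}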
2442 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2443 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2444 28eddce5 Guido Trotter

2445 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2446 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2447 28eddce5 Guido Trotter
  makes sure those are copied.
2448 28eddce5 Guido Trotter

2449 28eddce5 Guido Trotter
  @param lu: calling logical unit
2450 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2451 28eddce5 Guido Trotter

2452 28eddce5 Guido Trotter
  """
2453 28eddce5 Guido Trotter
  # 1. Gather target nodes
2454 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2455 6819dc49 Iustin Pop
  dist_nodes = lu.cfg.GetOnlineNodeList()
2456 28eddce5 Guido Trotter
  if additional_nodes is not None:
2457 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2458 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2459 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2460 a4eae71f Michael Hanselmann
2461 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2462 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2463 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2464 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2465 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2466 6b7d5878 Michael Hanselmann
                    constants.CONFD_HMAC_KEY,
2467 28eddce5 Guido Trotter
                   ])
2468 e1b8653f Guido Trotter
2469 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2470 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2471 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2472 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2473 e1b8653f Guido Trotter
2474 28eddce5 Guido Trotter
  # 3. Perform the files upload
2475 28eddce5 Guido Trotter
  for fname in dist_files:
2476 28eddce5 Guido Trotter
    if os.path.exists(fname):
2477 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2478 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2479 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2480 6f7d4e75 Iustin Pop
        if msg:
2481 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2482 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2483 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2484 28eddce5 Guido Trotter
2485 28eddce5 Guido Trotter
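# Illustrative sketch (not part of the original module): the target list
# built by _RedistributeAncillaryFiles above is simply the online nodes plus
# any explicitly requested additional nodes, minus the master itself (which
# already holds the files). The helper below reproduces that with plain
# lists of hypothetical node names.
def _ExampleDistributionTargets(online_nodes, master_name, additional=None):
  """Return the node names ancillary files would be pushed to."""
  targets = list(online_nodes)
  if additional is not None:
    targets.extend(additional)
  if master_name in targets:
    targets.remove(master_name)
  return targets

# Example:
#   _ExampleDistributionTargets(["node1", "node2"], "node1", ["node3"])
#   -> ["node2", "node3"]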
2486 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2487 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2488 afee0879 Iustin Pop

2489 afee0879 Iustin Pop
  This is a very simple LU.
2490 afee0879 Iustin Pop

2491 afee0879 Iustin Pop
  """
2492 afee0879 Iustin Pop
  _OP_REQP = []
2493 afee0879 Iustin Pop
  REQ_BGL = False
2494 afee0879 Iustin Pop
2495 afee0879 Iustin Pop
  def ExpandNames(self):
2496 afee0879 Iustin Pop
    self.needed_locks = {
2497 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2498 afee0879 Iustin Pop
    }
2499 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2500 afee0879 Iustin Pop
2501 afee0879 Iustin Pop
  def CheckPrereq(self):
2502 afee0879 Iustin Pop
    """Check prerequisites.
2503 afee0879 Iustin Pop

2504 afee0879 Iustin Pop
    """
2505 afee0879 Iustin Pop
2506 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2507 afee0879 Iustin Pop
    """Redistribute the configuration.
2508 afee0879 Iustin Pop

2509 afee0879 Iustin Pop
    """
2510 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2511 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2512 afee0879 Iustin Pop
2513 afee0879 Iustin Pop
2514 b6c07b79 Michael Hanselmann
def _WaitForSync(lu, instance, oneshot=False):
2515 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2516 a8083063 Iustin Pop

2517 a8083063 Iustin Pop
  """
2518 a8083063 Iustin Pop
  if not instance.disks:
2519 a8083063 Iustin Pop
    return True
2520 a8083063 Iustin Pop
2521 a8083063 Iustin Pop
  if not oneshot:
2522 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2523 a8083063 Iustin Pop
2524 a8083063 Iustin Pop
  node = instance.primary_node
2525 a8083063 Iustin Pop
2526 a8083063 Iustin Pop
  for dev in instance.disks:
2527 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2528 a8083063 Iustin Pop
2529 6bcb1446 Michael Hanselmann
  # TODO: Convert to utils.Retry
2530 6bcb1446 Michael Hanselmann
2531 a8083063 Iustin Pop
  retries = 0
2532 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2533 a8083063 Iustin Pop
  while True:
2534 a8083063 Iustin Pop
    max_time = 0
2535 a8083063 Iustin Pop
    done = True
2536 a8083063 Iustin Pop
    cumul_degraded = False
2537 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2538 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2539 3efa9051 Iustin Pop
    if msg:
2540 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2541 a8083063 Iustin Pop
      retries += 1
2542 a8083063 Iustin Pop
      if retries >= 10:
2543 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2544 3ecf6786 Iustin Pop
                                 " aborting." % node)
2545 a8083063 Iustin Pop
      time.sleep(6)
2546 a8083063 Iustin Pop
      continue
2547 3efa9051 Iustin Pop
    rstats = rstats.payload
2548 a8083063 Iustin Pop
    retries = 0
2549 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2550 a8083063 Iustin Pop
      if mstat is None:
2551 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2552 86d9d3bb Iustin Pop
                      node, instance.disks[i].iv_name)
2553 a8083063 Iustin Pop
        continue
2554 36145b12 Michael Hanselmann
2555 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2556 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2557 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2558 a8083063 Iustin Pop
        done = False
2559 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2560 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2561 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2562 a8083063 Iustin Pop
        else:
2563 a8083063 Iustin Pop
          rem_time = "no time estimate"
2564 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2565 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2566 4d4a651d Michael Hanselmann
                         rem_time))
2567 fbafd7a8 Iustin Pop
2568 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2569 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2570 fbafd7a8 Iustin Pop
    # we force restart of the loop
2571 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2572 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2573 fbafd7a8 Iustin Pop
      degr_retries -= 1
2574 fbafd7a8 Iustin Pop
      time.sleep(1)
2575 fbafd7a8 Iustin Pop
      continue
2576 fbafd7a8 Iustin Pop
2577 a8083063 Iustin Pop
    if done or oneshot:
2578 a8083063 Iustin Pop
      break
2579 a8083063 Iustin Pop
2580 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2581 a8083063 Iustin Pop
2582 a8083063 Iustin Pop
  if done:
2583 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2584 a8083063 Iustin Pop
  return not cumul_degraded
2585 a8083063 Iustin Pop
2586 a8083063 Iustin Pop
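# Illustrative sketch (not part of the original module): _WaitForSync above
# polls while any disk is still syncing and, once the disks look done but are
# still flagged as degraded, performs a few extra one-second retries so that
# a transient degraded state does not cause a false alarm. The loop below
# isolates that pattern; poll_fn is a hypothetical callable returning a
# (done, degraded) pair.
def _ExampleWaitUntilClean(poll_fn, degr_retries=10, sleep_fn=time.sleep):
  """Poll until done, retrying a few times while still degraded."""
  while True:
    done, degraded = poll_fn()
    if done and degraded and degr_retries > 0:
      # looks finished but degraded: retry briefly, as the degr_retries
      # logic above does, to rule out a transient state
      degr_retries -= 1
      sleep_fn(1)
      continue
    if done:
      return not degraded
    sleep_fn(1)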
2587 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2588 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2589 a8083063 Iustin Pop

2590 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2591 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2592 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2593 0834c866 Iustin Pop

2594 a8083063 Iustin Pop
  """
2595 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2596 a8083063 Iustin Pop
2597 a8083063 Iustin Pop
  result = True
2598 96acbc09 Michael Hanselmann
2599 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2600 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2601 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2602 23829f6f Iustin Pop
    if msg:
2603 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2604 23829f6f Iustin Pop
      result = False
2605 23829f6f Iustin Pop
    elif not rstats.payload:
2606 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2607 a8083063 Iustin Pop
      result = False
2608 a8083063 Iustin Pop
    else:
2609 96acbc09 Michael Hanselmann
      if ldisk:
2610 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2611 96acbc09 Michael Hanselmann
      else:
2612 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2613 96acbc09 Michael Hanselmann
2614 a8083063 Iustin Pop
  if dev.children:
2615 a8083063 Iustin Pop
    for child in dev.children:
2616 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2617 a8083063 Iustin Pop
2618 a8083063 Iustin Pop
  return result
2619 a8083063 Iustin Pop
2620 a8083063 Iustin Pop
2621 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2622 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2623 a8083063 Iustin Pop

2624 a8083063 Iustin Pop
  """
2625 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2626 6bf01bbb Guido Trotter
  REQ_BGL = False
2627 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2628 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2629 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2630 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2631 a8083063 Iustin Pop
2632 6bf01bbb Guido Trotter
  def ExpandNames(self):
2633 1f9430d6 Iustin Pop
    if self.op.names:
2634 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2635 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2636 1f9430d6 Iustin Pop
2637 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2638 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2639 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2640 1f9430d6 Iustin Pop
2641 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2642 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2643 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2644 6bf01bbb Guido Trotter
    self.needed_locks = {}
2645 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2646 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2647 6bf01bbb Guido Trotter
2648 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2649 6bf01bbb Guido Trotter
    """Check prerequisites.
2650 6bf01bbb Guido Trotter

2651 6bf01bbb Guido Trotter
    """
2652 6bf01bbb Guido Trotter
2653 1f9430d6 Iustin Pop
  @staticmethod
2654 857121ad Iustin Pop
  def _DiagnoseByOS(rlist):
2655 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2656 1f9430d6 Iustin Pop

2657 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2658 1f9430d6 Iustin Pop

2659 e4376078 Iustin Pop
    @rtype: dict
2660 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2661 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants) as
        values, eg::
2662 e4376078 Iustin Pop

2663 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2664 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
2665 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2666 e4376078 Iustin Pop
          }
2667 1f9430d6 Iustin Pop

2668 1f9430d6 Iustin Pop
    """
2669 1f9430d6 Iustin Pop
    all_os = {}
2670 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2671 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2672 a6ab004b Iustin Pop
    # make all OSes invalid
2673 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2674 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2675 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2676 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2677 1f9430d6 Iustin Pop
        continue
2678 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2679 255dcebd Iustin Pop
        if name not in all_os:
2680 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2681 1f9430d6 Iustin Pop
          # for each node in node_list
2682 255dcebd Iustin Pop
          all_os[name] = {}
2683 a6ab004b Iustin Pop
          for nname in good_nodes:
2684 255dcebd Iustin Pop
            all_os[name][nname] = []
2685 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2686 1f9430d6 Iustin Pop
    return all_os
2687 a8083063 Iustin Pop
2688 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2689 a8083063 Iustin Pop
    """Compute the list of OSes.
2690 a8083063 Iustin Pop

2691 a8083063 Iustin Pop
    """
2692 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2693 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2694 857121ad Iustin Pop
    pol = self._DiagnoseByOS(node_data)
2695 1f9430d6 Iustin Pop
    output = []
2696 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2697 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2698 1e288a26 Guido Trotter
2699 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2700 1f9430d6 Iustin Pop
      row = []
2701 1e288a26 Guido Trotter
      if calc_valid:
2702 1e288a26 Guido Trotter
        valid = True
2703 1e288a26 Guido Trotter
        variants = None
2704 1e288a26 Guido Trotter
        for osl in os_data.values():
2705 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2706 1e288a26 Guido Trotter
          if not valid:
2707 1e288a26 Guido Trotter
            variants = None
2708 1e288a26 Guido Trotter
            break
2709 1e288a26 Guido Trotter
          if calc_variants:
2710 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2711 1e288a26 Guido Trotter
            if variants is None:
2712 1e288a26 Guido Trotter
              variants = node_variants
2713 1e288a26 Guido Trotter
            else:
2714 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2715 1e288a26 Guido Trotter
2716 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2717 1f9430d6 Iustin Pop
        if field == "name":
2718 1f9430d6 Iustin Pop
          val = os_name
2719 1f9430d6 Iustin Pop
        elif field == "valid":
2720 1e288a26 Guido Trotter
          val = valid
2721 1f9430d6 Iustin Pop
        elif field == "node_status":
2722 255dcebd Iustin Pop
          # this is just a copy of the dict
2723 1f9430d6 Iustin Pop
          val = {}
2724 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2725 255dcebd Iustin Pop
            val[node_name] = nos_list
2726 1e288a26 Guido Trotter
        elif field == "variants":
2727 1e288a26 Guido Trotter
          val = variants
2728 1f9430d6 Iustin Pop
        else:
2729 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2730 1f9430d6 Iustin Pop
        row.append(val)
2731 1f9430d6 Iustin Pop
      output.append(row)
2732 1f9430d6 Iustin Pop
2733 1f9430d6 Iustin Pop
    return output
2734 a8083063 Iustin Pop
2735 a8083063 Iustin Pop
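# Illustrative sketch (not part of the original module): when the "variants"
# field is requested, LUDiagnoseOS.Exec above keeps only the OS variants that
# every node reports, intersecting the per-node lists one node at a time
# while preserving the order of the first list. The helper below shows that
# reduction with hypothetical variant names.
def _ExampleCommonVariants(per_node_variants):
  """Intersect per-node variant lists, keeping the first list's order."""
  common = None
  for node_variants in per_node_variants:
    if common is None:
      common = list(node_variants)
    else:
      common = [v for v in common if v in node_variants]
  return common or []

# Example:
#   _ExampleCommonVariants([["lenny", "etch"], ["etch"]]) -> ["etch"]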
2736 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2737 a8083063 Iustin Pop
  """Logical unit for removing a node.
2738 a8083063 Iustin Pop

2739 a8083063 Iustin Pop
  """
2740 a8083063 Iustin Pop
  HPATH = "node-remove"
2741 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2742 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2743 a8083063 Iustin Pop
2744 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2745 a8083063 Iustin Pop
    """Build hooks env.
2746 a8083063 Iustin Pop

2747 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2748 d08869ee Guido Trotter
    node would then be impossible to remove.
2749 a8083063 Iustin Pop

2750 a8083063 Iustin Pop
    """
2751 396e1b78 Michael Hanselmann
    env = {
2752 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2753 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2754 396e1b78 Michael Hanselmann
      }
2755 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2756 9bb31ea8 Iustin Pop
    try:
2757 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2758 9bb31ea8 Iustin Pop
    except ValueError:
2759 9bb31ea8 Iustin Pop
      logging.warning("Node %s which is about to be removed not found"
2760 9bb31ea8 Iustin Pop
                      " in the all nodes list", self.op.node_name)
2761 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2762 a8083063 Iustin Pop
2763 a8083063 Iustin Pop
  def CheckPrereq(self):
2764 a8083063 Iustin Pop
    """Check prerequisites.
2765 a8083063 Iustin Pop

2766 a8083063 Iustin Pop
    This checks:
2767 a8083063 Iustin Pop
     - the node exists in the configuration
2768 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2769 a8083063 Iustin Pop
     - it's not the master
2770 a8083063 Iustin Pop

2771 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2772 a8083063 Iustin Pop

2773 a8083063 Iustin Pop
    """
2774 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
2775 cf26a87a Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2776 cf26a87a Iustin Pop
    assert node is not None
2777 a8083063 Iustin Pop
2778 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2779 a8083063 Iustin Pop
2780 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2781 a8083063 Iustin Pop
    if node.name == masternode:
2782 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2783 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2784 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2785 a8083063 Iustin Pop
2786 a8083063 Iustin Pop
    for instance_name in instance_list:
2787 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2788 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2789 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2790 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2791 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2792 a8083063 Iustin Pop
    self.op.node_name = node.name
2793 a8083063 Iustin Pop
    self.node = node
2794 a8083063 Iustin Pop
2795 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2796 a8083063 Iustin Pop
    """Removes the node from the cluster.
2797 a8083063 Iustin Pop

2798 a8083063 Iustin Pop
    """
2799 a8083063 Iustin Pop
    node = self.node
2800 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2801 9a4f63d1 Iustin Pop
                 node.name)
2802 a8083063 Iustin Pop
2803 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2804 b989b9d9 Ken Wehr
2805 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2806 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2807 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2808 a8083063 Iustin Pop
2809 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2810 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2811 cd46f3b4 Luca Bigliardi
    try:
2812 1122eb25 Iustin Pop
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2813 3cb5c1e3 Luca Bigliardi
    except:
2814 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
2815 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2816 cd46f3b4 Luca Bigliardi
2817 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2818 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2819 0623d351 Iustin Pop
    if msg:
2820 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2821 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2822 c8a0948f Michael Hanselmann
2823 a8083063 Iustin Pop
2824 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2825 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2826 a8083063 Iustin Pop

2827 a8083063 Iustin Pop
  """
2828 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
2829 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2830 35705d8f Guido Trotter
  REQ_BGL = False
2831 19bed813 Iustin Pop
2832 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2833 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2834 19bed813 Iustin Pop
2835 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2836 31bf511f Iustin Pop
    "dtotal", "dfree",
2837 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2838 31bf511f Iustin Pop
    "bootid",
2839 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2840 31bf511f Iustin Pop
    )
2841 31bf511f Iustin Pop
2842 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2843 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2844 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2845 31bf511f Iustin Pop
    "pip", "sip", "tags",
2846 0e67cdbe Iustin Pop
    "master",
2847 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2848 31bf511f Iustin Pop
    )
2849 a8083063 Iustin Pop
2850 35705d8f Guido Trotter
  def ExpandNames(self):
2851 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2852 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2853 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2854 a8083063 Iustin Pop
2855 35705d8f Guido Trotter
    self.needed_locks = {}
2856 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2857 c8d8b4c8 Iustin Pop
2858 c8d8b4c8 Iustin Pop
    if self.op.names:
2859 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2860 35705d8f Guido Trotter
    else:
2861 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2862 c8d8b4c8 Iustin Pop
2863 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2864 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2865 c8d8b4c8 Iustin Pop
    if self.do_locking:
2866 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2867 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2868 c8d8b4c8 Iustin Pop
2869 35705d8f Guido Trotter
  def CheckPrereq(self):
2870 35705d8f Guido Trotter
    """Check prerequisites.
2871 35705d8f Guido Trotter

2872 35705d8f Guido Trotter
    """
2873 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes, if the
2874 c8d8b4c8 Iustin Pop
    # list is non-empty; an empty list needs no validation
2875 c8d8b4c8 Iustin Pop
    pass
2876 a8083063 Iustin Pop
2877 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2878 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2879 a8083063 Iustin Pop

2880 a8083063 Iustin Pop
    """
2881 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2882 c8d8b4c8 Iustin Pop
    if self.do_locking:
2883 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2884 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2885 3fa93523 Guido Trotter
      nodenames = self.wanted
2886 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2887 3fa93523 Guido Trotter
      if missing:
2888 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2889 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2890 c8d8b4c8 Iustin Pop
    else:
2891 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2892 c1f1cbb2 Iustin Pop
2893 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2894 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2895 a8083063 Iustin Pop
2896 a8083063 Iustin Pop
    # begin data gathering
2897 a8083063 Iustin Pop
2898 bc8e4a1a Iustin Pop
    if self.do_node_query:
2899 a8083063 Iustin Pop
      live_data = {}
2900 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2901 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2902 a8083063 Iustin Pop
      for name in nodenames:
2903 781de953 Iustin Pop
        nodeinfo = node_data[name]
2904 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2905 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2906 d599d686 Iustin Pop
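          # values reported by the node daemon may be missing or of the wrong
          # type, so look them up with a default and convert them leniently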
          fn = utils.TryConvert
2907 a8083063 Iustin Pop
          live_data[name] = {
2908 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2909 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2910 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2911 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2912 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2913 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2914 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2915 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2916 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2917 a8083063 Iustin Pop
            }
2918 a8083063 Iustin Pop
        else:
2919 a8083063 Iustin Pop
          live_data[name] = {}
2920 a8083063 Iustin Pop
    else:
2921 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2922 a8083063 Iustin Pop
2923 ec223efb Iustin Pop
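    # map each node to the sets of instances that use it as their primary or
    # secondary node; only filled in if instance fields were requested below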
    node_to_primary = dict([(name, set()) for name in nodenames])
2924 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2925 a8083063 Iustin Pop
2926 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2927 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2928 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2929 4dfd6266 Iustin Pop
      inst_data = self.cfg.GetAllInstancesInfo()
2930 a8083063 Iustin Pop
2931 1122eb25 Iustin Pop
      for inst in inst_data.values():
2932 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2933 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2934 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2935 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2936 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2937 a8083063 Iustin Pop
2938 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2939 0e67cdbe Iustin Pop
2940 a8083063 Iustin Pop
    # end data gathering
2941 a8083063 Iustin Pop
2942 a8083063 Iustin Pop
    output = []
2943 a8083063 Iustin Pop
    for node in nodelist:
2944 a8083063 Iustin Pop
      node_output = []
2945 a8083063 Iustin Pop
      for field in self.op.output_fields:
2946 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2947 19bed813 Iustin Pop
          val = getattr(node, field)
2948 ec223efb Iustin Pop
        elif field == "pinst_list":
2949 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2950 ec223efb Iustin Pop
        elif field == "sinst_list":
2951 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2952 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2953 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2954 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2955 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2956 a8083063 Iustin Pop
        elif field == "pip":
2957 a8083063 Iustin Pop
          val = node.primary_ip
2958 a8083063 Iustin Pop
        elif field == "sip":
2959 a8083063 Iustin Pop
          val = node.secondary_ip
2960 130a6a6f Iustin Pop
        elif field == "tags":
2961 130a6a6f Iustin Pop
          val = list(node.GetTags())
2962 0e67cdbe Iustin Pop
        elif field == "master":
2963 0e67cdbe Iustin Pop
          val = node.name == master_node
2964 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2965 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2966 c120ff34 Iustin Pop
        elif field == "role":
2967 c120ff34 Iustin Pop
          if node.name == master_node:
2968 c120ff34 Iustin Pop
            val = "M"
2969 c120ff34 Iustin Pop
          elif node.master_candidate:
2970 c120ff34 Iustin Pop
            val = "C"
2971 c120ff34 Iustin Pop
          elif node.drained:
2972 c120ff34 Iustin Pop
            val = "D"
2973 c120ff34 Iustin Pop
          elif node.offline:
2974 c120ff34 Iustin Pop
            val = "O"
2975 c120ff34 Iustin Pop
          else:
2976 c120ff34 Iustin Pop
            val = "R"
2977 a8083063 Iustin Pop
        else:
2978 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2979 a8083063 Iustin Pop
        node_output.append(val)
2980 a8083063 Iustin Pop
      output.append(node_output)
2981 a8083063 Iustin Pop
2982 a8083063 Iustin Pop
    return output
2983 a8083063 Iustin Pop
2984 a8083063 Iustin Pop
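# Usage sketch (an assumption for illustration, not part of this module):
# LUs such as LUQueryNodes are normally reached through their opcode, whose
# slots mirror the LU's _OP_REQP, e.g. something along the lines of:
#   op = opcodes.OpQueryNodes(output_fields=["name", "pinst_cnt", "mfree"],
#                             names=[], use_locking=False)
# submitted through the job queue; Exec() then returns one row of values per
# node, in the order of the requested output_fields.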
2985 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2986 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2987 dcb93971 Michael Hanselmann

2988 dcb93971 Michael Hanselmann
  """
2989 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2990 21a15682 Guido Trotter
  REQ_BGL = False
2991 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2992 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2993 21a15682 Guido Trotter
2994 21a15682 Guido Trotter
  def ExpandNames(self):
2995 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2996 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2997 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2998 21a15682 Guido Trotter
2999 21a15682 Guido Trotter
    self.needed_locks = {}
3000 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3001 21a15682 Guido Trotter
    if not self.op.nodes:
3002 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3003 21a15682 Guido Trotter
    else:
3004 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
3005 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
3006 dcb93971 Michael Hanselmann
3007 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
3008 dcb93971 Michael Hanselmann
    """Check prerequisites.
3009 dcb93971 Michael Hanselmann

3010 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
3011 dcb93971 Michael Hanselmann

3012 dcb93971 Michael Hanselmann
    """
3013 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3014 dcb93971 Michael Hanselmann
3015 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
3016 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3017 dcb93971 Michael Hanselmann

3018 dcb93971 Michael Hanselmann
    """
3019 a7ba5e53 Iustin Pop
    nodenames = self.nodes
3020 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
3021 dcb93971 Michael Hanselmann
3022 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3023 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
3024 dcb93971 Michael Hanselmann
3025 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3026 dcb93971 Michael Hanselmann
3027 dcb93971 Michael Hanselmann
    output = []
3028 dcb93971 Michael Hanselmann
    for node in nodenames:
3029 10bfe6cb Iustin Pop
      nresult = volumes[node]
3030 10bfe6cb Iustin Pop
      if nresult.offline:
3031 10bfe6cb Iustin Pop
        continue
3032 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
3033 10bfe6cb Iustin Pop
      if msg:
3034 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3035 37d19eb2 Michael Hanselmann
        continue
3036 37d19eb2 Michael Hanselmann
3037 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
3038 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
3039 dcb93971 Michael Hanselmann
3040 dcb93971 Michael Hanselmann
      for vol in node_vols:
3041 dcb93971 Michael Hanselmann
        node_output = []
3042 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
3043 dcb93971 Michael Hanselmann
          if field == "node":
3044 dcb93971 Michael Hanselmann
            val = node
3045 dcb93971 Michael Hanselmann
          elif field == "phys":
3046 dcb93971 Michael Hanselmann
            val = vol['dev']
3047 dcb93971 Michael Hanselmann
          elif field == "vg":
3048 dcb93971 Michael Hanselmann
            val = vol['vg']
3049 dcb93971 Michael Hanselmann
          elif field == "name":
3050 dcb93971 Michael Hanselmann
            val = vol['name']
3051 dcb93971 Michael Hanselmann
          elif field == "size":
3052 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
3053 dcb93971 Michael Hanselmann
          elif field == "instance":
3054 dcb93971 Michael Hanselmann
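            # find the instance owning this LV on this node; the for/else
            # construct falls through to '-' when no instance claims it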
            for inst in ilist:
3055 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
3056 dcb93971 Michael Hanselmann
                continue
3057 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
3058 dcb93971 Michael Hanselmann
                val = inst.name
3059 dcb93971 Michael Hanselmann
                break
3060 dcb93971 Michael Hanselmann
            else:
3061 dcb93971 Michael Hanselmann
              val = '-'
3062 dcb93971 Michael Hanselmann
          else:
3063 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
3064 dcb93971 Michael Hanselmann
          node_output.append(str(val))
3065 dcb93971 Michael Hanselmann
3066 dcb93971 Michael Hanselmann
        output.append(node_output)
3067 dcb93971 Michael Hanselmann
3068 dcb93971 Michael Hanselmann
    return output
3069 dcb93971 Michael Hanselmann
3070 dcb93971 Michael Hanselmann
3071 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
3072 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
3073 9e5442ce Michael Hanselmann

3074 9e5442ce Michael Hanselmann
  """
3075 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
3076 9e5442ce Michael Hanselmann
  REQ_BGL = False
3077 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3078 9e5442ce Michael Hanselmann
3079 9e5442ce Michael Hanselmann
  def ExpandNames(self):
3080 9e5442ce Michael Hanselmann
    storage_type = self.op.storage_type
3081 9e5442ce Michael Hanselmann
3082 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
3083 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
3084 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3085 9e5442ce Michael Hanselmann
3086 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
3087 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3088 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
3089 9e5442ce Michael Hanselmann
3090 9e5442ce Michael Hanselmann
    self.needed_locks = {}
3091 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
3092 9e5442ce Michael Hanselmann
3093 9e5442ce Michael Hanselmann
    if self.op.nodes:
3094 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
3095 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
3096 9e5442ce Michael Hanselmann
    else:
3097 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3098 9e5442ce Michael Hanselmann
3099 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
3100 9e5442ce Michael Hanselmann
    """Check prerequisites.
3101 9e5442ce Michael Hanselmann

3102 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
3103 9e5442ce Michael Hanselmann

3104 9e5442ce Michael Hanselmann
    """
3105 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
3106 9e5442ce Michael Hanselmann
3107 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3108 9e5442ce Michael Hanselmann
3109 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
3110 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
3111 9e5442ce Michael Hanselmann

3112 9e5442ce Michael Hanselmann
    """
3113 9e5442ce Michael Hanselmann
    # Always get name to sort by
3114 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
3115 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
3116 9e5442ce Michael Hanselmann
    else:
3117 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
3118 9e5442ce Michael Hanselmann
3119 620a85fd Iustin Pop
    # Never ask for node or type as it's only known to the LU
3120 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3121 620a85fd Iustin Pop
      while extra in fields:
3122 620a85fd Iustin Pop
        fields.remove(extra)
3123 9e5442ce Michael Hanselmann
3124 9e5442ce Michael Hanselmann
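    # map each field name to its column index in the rows returned by the
    # storage_list RPC, which follow the order of the 'fields' list above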
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3125 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
3126 9e5442ce Michael Hanselmann
3127 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3128 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
3129 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
3130 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
3131 9e5442ce Michael Hanselmann
3132 9e5442ce Michael Hanselmann
    result = []
3133 9e5442ce Michael Hanselmann
3134 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
3135 9e5442ce Michael Hanselmann
      nresult = data[node]
3136 9e5442ce Michael Hanselmann
      if nresult.offline:
3137 9e5442ce Michael Hanselmann
        continue
3138 9e5442ce Michael Hanselmann
3139 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
3140 9e5442ce Michael Hanselmann
      if msg:
3141 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3142 9e5442ce Michael Hanselmann
        continue
3143 9e5442ce Michael Hanselmann
3144 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3145 9e5442ce Michael Hanselmann
3146 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
3147 9e5442ce Michael Hanselmann
        row = rows[name]
3148 9e5442ce Michael Hanselmann
3149 9e5442ce Michael Hanselmann
        out = []
3150 9e5442ce Michael Hanselmann
3151 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
3152 620a85fd Iustin Pop
          if field == constants.SF_NODE:
3153 9e5442ce Michael Hanselmann
            val = node
3154 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
3155 620a85fd Iustin Pop
            val = self.op.storage_type
3156 9e5442ce Michael Hanselmann
          elif field in field_idx:
3157 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
3158 9e5442ce Michael Hanselmann
          else:
3159 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
3160 9e5442ce Michael Hanselmann
3161 9e5442ce Michael Hanselmann
          out.append(val)
3162 9e5442ce Michael Hanselmann
3163 9e5442ce Michael Hanselmann
        result.append(out)
3164 9e5442ce Michael Hanselmann
3165 9e5442ce Michael Hanselmann
    return result
3166 9e5442ce Michael Hanselmann
3167 9e5442ce Michael Hanselmann
3168 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
3169 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
3170 efb8da02 Michael Hanselmann

3171 efb8da02 Michael Hanselmann
  """
3172 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
3173 efb8da02 Michael Hanselmann
  REQ_BGL = False
3174 efb8da02 Michael Hanselmann
3175 efb8da02 Michael Hanselmann
  def CheckArguments(self):
3176 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3177 efb8da02 Michael Hanselmann
3178 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3179 620a85fd Iustin Pop
    if storage_type not in constants.VALID_STORAGE_TYPES:
3180 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
3181 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3182 efb8da02 Michael Hanselmann
3183 efb8da02 Michael Hanselmann
  def ExpandNames(self):
3184 efb8da02 Michael Hanselmann
    self.needed_locks = {
3185 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
3186 efb8da02 Michael Hanselmann
      }
3187 efb8da02 Michael Hanselmann
3188 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
3189 efb8da02 Michael Hanselmann
    """Check prerequisites.
3190 efb8da02 Michael Hanselmann

3191 efb8da02 Michael Hanselmann
    """
3192 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3193 efb8da02 Michael Hanselmann
3194 efb8da02 Michael Hanselmann
    try:
3195 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3196 efb8da02 Michael Hanselmann
    except KeyError:
3197 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3198 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
3199 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3200 efb8da02 Michael Hanselmann
3201 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
3202 efb8da02 Michael Hanselmann
    if diff:
3203 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
3204 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
3205 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
3206 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3207 efb8da02 Michael Hanselmann
3208 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
3209 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3210 efb8da02 Michael Hanselmann

3211 efb8da02 Michael Hanselmann
    """
3212 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3213 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
3214 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
3215 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
3216 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
3217 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
3218 efb8da02 Michael Hanselmann
3219 efb8da02 Michael Hanselmann
3220 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
3221 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
3222 a8083063 Iustin Pop

3223 a8083063 Iustin Pop
  """
3224 a8083063 Iustin Pop
  HPATH = "node-add"
3225 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3226 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
3227 a8083063 Iustin Pop
3228 44caf5a8 Iustin Pop
  def CheckArguments(self):
3229 44caf5a8 Iustin Pop
    # validate/normalize the node name
3230 44caf5a8 Iustin Pop
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
3231 44caf5a8 Iustin Pop
3232 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3233 a8083063 Iustin Pop
    """Build hooks env.
3234 a8083063 Iustin Pop

3235 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
3236 a8083063 Iustin Pop

3237 a8083063 Iustin Pop
    """
3238 a8083063 Iustin Pop
    env = {
3239 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
3240 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
3241 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
3242 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
3243 a8083063 Iustin Pop
      }
3244 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
3245 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
3246 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
3247 a8083063 Iustin Pop
3248 a8083063 Iustin Pop
  def CheckPrereq(self):
3249 a8083063 Iustin Pop
    """Check prerequisites.
3250 a8083063 Iustin Pop

3251 a8083063 Iustin Pop
    This checks:
3252 a8083063 Iustin Pop
     - the new node is not already in the config
3253 a8083063 Iustin Pop
     - it is resolvable
3254 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
3255 a8083063 Iustin Pop

3256 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
3257 a8083063 Iustin Pop

3258 a8083063 Iustin Pop
    """
3259 a8083063 Iustin Pop
    node_name = self.op.node_name
3260 a8083063 Iustin Pop
    cfg = self.cfg
3261 a8083063 Iustin Pop
3262 104f4ca1 Iustin Pop
    dns_data = utils.GetHostInfo(node_name)
3263 a8083063 Iustin Pop
3264 bcf043c9 Iustin Pop
    node = dns_data.name
3265 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
3266 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
3267 a8083063 Iustin Pop
    if secondary_ip is None:
3268 a8083063 Iustin Pop
      secondary_ip = primary_ip
3269 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
3270 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
3271 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3272 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
3273 e7c6e02b Michael Hanselmann
3274 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
3275 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
3276 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3277 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
3278 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
3279 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3280 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
3281 a8083063 Iustin Pop
3282 a8083063 Iustin Pop
    for existing_node_name in node_list:
3283 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
3284 e7c6e02b Michael Hanselmann
3285 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
3286 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
3287 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
3288 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3289 5c983ee5 Iustin Pop
                                     " address configuration as before",
3290 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
3291 e7c6e02b Michael Hanselmann
        continue
3292 e7c6e02b Michael Hanselmann
3293 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
3294 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
3295 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
3296 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
3297 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3298 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
3299 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
3300 a8083063 Iustin Pop
3301 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
3302 a8083063 Iustin Pop
    # same as for the master
3303 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3304 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3305 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
3306 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
3307 a8083063 Iustin Pop
      if master_singlehomed:
3308 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
3309 5c983ee5 Iustin Pop
                                   " new node has one",
3310 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3311 a8083063 Iustin Pop
      else:
3312 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
3313 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
3314 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3315 a8083063 Iustin Pop
3316 5bbd3f7f Michael Hanselmann
    # checks reachability
3317 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3318 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
3319 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
3320 a8083063 Iustin Pop
3321 a8083063 Iustin Pop
    if not newbie_singlehomed:
3322 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
3323 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3324 b15d625f Iustin Pop
                           source=myself.secondary_ip):
3325 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3326 5c983ee5 Iustin Pop
                                   " based ping to noded port",
3327 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
3328 a8083063 Iustin Pop
3329 a8ae3eb5 Iustin Pop
    if self.op.readd:
3330 a8ae3eb5 Iustin Pop
      exceptions = [node]
3331 a8ae3eb5 Iustin Pop
    else:
3332 a8ae3eb5 Iustin Pop
      exceptions = []
3333 6d7e1f20 Guido Trotter
3334 6d7e1f20 Guido Trotter
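    # decide whether the (re-)added node should be promoted to master
    # candidate so that the candidate pool keeps its configured size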
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3335 0fff97e9 Guido Trotter
3336 a8ae3eb5 Iustin Pop
    if self.op.readd:
3337 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
3338 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3339 a8ae3eb5 Iustin Pop
    else:
3340 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
3341 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
3342 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
3343 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
3344 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
3345 a8083063 Iustin Pop
3346 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3347 a8083063 Iustin Pop
    """Adds the new node to the cluster.
3348 a8083063 Iustin Pop

3349 a8083063 Iustin Pop
    """
3350 a8083063 Iustin Pop
    new_node = self.new_node
3351 a8083063 Iustin Pop
    node = new_node.name
3352 a8083063 Iustin Pop
3353 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
3354 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
3355 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
3356 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
3357 a8ae3eb5 Iustin Pop
    if self.op.readd:
3358 7260cfbe Iustin Pop
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3359 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3360 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
3361 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
3362 a8ae3eb5 Iustin Pop
3363 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
3364 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
3365 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
3366 a8ae3eb5 Iustin Pop
3367 a8083063 Iustin Pop
    # check connectivity
3368 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
3369 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
3370 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
3371 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
3372 90b54c26 Iustin Pop
                   node, result.payload)
3373 a8083063 Iustin Pop
    else:
3374 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
3375 90b54c26 Iustin Pop
                               " node version %s" %
3376 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
3377 a8083063 Iustin Pop
3378 a8083063 Iustin Pop
    # setup ssh on node
3379 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3380 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
3381 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3382 b989b9d9 Ken Wehr
      keyarray = []
3383 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3384 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3385 b989b9d9 Ken Wehr
                  priv_key, pub_key]
3386 b989b9d9 Ken Wehr
3387 b989b9d9 Ken Wehr
      for i in keyfiles:
3388 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
3389 b989b9d9 Ken Wehr
3390 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3391 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
3392 b989b9d9 Ken Wehr
                                      keyarray[5])
3393 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
3394 a8083063 Iustin Pop
3395 a8083063 Iustin Pop
    # Add node to our /etc/hosts
3396 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3397 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
3398 c8a0948f Michael Hanselmann
3399 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
3400 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
3401 781de953 Iustin Pop
                                                 new_node.secondary_ip)
3402 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3403 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3404 c2fc8250 Iustin Pop
      if not result.payload:
3405 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3406 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
3407 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
3408 a8083063 Iustin Pop
3409 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
3410 5c0527ed Guido Trotter
    node_verify_param = {
3411 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
3412 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
3413 5c0527ed Guido Trotter
    }
3414 5c0527ed Guido Trotter
3415 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3416 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
3417 5c0527ed Guido Trotter
    for verifier in node_verify_list:
3418 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3419 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3420 6f68a739 Iustin Pop
      if nl_payload:
3421 6f68a739 Iustin Pop
        for failed in nl_payload:
3422 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
3423 31821208 Iustin Pop
                      " (checking from %s): %s" %
3424 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
3425 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
3426 ff98055b Iustin Pop
3427 d8470559 Michael Hanselmann
    if self.op.readd:
3428 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
3429 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
3430 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
3431 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
3432 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3433 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3434 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3435 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3436 a8ae3eb5 Iustin Pop
        if msg:
3437 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3438 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3439 d8470559 Michael Hanselmann
    else:
3440 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3441 0debfb35 Guido Trotter
      self.context.AddNode(new_node, self.proc.GetECId())
3442 a8083063 Iustin Pop
3443 a8083063 Iustin Pop
3444 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
3445 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3446 b31c8676 Iustin Pop

3447 b31c8676 Iustin Pop
  """
3448 b31c8676 Iustin Pop
  HPATH = "node-modify"
3449 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3450 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3451 b31c8676 Iustin Pop
  REQ_BGL = False
3452 b31c8676 Iustin Pop
3453 b31c8676 Iustin Pop
  def CheckArguments(self):
3454 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3455 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3456 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3457 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3458 601908d0 Iustin Pop
    _CheckBooleanOpField(self.op, 'auto_promote')
3459 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3460 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3461 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3462 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3463 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3464 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3465 5c983ee5 Iustin Pop
                                 " state at the same time",
3466 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3467 b31c8676 Iustin Pop
3468 601908d0 Iustin Pop
    # Boolean value that tells us whether we're offlining or draining the node
3469 601908d0 Iustin Pop
    self.offline_or_drain = (self.op.offline == True or
3470 601908d0 Iustin Pop
                             self.op.drained == True)
3471 601908d0 Iustin Pop
    self.deoffline_or_drain = (self.op.offline == False or
3472 601908d0 Iustin Pop
                               self.op.drained == False)
3473 601908d0 Iustin Pop
    self.might_demote = (self.op.master_candidate == False or
3474 601908d0 Iustin Pop
                         self.offline_or_drain)
3475 601908d0 Iustin Pop
3476 601908d0 Iustin Pop
    self.lock_all = self.op.auto_promote and self.might_demote
3477 601908d0 Iustin Pop
3478 601908d0 Iustin Pop
3479 b31c8676 Iustin Pop
  def ExpandNames(self):
3480 601908d0 Iustin Pop
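    # a possible demotion may require promoting other nodes, and that needs
    # all node locks (see _AdjustCandidatePool in Exec)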
    if self.lock_all:
3481 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3482 601908d0 Iustin Pop
    else:
3483 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3484 b31c8676 Iustin Pop
3485 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3486 b31c8676 Iustin Pop
    """Build hooks env.
3487 b31c8676 Iustin Pop

3488 b31c8676 Iustin Pop
    This runs on the master node.
3489 b31c8676 Iustin Pop

3490 b31c8676 Iustin Pop
    """
3491 b31c8676 Iustin Pop
    env = {
3492 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3493 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3494 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3495 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3496 b31c8676 Iustin Pop
      }
3497 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3498 b31c8676 Iustin Pop
          self.op.node_name]
3499 b31c8676 Iustin Pop
    return env, nl, nl
3500 b31c8676 Iustin Pop
3501 b31c8676 Iustin Pop
  def CheckPrereq(self):
3502 b31c8676 Iustin Pop
    """Check prerequisites.
3503 b31c8676 Iustin Pop

3504 b31c8676 Iustin Pop
    This only checks the instance list against the existing names.
3505 b31c8676 Iustin Pop

3506 b31c8676 Iustin Pop
    """
3507 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3508 b31c8676 Iustin Pop
3509 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3510 97c61d46 Iustin Pop
        self.op.drained is not None or
3511 97c61d46 Iustin Pop
        self.op.offline is not None):
3512 97c61d46 Iustin Pop
      # we can't change the master's node flags
3513 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3514 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3515 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3516 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3517 97c61d46 Iustin Pop
3518 601908d0 Iustin Pop
3519 601908d0 Iustin Pop
    if node.master_candidate and self.might_demote and not self.lock_all:
3520 601908d0 Iustin Pop
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3521 601908d0 Iustin Pop
      # check if after removing the current node, we're missing master
3522 601908d0 Iustin Pop
      # candidates
3523 601908d0 Iustin Pop
      (mc_remaining, mc_should, _) = \
3524 601908d0 Iustin Pop
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3525 8fe9239e Iustin Pop
      if mc_remaining < mc_should:
3526 601908d0 Iustin Pop
        raise errors.OpPrereqError("Not enough master candidates, please"
3527 601908d0 Iustin Pop
                                   " pass auto_promote to allow promotion",
3528 601908d0 Iustin Pop
                                   errors.ECODE_INVAL)
3529 3e83dd48 Iustin Pop
3530 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3531 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3532 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3533 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3534 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3535 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3536 3a5ba66a Iustin Pop
3537 3d9eb52b Guido Trotter
    # If we're being deofflined/drained, we'll MC ourself if needed
3538 601908d0 Iustin Pop
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3539 cea0534a Guido Trotter
        self.op.master_candidate == True and not node.master_candidate):
3540 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3541 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3542 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3543 3d9eb52b Guido Trotter
3544 b31c8676 Iustin Pop
    return
3545 b31c8676 Iustin Pop
3546 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3547 b31c8676 Iustin Pop
    """Modifies a node.
3548 b31c8676 Iustin Pop

3549 b31c8676 Iustin Pop
    """
3550 3a5ba66a Iustin Pop
    node = self.node
3551 b31c8676 Iustin Pop
3552 b31c8676 Iustin Pop
    result = []
3553 c9d443ea Iustin Pop
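    # remember whether the master candidate status changed, so the node is
    # readded to the serving context (job queue propagation) at the end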
    changed_mc = False
3554 b31c8676 Iustin Pop
3555 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3556 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3557 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3558 c9d443ea Iustin Pop
      if self.op.offline == True:
3559 c9d443ea Iustin Pop
        if node.master_candidate:
3560 c9d443ea Iustin Pop
          node.master_candidate = False
3561 c9d443ea Iustin Pop
          changed_mc = True
3562 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3563 c9d443ea Iustin Pop
        if node.drained:
3564 c9d443ea Iustin Pop
          node.drained = False
3565 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3566 3a5ba66a Iustin Pop
3567 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3568 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3569 c9d443ea Iustin Pop
      changed_mc = True
3570 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3571 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3572 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3573 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3574 0959c824 Iustin Pop
        if msg:
3575 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3576 b31c8676 Iustin Pop
3577 c9d443ea Iustin Pop
    if self.op.drained is not None:
3578 c9d443ea Iustin Pop
      node.drained = self.op.drained
3579 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3580 c9d443ea Iustin Pop
      if self.op.drained == True:
3581 c9d443ea Iustin Pop
        if node.master_candidate:
3582 c9d443ea Iustin Pop
          node.master_candidate = False
3583 c9d443ea Iustin Pop
          changed_mc = True
3584 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3585 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3586 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3587 dec0d9da Iustin Pop
          if msg:
3588 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3589 c9d443ea Iustin Pop
        if node.offline:
3590 c9d443ea Iustin Pop
          node.offline = False
3591 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3592 c9d443ea Iustin Pop
3593 601908d0 Iustin Pop
    # we locked all nodes, we adjust the CP before updating this node
3594 601908d0 Iustin Pop
    if self.lock_all:
3595 601908d0 Iustin Pop
      _AdjustCandidatePool(self, [node.name])
3596 601908d0 Iustin Pop
3597 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3598 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3599 601908d0 Iustin Pop
3600 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3601 c9d443ea Iustin Pop
    if changed_mc:
3602 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3603 b31c8676 Iustin Pop
3604 b31c8676 Iustin Pop
    return result
3605 b31c8676 Iustin Pop
3606 b31c8676 Iustin Pop
3607 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3608 f5118ade Iustin Pop
  """Powercycles a node.
3609 f5118ade Iustin Pop

3610 f5118ade Iustin Pop
  """
3611 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3612 f5118ade Iustin Pop
  REQ_BGL = False
3613 f5118ade Iustin Pop
3614 f5118ade Iustin Pop
  def CheckArguments(self):
3615 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3616 cf26a87a Iustin Pop
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
3617 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3618 5c983ee5 Iustin Pop
                                 " parameter was not set",
3619 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3620 f5118ade Iustin Pop
3621 f5118ade Iustin Pop
  def ExpandNames(self):
3622 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3623 f5118ade Iustin Pop

3624 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3625 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3626 f5118ade Iustin Pop

3627 f5118ade Iustin Pop
    """
3628 f5118ade Iustin Pop
    self.needed_locks = {}
3629 f5118ade Iustin Pop
3630 f5118ade Iustin Pop
  def CheckPrereq(self):
3631 f5118ade Iustin Pop
    """Check prerequisites.
3632 f5118ade Iustin Pop

3633 f5118ade Iustin Pop
    This LU has no prereqs.
3634 f5118ade Iustin Pop

3635 f5118ade Iustin Pop
    """
3636 f5118ade Iustin Pop
    pass
3637 f5118ade Iustin Pop
3638 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3639 f5118ade Iustin Pop
    """Reboots a node.
3640 f5118ade Iustin Pop

3641 f5118ade Iustin Pop
    """
3642 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3643 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3644 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3645 f5118ade Iustin Pop
    return result.payload
3646 f5118ade Iustin Pop
3647 f5118ade Iustin Pop
3648 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3649 a8083063 Iustin Pop
  """Query cluster configuration.
3650 a8083063 Iustin Pop

3651 a8083063 Iustin Pop
  """
3652 a8083063 Iustin Pop
  _OP_REQP = []
3653 642339cf Guido Trotter
  REQ_BGL = False
3654 642339cf Guido Trotter
3655 642339cf Guido Trotter
  def ExpandNames(self):
3656 642339cf Guido Trotter
    self.needed_locks = {}
3657 a8083063 Iustin Pop
3658 a8083063 Iustin Pop
  def CheckPrereq(self):
3659 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3660 a8083063 Iustin Pop

3661 a8083063 Iustin Pop
    """
3662 a8083063 Iustin Pop
    pass
3663 a8083063 Iustin Pop
3664 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3665 a8083063 Iustin Pop
    """Return cluster config.
3666 a8083063 Iustin Pop

3667 a8083063 Iustin Pop
    """
3668 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3669 17463d22 Renรฉ Nussbaumer
    os_hvp = {}
3670 17463d22 Renรฉ Nussbaumer
3671 17463d22 Renรฉ Nussbaumer
    # Filter just for enabled hypervisors
3672 17463d22 Renรฉ Nussbaumer
    for os_name, hv_dict in cluster.os_hvp.items():
3673 17463d22 Renรฉ Nussbaumer
      os_hvp[os_name] = {}
3674 17463d22 Renรฉ Nussbaumer
      for hv_name, hv_params in hv_dict.items():
3675 17463d22 Renรฉ Nussbaumer
        if hv_name in cluster.enabled_hypervisors:
3676 17463d22 Renรฉ Nussbaumer
          os_hvp[os_name][hv_name] = hv_params
3677 17463d22 Renรฉ Nussbaumer
3678 a8083063 Iustin Pop
    result = {
3679 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3680 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3681 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3682 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3683 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3684 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3685 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3686 469f88e1 Iustin Pop
      "master": cluster.master_node,
3687 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3688 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3689 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3690 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3691 17463d22 Renรฉ Nussbaumer
      "os_hvp": os_hvp,
3692 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3693 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3694 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3695 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3696 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3697 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3698 3953242f Iustin Pop
      "maintain_node_health": cluster.maintain_node_health,
3699 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3700 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3701 259578eb Iustin Pop
      "uuid": cluster.uuid,
3702 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3703 a8083063 Iustin Pop
      }
3704 a8083063 Iustin Pop
3705 a8083063 Iustin Pop
    return result
3706 a8083063 Iustin Pop
3707 a8083063 Iustin Pop
3708 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3709 ae5849b5 Michael Hanselmann
  """Return configuration values.
3710 a8083063 Iustin Pop

3711 a8083063 Iustin Pop
  """
3712 a8083063 Iustin Pop
  _OP_REQP = []
3713 642339cf Guido Trotter
  REQ_BGL = False
3714 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3715 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3716 05e50653 Michael Hanselmann
                                  "watcher_pause")
3717 642339cf Guido Trotter
3718 642339cf Guido Trotter
  def ExpandNames(self):
3719 642339cf Guido Trotter
    self.needed_locks = {}
3720 a8083063 Iustin Pop
3721 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3722 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3723 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3724 ae5849b5 Michael Hanselmann
3725 a8083063 Iustin Pop
  def CheckPrereq(self):
3726 a8083063 Iustin Pop
    """No prerequisites.
3727 a8083063 Iustin Pop

3728 a8083063 Iustin Pop
    """
3729 a8083063 Iustin Pop
    pass
3730 a8083063 Iustin Pop
3731 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3732 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3733 a8083063 Iustin Pop

3734 a8083063 Iustin Pop
    """
3735 ae5849b5 Michael Hanselmann
    values = []
3736 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3737 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3738 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3739 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3740 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3741 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3742 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3743 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3744 cac599f1 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3745 ae5849b5 Michael Hanselmann
      else:
3746 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3747 3ccafd0e Iustin Pop
      values.append(entry)
3748 ae5849b5 Michael Hanselmann
    return values
3749 a8083063 Iustin Pop
3750 a8083063 Iustin Pop
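# Illustrative sketch (not part of the original module): clients ask for
# these values by listing field names in the opcode's output_fields, e.g.
# something like
#
#   op = opcodes.OpQueryConfigValues(output_fields=["cluster_name",
#                                                   "drain_flag"])
#
# The opcode and client helper names are assumptions here; only the field
# names ("cluster_name", "master_node", "drain_flag", "watcher_pause") are
# defined by the LU above.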
3751 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3752 a8083063 Iustin Pop
  """Bring up an instance's disks.
3753 a8083063 Iustin Pop

3754 a8083063 Iustin Pop
  """
3755 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3756 f22a8ba3 Guido Trotter
  REQ_BGL = False
3757 f22a8ba3 Guido Trotter
3758 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3759 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3760 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3761 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3762 f22a8ba3 Guido Trotter
3763 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3764 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3765 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3766 a8083063 Iustin Pop
3767 a8083063 Iustin Pop
  def CheckPrereq(self):
3768 a8083063 Iustin Pop
    """Check prerequisites.
3769 a8083063 Iustin Pop

3770 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3771 a8083063 Iustin Pop

3772 a8083063 Iustin Pop
    """
3773 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3774 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3775 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3776 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3777 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3778 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3779 a8083063 Iustin Pop
3780 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3781 a8083063 Iustin Pop
    """Activate the disks.
3782 a8083063 Iustin Pop

3783 a8083063 Iustin Pop
    """
3784 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3785 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3786 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3787 a8083063 Iustin Pop
    if not disks_ok:
3788 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3789 a8083063 Iustin Pop
3790 a8083063 Iustin Pop
    return disks_info
3791 a8083063 Iustin Pop
3792 a8083063 Iustin Pop
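# Illustrative note (not part of the original module): passing
# ignore_size=True in the opcode makes the LU above assemble the disks
# without trusting the sizes recorded in the configuration; a hypothetical
# use case is re-activating disks whose recorded size has gone stale, so
# that the size found on the nodes wins.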
3793 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3794 e3443b36 Iustin Pop
                           ignore_size=False):
3795 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3796 a8083063 Iustin Pop

3797 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3798 a8083063 Iustin Pop

3799 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3800 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3801 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3802 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3803 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3804 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3805 e4376078 Iustin Pop
      won't result in an error return from the function
3806 e3443b36 Iustin Pop
  @type ignore_size: boolean
3807 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3808 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3809 e3443b36 Iustin Pop
      when the size is wrong
3810 e4376078 Iustin Pop
  @return: a (disks_ok, device_info) tuple; device_info is a list of
3811 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
3812 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3813 a8083063 Iustin Pop

3814 a8083063 Iustin Pop
  """
3815 a8083063 Iustin Pop
  device_info = []
3816 a8083063 Iustin Pop
  disks_ok = True
3817 fdbd668d Iustin Pop
  iname = instance.name
3818 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
3819 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3820 fdbd668d Iustin Pop
  # before handshaking has occurred, but we do not eliminate it
3821 fdbd668d Iustin Pop
3822 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3823 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3824 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3825 fdbd668d Iustin Pop
  # SyncSource, etc.)
3826 fdbd668d Iustin Pop
3827 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3828 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3829 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3830 e3443b36 Iustin Pop
      if ignore_size:
3831 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3832 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3833 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3834 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3835 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3836 53c14ef1 Iustin Pop
      if msg:
3837 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3838 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3839 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3840 fdbd668d Iustin Pop
        if not ignore_secondaries:
3841 a8083063 Iustin Pop
          disks_ok = False
3842 fdbd668d Iustin Pop
3843 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3844 fdbd668d Iustin Pop
3845 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3846 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3847 d52ea991 Michael Hanselmann
    dev_path = None
3848 d52ea991 Michael Hanselmann
3849 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3850 fdbd668d Iustin Pop
      if node != instance.primary_node:
3851 fdbd668d Iustin Pop
        continue
3852 e3443b36 Iustin Pop
      if ignore_size:
3853 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3854 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3855 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3856 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3857 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3858 53c14ef1 Iustin Pop
      if msg:
3859 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3860 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3861 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3862 fdbd668d Iustin Pop
        disks_ok = False
3863 d52ea991 Michael Hanselmann
      else:
3864 d52ea991 Michael Hanselmann
        dev_path = result.payload
3865 d52ea991 Michael Hanselmann
3866 d52ea991 Michael Hanselmann
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
3867 a8083063 Iustin Pop
3868 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3869 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3870 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3871 b352ab5b Iustin Pop
  for disk in instance.disks:
3872 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3873 b352ab5b Iustin Pop
3874 a8083063 Iustin Pop
  return disks_ok, device_info
3875 a8083063 Iustin Pop
3876 a8083063 Iustin Pop
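# Illustrative sketch (not part of the original module): a typical caller
# consumes the (disks_ok, device_info) pair roughly like this, assuming
# "lu" and "instance" are a locked LU and its instance object:
#
#   disks_ok, device_info = _AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
#   # device_info is a list of (node, iv_name, device_path) tuples
#
# compare LUActivateInstanceDisks.Exec above, which does exactly this.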
3877 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3878 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3879 3ecf6786 Iustin Pop

3880 3ecf6786 Iustin Pop
  """
3881 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3882 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3883 fe7b0351 Michael Hanselmann
  if not disks_ok:
3884 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3885 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3886 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3887 86d9d3bb Iustin Pop
                         " secondary node,"
3888 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3889 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3890 fe7b0351 Michael Hanselmann
3891 fe7b0351 Michael Hanselmann
3892 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3893 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3894 a8083063 Iustin Pop

3895 a8083063 Iustin Pop
  """
3896 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3897 f22a8ba3 Guido Trotter
  REQ_BGL = False
3898 f22a8ba3 Guido Trotter
3899 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3900 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3901 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3902 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3903 f22a8ba3 Guido Trotter
3904 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3905 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3906 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3907 a8083063 Iustin Pop
3908 a8083063 Iustin Pop
  def CheckPrereq(self):
3909 a8083063 Iustin Pop
    """Check prerequisites.
3910 a8083063 Iustin Pop

3911 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3912 a8083063 Iustin Pop

3913 a8083063 Iustin Pop
    """
3914 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3915 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3916 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3917 a8083063 Iustin Pop
3918 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3919 a8083063 Iustin Pop
    """Deactivate the disks
3920 a8083063 Iustin Pop

3921 a8083063 Iustin Pop
    """
3922 a8083063 Iustin Pop
    instance = self.instance
3923 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3924 a8083063 Iustin Pop
3925 a8083063 Iustin Pop
3926 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3927 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3928 155d6c75 Guido Trotter

3929 155d6c75 Guido Trotter
  This function checks if an instance is running before calling
3930 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3931 155d6c75 Guido Trotter

3932 155d6c75 Guido Trotter
  """
3933 31624382 Iustin Pop
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
3934 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3935 a8083063 Iustin Pop
3936 a8083063 Iustin Pop
3937 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3938 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3939 a8083063 Iustin Pop

3940 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3941 a8083063 Iustin Pop

3942 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
3943 a8083063 Iustin Pop
  ignored.
3944 a8083063 Iustin Pop

3945 a8083063 Iustin Pop
  """
3946 cacfd1fd Iustin Pop
  all_result = True
3947 a8083063 Iustin Pop
  for disk in instance.disks:
3948 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
3949 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
3950 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
3951 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3952 cacfd1fd Iustin Pop
      if msg:
3953 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
3954 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
3955 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
3956 cacfd1fd Iustin Pop
          all_result = False
3957 cacfd1fd Iustin Pop
  return all_result
3958 a8083063 Iustin Pop
3959 a8083063 Iustin Pop
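# Illustrative note (not part of the original module): callers that must
# tear down disks even when the primary node is failing can use
#
#   _ShutdownInstanceDisks(lu, instance, ignore_primary=True)
#
# with the default ignore_primary=False, any shutdown error (including on
# the primary node) makes the function return False.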
3960 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
3961 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
3962 d4f16fd9 Iustin Pop

3963 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
3964 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
3965 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
3966 d4f16fd9 Iustin Pop
  exception.
3967 d4f16fd9 Iustin Pop

3968 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
3969 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
3970 e69d05fd Iustin Pop
  @type node: C{str}
3971 e69d05fd Iustin Pop
  @param node: the node to check
3972 e69d05fd Iustin Pop
  @type reason: C{str}
3973 e69d05fd Iustin Pop
  @param reason: string to use in the error message
3974 e69d05fd Iustin Pop
  @type requested: C{int}
3975 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
3976 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
3977 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
3978 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
3979 e69d05fd Iustin Pop
      we cannot check the node
3980 d4f16fd9 Iustin Pop

3981 d4f16fd9 Iustin Pop
  """
3982 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
3983 045dd6d9 Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node,
3984 045dd6d9 Iustin Pop
                       prereq=True, ecode=errors.ECODE_ENVIRON)
3985 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
3986 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
3987 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
3988 5c983ee5 Iustin Pop
                               " was '%s'" % (node, free_mem),
3989 5c983ee5 Iustin Pop
                               errors.ECODE_ENVIRON)
3990 d4f16fd9 Iustin Pop
  if requested > free_mem:
3991 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
3992 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
3993 5c983ee5 Iustin Pop
                               (node, reason, requested, free_mem),
3994 5c983ee5 Iustin Pop
                               errors.ECODE_NORES)
3995 d4f16fd9 Iustin Pop
3996 d4f16fd9 Iustin Pop
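# Illustrative sketch (not part of the original module): CheckPrereq
# implementations call the helper above along these lines (values made up):
#
#   _CheckNodeFreeMemory(self, target_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
#
# the "reason" string only ever shows up in the OpPrereqError message.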
3997 701384a9 Iustin Pop
def _CheckNodesFreeDisk(lu, nodenames, requested):
3998 701384a9 Iustin Pop
  """Checks if nodes have enough free disk space in the default VG.
3999 701384a9 Iustin Pop

4000 701384a9 Iustin Pop
  This function checks if all given nodes have the needed amount of
4001 701384a9 Iustin Pop
  free disk. In case any node has less disk or we cannot get the
4002 701384a9 Iustin Pop
  information from the node, this function raises an OpPrereqError
4003 701384a9 Iustin Pop
  exception.
4004 701384a9 Iustin Pop

4005 701384a9 Iustin Pop
  @type lu: C{LogicalUnit}
4006 701384a9 Iustin Pop
  @param lu: a logical unit from which we get configuration data
4007 701384a9 Iustin Pop
  @type nodenames: C{list}
4008 3a488770 Iustin Pop
  @param nodenames: the list of node names to check
4009 701384a9 Iustin Pop
  @type requested: C{int}
4010 701384a9 Iustin Pop
  @param requested: the amount of disk in MiB to check for
4011 701384a9 Iustin Pop
  @raise errors.OpPrereqError: if any node doesn't have enough disk, or
4012 701384a9 Iustin Pop
      we cannot check the node
4013 701384a9 Iustin Pop

4014 701384a9 Iustin Pop
  """
4015 701384a9 Iustin Pop
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4016 701384a9 Iustin Pop
                                   lu.cfg.GetHypervisorType())
4017 701384a9 Iustin Pop
  for node in nodenames:
4018 701384a9 Iustin Pop
    info = nodeinfo[node]
4019 701384a9 Iustin Pop
    info.Raise("Cannot get current information from node %s" % node,
4020 701384a9 Iustin Pop
               prereq=True, ecode=errors.ECODE_ENVIRON)
4021 701384a9 Iustin Pop
    vg_free = info.payload.get("vg_free", None)
4022 701384a9 Iustin Pop
    if not isinstance(vg_free, int):
4023 701384a9 Iustin Pop
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4024 701384a9 Iustin Pop
                                 " result was '%s'" % (node, vg_free),
4025 701384a9 Iustin Pop
                                 errors.ECODE_ENVIRON)
4026 701384a9 Iustin Pop
    if requested > vg_free:
4027 701384a9 Iustin Pop
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4028 701384a9 Iustin Pop
                                 " required %d MiB, available %d MiB" %
4029 701384a9 Iustin Pop
                                 (node, requested, vg_free),
4030 701384a9 Iustin Pop
                                 errors.ECODE_NORES)
4031 701384a9 Iustin Pop
4032 701384a9 Iustin Pop
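# Illustrative note (not part of the original module): unlike the memory
# check above, this helper takes a list of node names and verifies all of
# them against the cluster's default volume group, e.g. (names made up):
#
#   _CheckNodesFreeDisk(self, [pnode, snode], required_disk_mb)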
4033 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
4034 a8083063 Iustin Pop
  """Starts an instance.
4035 a8083063 Iustin Pop

4036 a8083063 Iustin Pop
  """
4037 a8083063 Iustin Pop
  HPATH = "instance-start"
4038 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4039 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
4040 e873317a Guido Trotter
  REQ_BGL = False
4041 e873317a Guido Trotter
4042 e873317a Guido Trotter
  def ExpandNames(self):
4043 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4044 a8083063 Iustin Pop
4045 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4046 a8083063 Iustin Pop
    """Build hooks env.
4047 a8083063 Iustin Pop

4048 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4049 a8083063 Iustin Pop

4050 a8083063 Iustin Pop
    """
4051 a8083063 Iustin Pop
    env = {
4052 a8083063 Iustin Pop
      "FORCE": self.op.force,
4053 a8083063 Iustin Pop
      }
4054 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4055 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4056 a8083063 Iustin Pop
    return env, nl, nl
4057 a8083063 Iustin Pop
4058 a8083063 Iustin Pop
  def CheckPrereq(self):
4059 a8083063 Iustin Pop
    """Check prerequisites.
4060 a8083063 Iustin Pop

4061 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4062 a8083063 Iustin Pop

4063 a8083063 Iustin Pop
    """
4064 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4065 e873317a Guido Trotter
    assert self.instance is not None, \
4066 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4067 a8083063 Iustin Pop
4068 d04aaa2f Iustin Pop
    # extra beparams
4069 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
4070 d04aaa2f Iustin Pop
    if self.beparams:
4071 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
4072 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
4073 5c983ee5 Iustin Pop
                                   " dict" % (type(self.beparams), ),
4074 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4075 d04aaa2f Iustin Pop
      # fill the beparams dict
4076 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
4077 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
4078 d04aaa2f Iustin Pop
4079 d04aaa2f Iustin Pop
    # extra hvparams
4080 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
4081 d04aaa2f Iustin Pop
    if self.hvparams:
4082 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
4083 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
4084 5c983ee5 Iustin Pop
                                   " dict" % (type(self.hvparams), ),
4085 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4086 d04aaa2f Iustin Pop
4087 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
4088 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4089 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
4090 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
4091 d04aaa2f Iustin Pop
                                    instance.hvparams)
4092 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
4093 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4094 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
4095 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4096 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
4097 d04aaa2f Iustin Pop
4098 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4099 7527a8a4 Iustin Pop
4100 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4101 5bbd3f7f Michael Hanselmann
    # check bridges existence
4102 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4103 a8083063 Iustin Pop
4104 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4105 f1926756 Guido Trotter
                                              instance.name,
4106 f1926756 Guido Trotter
                                              instance.hypervisor)
4107 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4108 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4109 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
4110 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
4111 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
4112 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
4113 d4f16fd9 Iustin Pop
4114 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4115 a8083063 Iustin Pop
    """Start the instance.
4116 a8083063 Iustin Pop

4117 a8083063 Iustin Pop
    """
4118 a8083063 Iustin Pop
    instance = self.instance
4119 a8083063 Iustin Pop
    force = self.op.force
4120 a8083063 Iustin Pop
4121 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
4122 fe482621 Iustin Pop
4123 a8083063 Iustin Pop
    node_current = instance.primary_node
4124 a8083063 Iustin Pop
4125 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
4126 a8083063 Iustin Pop
4127 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
4128 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
4129 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4130 dd279568 Iustin Pop
    if msg:
4131 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4132 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
4133 a8083063 Iustin Pop
4134 a8083063 Iustin Pop
4135 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
4136 bf6929a2 Alexander Schreiber
  """Reboot an instance.
4137 bf6929a2 Alexander Schreiber

4138 bf6929a2 Alexander Schreiber
  """
4139 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
4140 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
4141 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
4142 e873317a Guido Trotter
  REQ_BGL = False
4143 e873317a Guido Trotter
4144 17c3f802 Guido Trotter
  def CheckArguments(self):
4145 17c3f802 Guido Trotter
    """Check the arguments.
4146 17c3f802 Guido Trotter

4147 17c3f802 Guido Trotter
    """
4148 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4149 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4150 17c3f802 Guido Trotter
4151 e873317a Guido Trotter
  def ExpandNames(self):
4152 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
4153 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4154 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
4155 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
4156 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
4157 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4158 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
4159 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4160 bf6929a2 Alexander Schreiber
4161 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
4162 bf6929a2 Alexander Schreiber
    """Build hooks env.
4163 bf6929a2 Alexander Schreiber

4164 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
4165 bf6929a2 Alexander Schreiber

4166 bf6929a2 Alexander Schreiber
    """
4167 bf6929a2 Alexander Schreiber
    env = {
4168 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4169 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
4170 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4171 bf6929a2 Alexander Schreiber
      }
4172 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4173 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4174 bf6929a2 Alexander Schreiber
    return env, nl, nl
4175 bf6929a2 Alexander Schreiber
4176 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
4177 bf6929a2 Alexander Schreiber
    """Check prerequisites.
4178 bf6929a2 Alexander Schreiber

4179 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
4180 bf6929a2 Alexander Schreiber

4181 bf6929a2 Alexander Schreiber
    """
4182 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4183 e873317a Guido Trotter
    assert self.instance is not None, \
4184 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4185 bf6929a2 Alexander Schreiber
4186 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4187 7527a8a4 Iustin Pop
4188 5bbd3f7f Michael Hanselmann
    # check bridges existence
4189 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4190 bf6929a2 Alexander Schreiber
4191 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
4192 bf6929a2 Alexander Schreiber
    """Reboot the instance.
4193 bf6929a2 Alexander Schreiber

4194 bf6929a2 Alexander Schreiber
    """
4195 bf6929a2 Alexander Schreiber
    instance = self.instance
4196 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
4197 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
4198 bf6929a2 Alexander Schreiber
4199 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
4200 bf6929a2 Alexander Schreiber
4201 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4202 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
4203 ae48ac32 Iustin Pop
      for disk in instance.disks:
4204 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
4205 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
4206 17c3f802 Guido Trotter
                                             reboot_type,
4207 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4208 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
4209 bf6929a2 Alexander Schreiber
    else:
4210 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(node_current, instance,
4211 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
4212 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
4213 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4214 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
4215 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4216 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4217 dd279568 Iustin Pop
      if msg:
4218 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4219 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
4220 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
4221 bf6929a2 Alexander Schreiber
4222 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
4223 bf6929a2 Alexander Schreiber
4224 bf6929a2 Alexander Schreiber
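# Illustrative note (not part of the original module): the three reboot
# types accepted by LURebootInstance map to increasingly heavy operations:
# INSTANCE_REBOOT_SOFT and INSTANCE_REBOOT_HARD are handed to the
# hypervisor via call_instance_reboot, while INSTANCE_REBOOT_FULL shuts the
# instance down, cycles its disks and starts it again, as in Exec above.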
4225 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
4226 a8083063 Iustin Pop
  """Shutdown an instance.
4227 a8083063 Iustin Pop

4228 a8083063 Iustin Pop
  """
4229 a8083063 Iustin Pop
  HPATH = "instance-stop"
4230 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4231 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4232 e873317a Guido Trotter
  REQ_BGL = False
4233 e873317a Guido Trotter
4234 6263189c Guido Trotter
  def CheckArguments(self):
4235 6263189c Guido Trotter
    """Check the arguments.
4236 6263189c Guido Trotter

4237 6263189c Guido Trotter
    """
4238 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
4239 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
4240 6263189c Guido Trotter
4241 e873317a Guido Trotter
  def ExpandNames(self):
4242 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4243 a8083063 Iustin Pop
4244 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4245 a8083063 Iustin Pop
    """Build hooks env.
4246 a8083063 Iustin Pop

4247 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4248 a8083063 Iustin Pop

4249 a8083063 Iustin Pop
    """
4250 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4251 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
4252 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4253 a8083063 Iustin Pop
    return env, nl, nl
4254 a8083063 Iustin Pop
4255 a8083063 Iustin Pop
  def CheckPrereq(self):
4256 a8083063 Iustin Pop
    """Check prerequisites.
4257 a8083063 Iustin Pop

4258 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4259 a8083063 Iustin Pop

4260 a8083063 Iustin Pop
    """
4261 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4262 e873317a Guido Trotter
    assert self.instance is not None, \
4263 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4264 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
4265 a8083063 Iustin Pop
4266 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4267 a8083063 Iustin Pop
    """Shutdown the instance.
4268 a8083063 Iustin Pop

4269 a8083063 Iustin Pop
    """
4270 a8083063 Iustin Pop
    instance = self.instance
4271 a8083063 Iustin Pop
    node_current = instance.primary_node
4272 6263189c Guido Trotter
    timeout = self.timeout
4273 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
4274 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4275 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4276 1fae010f Iustin Pop
    if msg:
4277 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4278 a8083063 Iustin Pop
4279 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
4280 a8083063 Iustin Pop
4281 a8083063 Iustin Pop
4282 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
4283 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
4284 fe7b0351 Michael Hanselmann

4285 fe7b0351 Michael Hanselmann
  """
4286 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
4287 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
4288 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
4289 4e0b4d2d Guido Trotter
  REQ_BGL = False
4290 4e0b4d2d Guido Trotter
4291 4e0b4d2d Guido Trotter
  def ExpandNames(self):
4292 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
4293 fe7b0351 Michael Hanselmann
4294 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
4295 fe7b0351 Michael Hanselmann
    """Build hooks env.
4296 fe7b0351 Michael Hanselmann

4297 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
4298 fe7b0351 Michael Hanselmann

4299 fe7b0351 Michael Hanselmann
    """
4300 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4301 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4302 fe7b0351 Michael Hanselmann
    return env, nl, nl
4303 fe7b0351 Michael Hanselmann
4304 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
4305 fe7b0351 Michael Hanselmann
    """Check prerequisites.
4306 fe7b0351 Michael Hanselmann

4307 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
4308 fe7b0351 Michael Hanselmann

4309 fe7b0351 Michael Hanselmann
    """
4310 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4311 4e0b4d2d Guido Trotter
    assert instance is not None, \
4312 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4313 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4314 4e0b4d2d Guido Trotter
4315 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
4316 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4317 5c983ee5 Iustin Pop
                                 self.op.instance_name,
4318 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
4319 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot reinstall")
4320 d0834de3 Michael Hanselmann
4321 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
4322 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
4323 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4324 d0834de3 Michael Hanselmann
      # OS verification
4325 cf26a87a Iustin Pop
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4326 231cd901 Iustin Pop
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4327 d0834de3 Michael Hanselmann
4328 fe7b0351 Michael Hanselmann
    self.instance = instance
4329 fe7b0351 Michael Hanselmann
4330 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
4331 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
4332 fe7b0351 Michael Hanselmann

4333 fe7b0351 Michael Hanselmann
    """
4334 fe7b0351 Michael Hanselmann
    inst = self.instance
4335 fe7b0351 Michael Hanselmann
4336 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4337 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4338 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
4339 a4eae71f Michael Hanselmann
      self.cfg.Update(inst, feedback_fn)
4340 d0834de3 Michael Hanselmann
4341 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4342 fe7b0351 Michael Hanselmann
    try:
4343 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
4344 4a0e011f Iustin Pop
      # FIXME: pass debug option from opcode to backend
4345 dd713605 Iustin Pop
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4346 dd713605 Iustin Pop
                                             self.op.debug_level)
4347 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
4348 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
4349 fe7b0351 Michael Hanselmann
    finally:
4350 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4351 fe7b0351 Michael Hanselmann
4352 fe7b0351 Michael Hanselmann
4353 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
4354 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
4355 bd315bfa Iustin Pop

4356 bd315bfa Iustin Pop
  """
4357 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
4358 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4359 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
4360 bd315bfa Iustin Pop
  REQ_BGL = False
4361 bd315bfa Iustin Pop
4362 bd315bfa Iustin Pop
  def CheckArguments(self):
4363 bd315bfa Iustin Pop
    """Check the arguments.
4364 bd315bfa Iustin Pop

4365 bd315bfa Iustin Pop
    """
4366 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
4367 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
4368 bd315bfa Iustin Pop
    for item in self.op.disks:
4369 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
4370 bd315bfa Iustin Pop
          item < 0):
4371 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
4372 5c983ee5 Iustin Pop
                                   str(item), errors.ECODE_INVAL)
4373 bd315bfa Iustin Pop
4374 bd315bfa Iustin Pop
  def ExpandNames(self):
4375 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
4376 bd315bfa Iustin Pop
4377 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
4378 bd315bfa Iustin Pop
    """Build hooks env.
4379 bd315bfa Iustin Pop

4380 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4381 bd315bfa Iustin Pop

4382 bd315bfa Iustin Pop
    """
4383 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4384 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4385 bd315bfa Iustin Pop
    return env, nl, nl
4386 bd315bfa Iustin Pop
4387 bd315bfa Iustin Pop
  def CheckPrereq(self):
4388 bd315bfa Iustin Pop
    """Check prerequisites.
4389 bd315bfa Iustin Pop

4390 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
4391 bd315bfa Iustin Pop

4392 bd315bfa Iustin Pop
    """
4393 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4394 bd315bfa Iustin Pop
    assert instance is not None, \
4395 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4396 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4397 bd315bfa Iustin Pop
4398 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
4399 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4400 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_INVAL)
4401 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot recreate disks")
4402 bd315bfa Iustin Pop
4403 bd315bfa Iustin Pop
    if not self.op.disks:
4404 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
4405 bd315bfa Iustin Pop
    else:
4406 bd315bfa Iustin Pop
      for idx in self.op.disks:
4407 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
4408 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4409 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
4410 bd315bfa Iustin Pop
4411 bd315bfa Iustin Pop
    self.instance = instance
4412 bd315bfa Iustin Pop
4413 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
4414 bd315bfa Iustin Pop
    """Recreate the disks.
4415 bd315bfa Iustin Pop

4416 bd315bfa Iustin Pop
    """
4417 bd315bfa Iustin Pop
    to_skip = []
4418 1122eb25 Iustin Pop
    for idx, _ in enumerate(self.instance.disks):
4419 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
4420 bd315bfa Iustin Pop
        to_skip.append(idx)
4421 bd315bfa Iustin Pop
        continue
4422 bd315bfa Iustin Pop
4423 bd315bfa Iustin Pop
    _CreateDisks(self, self.instance, to_skip=to_skip)
4424 bd315bfa Iustin Pop
4425 bd315bfa Iustin Pop
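# Illustrative note (not part of the original module): the "disks" argument
# of LURecreateInstanceDisks is a list of disk indices, e.g. (values made
# up)
#
#   disks=[]      -> recreate every disk of the instance
#   disks=[0, 2]  -> recreate only the first and third disk
#
# indices outside range(len(instance.disks)) are rejected in CheckPrereq.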
4426 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
4427 decd5f45 Iustin Pop
  """Rename an instance.
4428 decd5f45 Iustin Pop

4429 decd5f45 Iustin Pop
  """
4430 decd5f45 Iustin Pop
  HPATH = "instance-rename"
4431 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4432 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
4433 decd5f45 Iustin Pop
4434 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
4435 decd5f45 Iustin Pop
    """Build hooks env.
4436 decd5f45 Iustin Pop

4437 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4438 decd5f45 Iustin Pop

4439 decd5f45 Iustin Pop
    """
4440 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4441 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4442 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4443 decd5f45 Iustin Pop
    return env, nl, nl
4444 decd5f45 Iustin Pop
4445 decd5f45 Iustin Pop
  def CheckPrereq(self):
4446 decd5f45 Iustin Pop
    """Check prerequisites.
4447 decd5f45 Iustin Pop

4448 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
4449 decd5f45 Iustin Pop

4450 decd5f45 Iustin Pop
    """
4451 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4452 cf26a87a Iustin Pop
                                                self.op.instance_name)
4453 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4454 cf26a87a Iustin Pop
    assert instance is not None
4455 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4456 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot rename")
4457 decd5f45 Iustin Pop
    self.instance = instance
4458 decd5f45 Iustin Pop
4459 decd5f45 Iustin Pop
    # new name verification
4460 104f4ca1 Iustin Pop
    name_info = utils.GetHostInfo(self.op.new_name)
4461 decd5f45 Iustin Pop
4462 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
4463 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
4464 7bde3275 Guido Trotter
    if new_name in instance_list:
4465 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4466 5c983ee5 Iustin Pop
                                 new_name, errors.ECODE_EXISTS)
4467 7bde3275 Guido Trotter
4468 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
4469 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4470 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4471 5c983ee5 Iustin Pop
                                   (name_info.ip, new_name),
4472 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
4473 decd5f45 Iustin Pop
4474 decd5f45 Iustin Pop
4475 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
4476 decd5f45 Iustin Pop
    """Reinstall the instance.
4477 decd5f45 Iustin Pop

4478 decd5f45 Iustin Pop
    """
4479 decd5f45 Iustin Pop
    inst = self.instance
4480 decd5f45 Iustin Pop
    old_name = inst.name
4481 decd5f45 Iustin Pop
4482 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4483 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4484 b23c4333 Manuel Franceschini
4485 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4486 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
4487 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4488 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4489 decd5f45 Iustin Pop
4490 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
4491 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4492 decd5f45 Iustin Pop
4493 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4494 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4495 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4496 72737a7f Iustin Pop
                                                     old_file_storage_dir,
4497 72737a7f Iustin Pop
                                                     new_file_storage_dir)
4498 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4499 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
4500 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
4501 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
4502 b23c4333 Manuel Franceschini
4503 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4504 decd5f45 Iustin Pop
    try:
4505 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4506 dd713605 Iustin Pop
                                                 old_name, self.op.debug_level)
4507 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4508 96841384 Iustin Pop
      if msg:
4509 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4510 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4511 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4512 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4513 decd5f45 Iustin Pop
    finally:
4514 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4515 decd5f45 Iustin Pop
4516 decd5f45 Iustin Pop
4517 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4518 a8083063 Iustin Pop
  """Remove an instance.
4519 a8083063 Iustin Pop

4520 a8083063 Iustin Pop
  """
4521 a8083063 Iustin Pop
  HPATH = "instance-remove"
4522 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4523 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4524 cf472233 Guido Trotter
  REQ_BGL = False
4525 cf472233 Guido Trotter
4526 17c3f802 Guido Trotter
  def CheckArguments(self):
4527 17c3f802 Guido Trotter
    """Check the arguments.
4528 17c3f802 Guido Trotter

4529 17c3f802 Guido Trotter
    """
4530 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4531 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4532 17c3f802 Guido Trotter
4533 cf472233 Guido Trotter
  def ExpandNames(self):
4534 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
4535 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4536 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4537 cf472233 Guido Trotter
4538 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4539 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4540 cf472233 Guido Trotter
      self._LockInstancesNodes()
4541 a8083063 Iustin Pop
4542 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4543 a8083063 Iustin Pop
    """Build hooks env.
4544 a8083063 Iustin Pop

4545 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4546 a8083063 Iustin Pop

4547 a8083063 Iustin Pop
    """
4548 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4549 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4550 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4551 abd8e836 Iustin Pop
    nl_post = list(self.instance.all_nodes) + nl
4552 abd8e836 Iustin Pop
    return env, nl, nl_post
4553 a8083063 Iustin Pop
4554 a8083063 Iustin Pop
  def CheckPrereq(self):
4555 a8083063 Iustin Pop
    """Check prerequisites.
4556 a8083063 Iustin Pop

4557 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4558 a8083063 Iustin Pop

4559 a8083063 Iustin Pop
    """
4560 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4561 cf472233 Guido Trotter
    assert self.instance is not None, \
4562 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4563 a8083063 Iustin Pop
4564 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4565 a8083063 Iustin Pop
    """Remove the instance.
4566 a8083063 Iustin Pop

4567 a8083063 Iustin Pop
    """
4568 a8083063 Iustin Pop
    instance = self.instance
4569 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4570 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4571 a8083063 Iustin Pop
4572 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4573 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4574 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4575 1fae010f Iustin Pop
    if msg:
4576 1d67656e Iustin Pop
      if self.op.ignore_failures:
4577 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4578 1d67656e Iustin Pop
      else:
4579 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4580 1fae010f Iustin Pop
                                 " node %s: %s" %
4581 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4582 a8083063 Iustin Pop
4583 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4584 a8083063 Iustin Pop
4585 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4586 1d67656e Iustin Pop
      if self.op.ignore_failures:
4587 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4588 1d67656e Iustin Pop
      else:
4589 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4590 a8083063 Iustin Pop
4591 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4592 a8083063 Iustin Pop
4593 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4594 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4595 a8083063 Iustin Pop
4596 a8083063 Iustin Pop
4597 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4598 a8083063 Iustin Pop
  """Logical unit for querying instances.
4599 a8083063 Iustin Pop

4600 a8083063 Iustin Pop
  """
4601 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
4602 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4603 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4604 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4605 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4606 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4607 5b460366 Iustin Pop
                                    "admin_state",
4608 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4609 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4610 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4611 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4612 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4613 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4614 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4615 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4616 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4617 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4618 19bed813 Iustin Pop
                                    "hvparams",
4619 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4620 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4621 7736a5f2 Iustin Pop
                                   for name in constants.HVS_PARAMETERS
4622 7736a5f2 Iustin Pop
                                   if name not in constants.HVC_GLOBALS] +
4623 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4624 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4625 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4626 31bf511f Iustin Pop
4627 a8083063 Iustin Pop
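  # Illustrative note (not part of the original code): the regular
  # expressions above mean that, besides plain names, query fields such as
  #
  #   "disk.size/0", "disk.sizes", "disk.count",
  #   "nic.mac/1", "nic.modes", "nic.bridge/0"
  #
  # are accepted by this LU.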
4628 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4629 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4630 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4631 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4632 a8083063 Iustin Pop
4633 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4634 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4635 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4636 7eb9d8f7 Guido Trotter
4637 57a2fb91 Iustin Pop
    if self.op.names:
4638 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4639 7eb9d8f7 Guido Trotter
    else:
4640 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4641 7eb9d8f7 Guido Trotter
4642 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4643 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4644 57a2fb91 Iustin Pop
    if self.do_locking:
4645 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4646 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4647 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4648 7eb9d8f7 Guido Trotter
4649 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4650 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4651 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4652 7eb9d8f7 Guido Trotter
4653 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4654 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4655 7eb9d8f7 Guido Trotter

4656 7eb9d8f7 Guido Trotter
    """
4657 57a2fb91 Iustin Pop
    pass
4658 069dcc86 Iustin Pop
4659 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4660 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4661 a8083063 Iustin Pop

4662 a8083063 Iustin Pop
    """
4663 7260cfbe Iustin Pop
    # pylint: disable-msg=R0912
4664 7260cfbe Iustin Pop
    # way too many branches here
4665 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4666 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4667 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4668 a7f5dc98 Iustin Pop
      if self.do_locking:
4669 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4670 a7f5dc98 Iustin Pop
      else:
4671 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4672 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4673 57a2fb91 Iustin Pop
    else:
4674 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4675 a7f5dc98 Iustin Pop
      if self.do_locking:
4676 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4677 a7f5dc98 Iustin Pop
      else:
4678 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4679 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4680 a7f5dc98 Iustin Pop
      if missing:
4681 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4682 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4683 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4684 c1f1cbb2 Iustin Pop
4685 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4686 a8083063 Iustin Pop
4687 a8083063 Iustin Pop
    # begin data gathering
4688 a8083063 Iustin Pop
4689 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4690 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4691 a8083063 Iustin Pop
4692 a8083063 Iustin Pop
    bad_nodes = []
4693 cbfc4681 Iustin Pop
    off_nodes = []
4694 ec79568d Iustin Pop
    if self.do_node_query:
4695 a8083063 Iustin Pop
      live_data = {}
4696 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4697 a8083063 Iustin Pop
      for name in nodes:
4698 a8083063 Iustin Pop
        result = node_data[name]
4699 cbfc4681 Iustin Pop
        if result.offline:
4700 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4701 cbfc4681 Iustin Pop
          off_nodes.append(name)
4702 3cebe102 Michael Hanselmann
        if result.fail_msg:
4703 a8083063 Iustin Pop
          bad_nodes.append(name)
4704 781de953 Iustin Pop
        else:
4705 2fa74ef4 Iustin Pop
          if result.payload:
4706 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4707 2fa74ef4 Iustin Pop
          # else no instance is alive
4708 a8083063 Iustin Pop
    else:
4709 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4710 a8083063 Iustin Pop
4711 a8083063 Iustin Pop
    # end data gathering
4712 a8083063 Iustin Pop
4713 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4714 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4715 a8083063 Iustin Pop
    output = []
4716 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4717 a8083063 Iustin Pop
    for instance in instance_list:
4718 a8083063 Iustin Pop
      iout = []
4719 7736a5f2 Iustin Pop
      i_hv = cluster.FillHV(instance, skip_globals=True)
4720 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4721 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4722 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4723 a8083063 Iustin Pop
      for field in self.op.output_fields:
4724 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4725 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4726 19bed813 Iustin Pop
          val = getattr(instance, field)
4727 a8083063 Iustin Pop
        elif field == "pnode":
4728 a8083063 Iustin Pop
          val = instance.primary_node
4729 a8083063 Iustin Pop
        elif field == "snodes":
4730 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4731 a8083063 Iustin Pop
        elif field == "admin_state":
4732 0d68c45d Iustin Pop
          val = instance.admin_up
4733 a8083063 Iustin Pop
        elif field == "oper_state":
4734 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4735 8a23d2d3 Iustin Pop
            val = None
4736 a8083063 Iustin Pop
          else:
4737 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
4738 d8052456 Iustin Pop
        elif field == "status":
4739 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4740 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4741 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4742 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4743 d8052456 Iustin Pop
          else:
4744 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4745 d8052456 Iustin Pop
            if running:
4746 0d68c45d Iustin Pop
              if instance.admin_up:
4747 d8052456 Iustin Pop
                val = "running"
4748 d8052456 Iustin Pop
              else:
4749 d8052456 Iustin Pop
                val = "ERROR_up"
4750 d8052456 Iustin Pop
            else:
4751 0d68c45d Iustin Pop
              if instance.admin_up:
4752 d8052456 Iustin Pop
                val = "ERROR_down"
4753 d8052456 Iustin Pop
              else:
4754 d8052456 Iustin Pop
                val = "ADMIN_down"
4755 a8083063 Iustin Pop
        elif field == "oper_ram":
4756 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4757 8a23d2d3 Iustin Pop
            val = None
4758 a8083063 Iustin Pop
          elif instance.name in live_data:
4759 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4760 a8083063 Iustin Pop
          else:
4761 a8083063 Iustin Pop
            val = "-"
4762 c1ce76bb Iustin Pop
        elif field == "vcpus":
4763 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4764 a8083063 Iustin Pop
        elif field == "disk_template":
4765 a8083063 Iustin Pop
          val = instance.disk_template
4766 a8083063 Iustin Pop
        elif field == "ip":
4767 39a02558 Guido Trotter
          if instance.nics:
4768 39a02558 Guido Trotter
            val = instance.nics[0].ip
4769 39a02558 Guido Trotter
          else:
4770 39a02558 Guido Trotter
            val = None
4771 638c6349 Guido Trotter
        elif field == "nic_mode":
4772 638c6349 Guido Trotter
          if instance.nics:
4773 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4774 638c6349 Guido Trotter
          else:
4775 638c6349 Guido Trotter
            val = None
4776 638c6349 Guido Trotter
        elif field == "nic_link":
4777 39a02558 Guido Trotter
          if instance.nics:
4778 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4779 638c6349 Guido Trotter
          else:
4780 638c6349 Guido Trotter
            val = None
4781 638c6349 Guido Trotter
        elif field == "bridge":
4782 638c6349 Guido Trotter
          if (instance.nics and
4783 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4784 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4785 39a02558 Guido Trotter
          else:
4786 39a02558 Guido Trotter
            val = None
4787 a8083063 Iustin Pop
        elif field == "mac":
4788 39a02558 Guido Trotter
          if instance.nics:
4789 39a02558 Guido Trotter
            val = instance.nics[0].mac
4790 39a02558 Guido Trotter
          else:
4791 39a02558 Guido Trotter
            val = None
4792 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4793 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4794 ad24e046 Iustin Pop
          try:
4795 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4796 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4797 8a23d2d3 Iustin Pop
            val = None
4798 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4799 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4800 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4801 130a6a6f Iustin Pop
        elif field == "tags":
4802 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4803 338e51e8 Iustin Pop
        elif field == "hvparams":
4804 338e51e8 Iustin Pop
          val = i_hv
4805 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4806 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
4807 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
4808 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4809 338e51e8 Iustin Pop
        elif field == "beparams":
4810 338e51e8 Iustin Pop
          val = i_be
4811 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4812 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4813 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4814 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4815 71c1af58 Iustin Pop
          # matches a variable list
4816 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4817 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4818 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4819 71c1af58 Iustin Pop
              val = len(instance.disks)
4820 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4821 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4822 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4823 3e0cea06 Iustin Pop
              try:
4824 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4825 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4826 71c1af58 Iustin Pop
                val = None
4827 71c1af58 Iustin Pop
            else:
4828 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4829 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4830 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4831 71c1af58 Iustin Pop
              val = len(instance.nics)
4832 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4833 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4834 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4835 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4836 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4837 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4838 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4839 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4840 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4841 638c6349 Guido Trotter
              val = []
4842 638c6349 Guido Trotter
              for nicp in i_nicp:
4843 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4844 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4845 638c6349 Guido Trotter
                else:
4846 638c6349 Guido Trotter
                  val.append(None)
4847 71c1af58 Iustin Pop
            else:
4848 71c1af58 Iustin Pop
              # index-based item
4849 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4850 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4851 71c1af58 Iustin Pop
                val = None
4852 71c1af58 Iustin Pop
              else:
4853 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4854 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4855 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4856 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4857 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4858 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4859 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4860 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4861 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4862 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4863 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4864 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4865 638c6349 Guido Trotter
                  else:
4866 638c6349 Guido Trotter
                    val = None
4867 71c1af58 Iustin Pop
                else:
4868 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4869 71c1af58 Iustin Pop
          else:
4870 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4871 c1ce76bb Iustin Pop
                           field)
4872 a8083063 Iustin Pop
        else:
4873 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4874 a8083063 Iustin Pop
        iout.append(val)
4875 a8083063 Iustin Pop
      output.append(iout)
4876 a8083063 Iustin Pop
4877 a8083063 Iustin Pop
    return output
4878 a8083063 Iustin Pop
4879 a8083063 Iustin Pop
4880 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4881 a8083063 Iustin Pop
  """Failover an instance.
4882 a8083063 Iustin Pop

4883 a8083063 Iustin Pop
  """
4884 a8083063 Iustin Pop
  HPATH = "instance-failover"
4885 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4886 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4887 c9e5c064 Guido Trotter
  REQ_BGL = False
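  # Illustrative only: this LU is normally reached through "gnt-instance
  # failover" or by submitting the corresponding opcode, roughly (assuming
  # the opcode definition in this tree's opcodes module mirrors _OP_REQP):
  #   op = opcodes.OpFailoverInstance(instance_name="inst1.example.com",
  #                                   ignore_consistency=False)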
4888 c9e5c064 Guido Trotter
4889 17c3f802 Guido Trotter
  def CheckArguments(self):
4890 17c3f802 Guido Trotter
    """Check the arguments.
4891 17c3f802 Guido Trotter

4892 17c3f802 Guido Trotter
    """
4893 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4894 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4895 17c3f802 Guido Trotter
4896 c9e5c064 Guido Trotter
  def ExpandNames(self):
4897 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4898 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4899 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4900 c9e5c064 Guido Trotter
4901 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4902 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4903 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4904 a8083063 Iustin Pop
4905 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4906 a8083063 Iustin Pop
    """Build hooks env.
4907 a8083063 Iustin Pop

4908 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4909 a8083063 Iustin Pop

4910 a8083063 Iustin Pop
    """
4911 08eec276 Iustin Pop
    instance = self.instance
4912 08eec276 Iustin Pop
    source_node = instance.primary_node
4913 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
4914 a8083063 Iustin Pop
    env = {
4915 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4916 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4917 08eec276 Iustin Pop
      "OLD_PRIMARY": source_node,
4918 08eec276 Iustin Pop
      "OLD_SECONDARY": target_node,
4919 08eec276 Iustin Pop
      "NEW_PRIMARY": target_node,
4920 08eec276 Iustin Pop
      "NEW_SECONDARY": source_node,
4921 a8083063 Iustin Pop
      }
4922 08eec276 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, instance))
4923 08eec276 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
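    # The post-phase hook node list additionally includes the old primary
    # (source) node, so hooks can still run there after the failover even
    # though it is not part of the list built above.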
4924 abd8e836 Iustin Pop
    nl_post = list(nl)
4925 abd8e836 Iustin Pop
    nl_post.append(source_node)
4926 abd8e836 Iustin Pop
    return env, nl, nl_post
4927 a8083063 Iustin Pop
4928 a8083063 Iustin Pop
  def CheckPrereq(self):
4929 a8083063 Iustin Pop
    """Check prerequisites.
4930 a8083063 Iustin Pop

4931 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4932 a8083063 Iustin Pop

4933 a8083063 Iustin Pop
    """
4934 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4935 c9e5c064 Guido Trotter
    assert self.instance is not None, \
4936 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4937 a8083063 Iustin Pop
4938 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4939 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4940 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
4941 5c983ee5 Iustin Pop
                                 " network mirrored, cannot failover.",
4942 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
4943 2a710df1 Michael Hanselmann
4944 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
4945 2a710df1 Michael Hanselmann
    if not secondary_nodes:
4946 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
4947 abdf0113 Iustin Pop
                                   "a mirrored disk template")
4948 2a710df1 Michael Hanselmann
4949 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
4950 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
4951 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
4952 d27776f0 Iustin Pop
    if instance.admin_up:
4953 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
4954 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
4955 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
4956 d27776f0 Iustin Pop
                           instance.hypervisor)
4957 d27776f0 Iustin Pop
    else:
4958 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
4959 d27776f0 Iustin Pop
                   " instance will not be started")
4960 3a7c308e Guido Trotter
4961 a8083063 Iustin Pop
    # check bridge existence
4962 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
4963 a8083063 Iustin Pop
4964 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4965 a8083063 Iustin Pop
    """Failover an instance.
4966 a8083063 Iustin Pop

4967 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
4968 a8083063 Iustin Pop
    starting it on the secondary.
4969 a8083063 Iustin Pop

4970 a8083063 Iustin Pop
    """
4971 a8083063 Iustin Pop
    instance = self.instance
4972 a8083063 Iustin Pop
4973 a8083063 Iustin Pop
    source_node = instance.primary_node
4974 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
4975 a8083063 Iustin Pop
4976 1df79ce6 Michael Hanselmann
    if instance.admin_up:
4977 1df79ce6 Michael Hanselmann
      feedback_fn("* checking disk consistency between source and target")
4978 1df79ce6 Michael Hanselmann
      for dev in instance.disks:
4979 1df79ce6 Michael Hanselmann
        # for drbd, these are drbd over lvm
4980 1df79ce6 Michael Hanselmann
        if not _CheckDiskConsistency(self, dev, target_node, False):
4981 1df79ce6 Michael Hanselmann
          if not self.op.ignore_consistency:
4982 1df79ce6 Michael Hanselmann
            raise errors.OpExecError("Disk %s is degraded on target node,"
4983 1df79ce6 Michael Hanselmann
                                     " aborting failover." % dev.iv_name)
4984 1df79ce6 Michael Hanselmann
    else:
4985 1df79ce6 Michael Hanselmann
      feedback_fn("* not checking disk consistency as instance is not running")
4986 a8083063 Iustin Pop
4987 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
4988 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4989 9a4f63d1 Iustin Pop
                 instance.name, source_node)
4990 a8083063 Iustin Pop
4991 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
4992 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4993 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4994 1fae010f Iustin Pop
    if msg:
4995 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
4996 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
4997 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
4998 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
4999 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
5000 24a40d57 Iustin Pop
      else:
5001 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5002 1fae010f Iustin Pop
                                 " node %s: %s" %
5003 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
5004 a8083063 Iustin Pop
5005 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
5006 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5007 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
5008 a8083063 Iustin Pop
5009 a8083063 Iustin Pop
    instance.primary_node = target_node
5010 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
5011 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5012 a8083063 Iustin Pop
5013 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
5014 0d68c45d Iustin Pop
    if instance.admin_up:
5015 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
5016 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
5017 9a4f63d1 Iustin Pop
                   instance.name, target_node)
5018 12a0cfbe Guido Trotter
5019 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5020 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
5021 12a0cfbe Guido Trotter
      if not disks_ok:
5022 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5023 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
5024 a8083063 Iustin Pop
5025 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
5026 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5027 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5028 dd279568 Iustin Pop
      if msg:
5029 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5030 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5031 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
5032 a8083063 Iustin Pop
5033 a8083063 Iustin Pop
5034 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
5035 53c776b5 Iustin Pop
  """Migrate an instance.
5036 53c776b5 Iustin Pop

5037 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
5038 53c776b5 Iustin Pop
  which is done with shutdown.
5039 53c776b5 Iustin Pop

5040 53c776b5 Iustin Pop
  """
5041 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
5042 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5043 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
5044 53c776b5 Iustin Pop
5045 53c776b5 Iustin Pop
  REQ_BGL = False
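  # Illustrative only: normally reached through "gnt-instance migrate"; an
  # equivalent opcode would look roughly like this (assuming the opcode
  # definition in this tree's opcodes module mirrors _OP_REQP above):
  #   op = opcodes.OpMigrateInstance(instance_name="inst1.example.com",
  #                                  live=True, cleanup=False)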
5046 53c776b5 Iustin Pop
5047 53c776b5 Iustin Pop
  def ExpandNames(self):
5048 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
5049 3e06e001 Michael Hanselmann
5050 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5051 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5052 53c776b5 Iustin Pop
5053 3e06e001 Michael Hanselmann
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
5054 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
5055 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
5056 3e06e001 Michael Hanselmann
5057 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
5058 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
5059 53c776b5 Iustin Pop
      self._LockInstancesNodes()
5060 53c776b5 Iustin Pop
5061 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
5062 53c776b5 Iustin Pop
    """Build hooks env.
5063 53c776b5 Iustin Pop

5064 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5065 53c776b5 Iustin Pop

5066 53c776b5 Iustin Pop
    """
5067 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
5068 08eec276 Iustin Pop
    source_node = instance.primary_node
5069 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
5070 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
5071 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
5072 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
5073 08eec276 Iustin Pop
    env.update({
5074 08eec276 Iustin Pop
        "OLD_PRIMARY": source_node,
5075 08eec276 Iustin Pop
        "OLD_SECONDARY": target_node,
5076 08eec276 Iustin Pop
        "NEW_PRIMARY": target_node,
5077 08eec276 Iustin Pop
        "NEW_SECONDARY": source_node,
5078 08eec276 Iustin Pop
        })
5079 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5080 abd8e836 Iustin Pop
    nl_post = list(nl)
5081 abd8e836 Iustin Pop
    nl_post.append(source_node)
5082 abd8e836 Iustin Pop
    return env, nl, nl_post
5083 53c776b5 Iustin Pop
5084 3e06e001 Michael Hanselmann
5085 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
5086 313bcead Iustin Pop
  """Move an instance by data-copying.
5087 313bcead Iustin Pop

5088 313bcead Iustin Pop
  """
5089 313bcead Iustin Pop
  HPATH = "instance-move"
5090 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5091 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
5092 313bcead Iustin Pop
  REQ_BGL = False
5093 313bcead Iustin Pop
5094 17c3f802 Guido Trotter
  def CheckArguments(self):
5095 17c3f802 Guido Trotter
    """Check the arguments.
5096 17c3f802 Guido Trotter

5097 17c3f802 Guido Trotter
    """
5098 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
5099 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
5100 17c3f802 Guido Trotter
5101 313bcead Iustin Pop
  def ExpandNames(self):
5102 313bcead Iustin Pop
    self._ExpandAndLockInstance()
5103 cf26a87a Iustin Pop
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5104 313bcead Iustin Pop
    self.op.target_node = target_node
5105 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5106 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5107 313bcead Iustin Pop
5108 313bcead Iustin Pop
  def DeclareLocks(self, level):
5109 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
5110 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
5111 313bcead Iustin Pop
5112 313bcead Iustin Pop
  def BuildHooksEnv(self):
5113 313bcead Iustin Pop
    """Build hooks env.
5114 313bcead Iustin Pop

5115 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5116 313bcead Iustin Pop

5117 313bcead Iustin Pop
    """
5118 313bcead Iustin Pop
    env = {
5119 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
5120 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
5121 313bcead Iustin Pop
      }
5122 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5123 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5124 313bcead Iustin Pop
                                       self.op.target_node]
5125 313bcead Iustin Pop
    return env, nl, nl
5126 313bcead Iustin Pop
5127 313bcead Iustin Pop
  def CheckPrereq(self):
5128 313bcead Iustin Pop
    """Check prerequisites.
5129 313bcead Iustin Pop

5130 313bcead Iustin Pop
    This checks that the instance is in the cluster.
5131 313bcead Iustin Pop

5132 313bcead Iustin Pop
    """
5133 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5134 313bcead Iustin Pop
    assert self.instance is not None, \
5135 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
5136 313bcead Iustin Pop
5137 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
5138 313bcead Iustin Pop
    assert node is not None, \
5139 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
5140 313bcead Iustin Pop
5141 313bcead Iustin Pop
    self.target_node = target_node = node.name
5142 313bcead Iustin Pop
5143 313bcead Iustin Pop
    if target_node == instance.primary_node:
5144 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5145 5c983ee5 Iustin Pop
                                 (instance.name, target_node),
5146 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5147 313bcead Iustin Pop
5148 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5149 313bcead Iustin Pop
5150 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
5151 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5152 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5153 d1b83918 Iustin Pop
                                   " cannot copy" % idx, errors.ECODE_STATE)
5154 313bcead Iustin Pop
5155 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
5156 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
5157 313bcead Iustin Pop
5158 313bcead Iustin Pop
    if instance.admin_up:
5159 313bcead Iustin Pop
      # check memory requirements on the target node
5160 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5161 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
5162 313bcead Iustin Pop
                           instance.hypervisor)
5163 313bcead Iustin Pop
    else:
5164 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
5165 313bcead Iustin Pop
                   " instance will not be started")
5166 313bcead Iustin Pop
5167 313bcead Iustin Pop
    # check bridge existence
5168 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5169 313bcead Iustin Pop
5170 313bcead Iustin Pop
  def Exec(self, feedback_fn):
5171 313bcead Iustin Pop
    """Move an instance.
5172 313bcead Iustin Pop

5173 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
5174 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
5175 313bcead Iustin Pop

5176 313bcead Iustin Pop
    """
5177 313bcead Iustin Pop
    instance = self.instance
5178 313bcead Iustin Pop
5179 313bcead Iustin Pop
    source_node = instance.primary_node
5180 313bcead Iustin Pop
    target_node = self.target_node
5181 313bcead Iustin Pop
5182 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
5183 313bcead Iustin Pop
                 instance.name, source_node)
5184 313bcead Iustin Pop
5185 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
5186 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
5187 313bcead Iustin Pop
    msg = result.fail_msg
5188 313bcead Iustin Pop
    if msg:
5189 313bcead Iustin Pop
      if self.op.ignore_consistency:
5190 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5191 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
5192 313bcead Iustin Pop
                             " %s is down. Error details: %s",
5193 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
5194 313bcead Iustin Pop
      else:
5195 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5196 313bcead Iustin Pop
                                 " node %s: %s" %
5197 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
5198 313bcead Iustin Pop
5199 313bcead Iustin Pop
    # create the target disks
5200 313bcead Iustin Pop
    try:
5201 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
5202 313bcead Iustin Pop
    except errors.OpExecError:
5203 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5204 313bcead Iustin Pop
      try:
5205 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5206 313bcead Iustin Pop
      finally:
5207 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5208 313bcead Iustin Pop
        raise
5209 313bcead Iustin Pop
5210 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
5211 313bcead Iustin Pop
5212 313bcead Iustin Pop
    errs = []
5213 313bcead Iustin Pop
    # activate, get path, copy the data over
5214 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
5215 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
5216 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
5217 313bcead Iustin Pop
                                               instance.name, True)
5218 313bcead Iustin Pop
      if result.fail_msg:
5219 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
5220 313bcead Iustin Pop
                        idx, result.fail_msg)
5221 313bcead Iustin Pop
        errs.append(result.fail_msg)
5222 313bcead Iustin Pop
        break
5223 313bcead Iustin Pop
      dev_path = result.payload
5224 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
5225 313bcead Iustin Pop
                                             target_node, dev_path,
5226 313bcead Iustin Pop
                                             cluster_name)
5227 313bcead Iustin Pop
      if result.fail_msg:
5228 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
5229 313bcead Iustin Pop
                        idx, result.fail_msg)
5230 313bcead Iustin Pop
        errs.append(result.fail_msg)
5231 313bcead Iustin Pop
        break
5232 313bcead Iustin Pop
5233 313bcead Iustin Pop
    if errs:
5234 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
5235 313bcead Iustin Pop
      try:
5236 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5237 313bcead Iustin Pop
      finally:
5238 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5239 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
5240 313bcead Iustin Pop
                                 (",".join(errs),))
5241 313bcead Iustin Pop
5242 313bcead Iustin Pop
    instance.primary_node = target_node
5243 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5244 313bcead Iustin Pop
5245 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
5246 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
5247 313bcead Iustin Pop
5248 313bcead Iustin Pop
    # Only start the instance if it's marked as up
5249 313bcead Iustin Pop
    if instance.admin_up:
5250 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
5251 313bcead Iustin Pop
                   instance.name, target_node)
5252 313bcead Iustin Pop
5253 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5254 313bcead Iustin Pop
                                           ignore_secondaries=True)
5255 313bcead Iustin Pop
      if not disks_ok:
5256 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5257 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
5258 313bcead Iustin Pop
5259 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5260 313bcead Iustin Pop
      msg = result.fail_msg
5261 313bcead Iustin Pop
      if msg:
5262 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5263 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5264 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
5265 313bcead Iustin Pop
5266 313bcead Iustin Pop
5267 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
5268 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
5269 80cb875c Michael Hanselmann

5270 80cb875c Michael Hanselmann
  """
5271 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
5272 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
5273 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
5274 80cb875c Michael Hanselmann
  REQ_BGL = False
5275 80cb875c Michael Hanselmann
5276 80cb875c Michael Hanselmann
  def ExpandNames(self):
5277 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5278 80cb875c Michael Hanselmann
5279 80cb875c Michael Hanselmann
    self.needed_locks = {
5280 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
5281 80cb875c Michael Hanselmann
      }
5282 80cb875c Michael Hanselmann
5283 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5284 80cb875c Michael Hanselmann
5285 80cb875c Michael Hanselmann
    # Create tasklets for migrating instances for all instances on this node
5286 80cb875c Michael Hanselmann
    names = []
5287 80cb875c Michael Hanselmann
    tasklets = []
5288 80cb875c Michael Hanselmann
5289 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5290 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
5291 80cb875c Michael Hanselmann
      names.append(inst.name)
5292 80cb875c Michael Hanselmann
5293 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
5294 80cb875c Michael Hanselmann
5295 80cb875c Michael Hanselmann
    self.tasklets = tasklets
5296 80cb875c Michael Hanselmann
5297 80cb875c Michael Hanselmann
    # Declare instance locks
5298 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
5299 80cb875c Michael Hanselmann
5300 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
5301 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
5302 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
5303 80cb875c Michael Hanselmann
5304 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
5305 80cb875c Michael Hanselmann
    """Build hooks env.
5306 80cb875c Michael Hanselmann

5307 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
5308 80cb875c Michael Hanselmann

5309 80cb875c Michael Hanselmann
    """
5310 80cb875c Michael Hanselmann
    env = {
5311 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
5312 80cb875c Michael Hanselmann
      }
5313 80cb875c Michael Hanselmann
5314 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
5315 80cb875c Michael Hanselmann
5316 80cb875c Michael Hanselmann
    return (env, nl, nl)
5317 80cb875c Michael Hanselmann
5318 80cb875c Michael Hanselmann
5319 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
5320 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
5321 3e06e001 Michael Hanselmann
    """Initializes this class.
5322 3e06e001 Michael Hanselmann

5323 3e06e001 Michael Hanselmann
    """
5324 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
5325 464243a7 Michael Hanselmann
5326 3e06e001 Michael Hanselmann
    # Parameters
5327 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
5328 3e06e001 Michael Hanselmann
    self.live = live
5329 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
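    # instance_name is the instance to act on; live selects a live
    # migration (as opposed to a non-live one) when asking the hypervisor
    # to migrate; cleanup means we only try to clean up after a previously
    # failed migration instead of starting a new one.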
5330 3e06e001 Michael Hanselmann
5331 53c776b5 Iustin Pop
  def CheckPrereq(self):
5332 53c776b5 Iustin Pop
    """Check prerequisites.
5333 53c776b5 Iustin Pop

5334 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
5335 53c776b5 Iustin Pop

5336 53c776b5 Iustin Pop
    """
5337 cf26a87a Iustin Pop
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5338 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(instance_name)
5339 cf26a87a Iustin Pop
    assert instance is not None
5340 53c776b5 Iustin Pop
5341 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5342 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
5343 5c983ee5 Iustin Pop
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5344 53c776b5 Iustin Pop
5345 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
5346 53c776b5 Iustin Pop
    if not secondary_nodes:
5347 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
5348 733a2b6a Iustin Pop
                                      " drbd8 disk template")
5349 53c776b5 Iustin Pop
5350 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5351 53c776b5 Iustin Pop
5352 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
5353 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
5354 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
5355 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
5356 53c776b5 Iustin Pop
                         instance.hypervisor)
5357 53c776b5 Iustin Pop
5358 53c776b5 Iustin Pop
    # check bridge existence
5359 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5360 53c776b5 Iustin Pop
5361 3e06e001 Michael Hanselmann
    if not self.cleanup:
5362 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
5363 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
5364 53c776b5 Iustin Pop
                                                 instance)
5365 045dd6d9 Iustin Pop
      result.Raise("Can't migrate, please use failover",
5366 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_STATE)
5367 53c776b5 Iustin Pop
5368 53c776b5 Iustin Pop
    self.instance = instance
5369 53c776b5 Iustin Pop
5370 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
5371 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
5372 53c776b5 Iustin Pop

5373 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
5374 53c776b5 Iustin Pop

5375 53c776b5 Iustin Pop
    """
5376 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
5377 53c776b5 Iustin Pop
    all_done = False
5378 53c776b5 Iustin Pop
    while not all_done:
5379 53c776b5 Iustin Pop
      all_done = True
5380 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5381 53c776b5 Iustin Pop
                                            self.nodes_ip,
5382 53c776b5 Iustin Pop
                                            self.instance.disks)
5383 53c776b5 Iustin Pop
      min_percent = 100
5384 53c776b5 Iustin Pop
      for node, nres in result.items():
5385 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
5386 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
5387 53c776b5 Iustin Pop
        all_done = all_done and node_done
5388 53c776b5 Iustin Pop
        if node_percent is not None:
5389 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
5390 53c776b5 Iustin Pop
      if not all_done:
5391 53c776b5 Iustin Pop
        if min_percent < 100:
5392 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5393 53c776b5 Iustin Pop
        time.sleep(2)
5394 53c776b5 Iustin Pop
5395 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
5396 53c776b5 Iustin Pop
    """Demote a node to secondary.
5397 53c776b5 Iustin Pop

5398 53c776b5 Iustin Pop
    """
5399 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
5400 53c776b5 Iustin Pop
5401 53c776b5 Iustin Pop
    for dev in self.instance.disks:
5402 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
5403 53c776b5 Iustin Pop
5404 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5405 53c776b5 Iustin Pop
                                          self.instance.disks)
5406 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
5407 53c776b5 Iustin Pop
5408 53c776b5 Iustin Pop
  def _GoStandalone(self):
5409 53c776b5 Iustin Pop
    """Disconnect from the network.
5410 53c776b5 Iustin Pop

5411 53c776b5 Iustin Pop
    """
5412 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
5413 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5414 53c776b5 Iustin Pop
                                               self.instance.disks)
5415 53c776b5 Iustin Pop
    for node, nres in result.items():
5416 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
5417 53c776b5 Iustin Pop
5418 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
5419 53c776b5 Iustin Pop
    """Reconnect to the network.
5420 53c776b5 Iustin Pop

5421 53c776b5 Iustin Pop
    """
5422 53c776b5 Iustin Pop
    if multimaster:
5423 53c776b5 Iustin Pop
      msg = "dual-master"
5424 53c776b5 Iustin Pop
    else:
5425 53c776b5 Iustin Pop
      msg = "single-master"
5426 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
5427 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5428 53c776b5 Iustin Pop
                                           self.instance.disks,
5429 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
5430 53c776b5 Iustin Pop
    for node, nres in result.items():
5431 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
5432 53c776b5 Iustin Pop
5433 53c776b5 Iustin Pop
  def _ExecCleanup(self):
5434 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
5435 53c776b5 Iustin Pop

5436 53c776b5 Iustin Pop
    The cleanup is done by:
5437 53c776b5 Iustin Pop
      - check that the instance is running only on one node
5438 53c776b5 Iustin Pop
        (and update the config if needed)
5439 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
5440 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5441 53c776b5 Iustin Pop
      - disconnect from the network
5442 53c776b5 Iustin Pop
      - change disks into single-master mode
5443 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
5444 53c776b5 Iustin Pop

5445 53c776b5 Iustin Pop
    """
5446 53c776b5 Iustin Pop
    instance = self.instance
5447 53c776b5 Iustin Pop
    target_node = self.target_node
5448 53c776b5 Iustin Pop
    source_node = self.source_node
5449 53c776b5 Iustin Pop
5450 53c776b5 Iustin Pop
    # check running on only one node
5451 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
5452 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
5453 53c776b5 Iustin Pop
                     " a bad state)")
5454 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5455 53c776b5 Iustin Pop
    for node, result in ins_l.items():
5456 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
5457 53c776b5 Iustin Pop
5458 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
5459 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
5460 53c776b5 Iustin Pop
5461 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
5462 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5463 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
5464 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
5465 53c776b5 Iustin Pop
                               " and restart this operation.")
5466 53c776b5 Iustin Pop
5467 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
5468 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
5469 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
5470 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
5471 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
5472 53c776b5 Iustin Pop
5473 53c776b5 Iustin Pop
    if runningon_target:
5474 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
5475 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
5476 53c776b5 Iustin Pop
                       " updating config" % target_node)
5477 53c776b5 Iustin Pop
      instance.primary_node = target_node
5478 a4eae71f Michael Hanselmann
      self.cfg.Update(instance, self.feedback_fn)
5479 53c776b5 Iustin Pop
      demoted_node = source_node
5480 53c776b5 Iustin Pop
    else:
5481 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
5482 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
5483 53c776b5 Iustin Pop
      demoted_node = target_node
5484 53c776b5 Iustin Pop
5485 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
5486 53c776b5 Iustin Pop
    try:
5487 53c776b5 Iustin Pop
      self._WaitUntilSync()
5488 53c776b5 Iustin Pop
    except errors.OpExecError:
5489 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
5490 53c776b5 Iustin Pop
      # won't be able to sync
5491 53c776b5 Iustin Pop
      pass
5492 53c776b5 Iustin Pop
    self._GoStandalone()
5493 53c776b5 Iustin Pop
    self._GoReconnect(False)
5494 53c776b5 Iustin Pop
    self._WaitUntilSync()
5495 53c776b5 Iustin Pop
5496 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5497 53c776b5 Iustin Pop
5498 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
5499 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
5500 6906a9d8 Guido Trotter

5501 6906a9d8 Guido Trotter
    """
5502 6906a9d8 Guido Trotter
    target_node = self.target_node
5503 6906a9d8 Guido Trotter
    try:
5504 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
5505 6906a9d8 Guido Trotter
      self._GoStandalone()
5506 6906a9d8 Guido Trotter
      self._GoReconnect(False)
5507 6906a9d8 Guido Trotter
      self._WaitUntilSync()
5508 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
5509 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5510 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
5511 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
5512 3e06e001 Michael Hanselmann
                         str(err))
5513 6906a9d8 Guido Trotter
5514 6906a9d8 Guido Trotter
  def _AbortMigration(self):
5515 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
5516 6906a9d8 Guido Trotter

5517 6906a9d8 Guido Trotter
    """
5518 6906a9d8 Guido Trotter
    instance = self.instance
5519 6906a9d8 Guido Trotter
    target_node = self.target_node
5520 6906a9d8 Guido Trotter
    migration_info = self.migration_info
5521 6906a9d8 Guido Trotter
5522 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
5523 6906a9d8 Guido Trotter
                                                    instance,
5524 6906a9d8 Guido Trotter
                                                    migration_info,
5525 6906a9d8 Guido Trotter
                                                    False)
5526 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
5527 6906a9d8 Guido Trotter
    if abort_msg:
5528 099c52ad Iustin Pop
      logging.error("Aborting migration failed on target node %s: %s",
5529 099c52ad Iustin Pop
                    target_node, abort_msg)
5530 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
5531 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5532 6906a9d8 Guido Trotter
5533 53c776b5 Iustin Pop
  def _ExecMigration(self):
5534 53c776b5 Iustin Pop
    """Migrate an instance.
5535 53c776b5 Iustin Pop

5536 53c776b5 Iustin Pop
    The migrate is done by:
5537 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5538 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5539 53c776b5 Iustin Pop
      - migrate the instance
5540 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5541 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5542 53c776b5 Iustin Pop
      - change disks into single-master mode
5543 53c776b5 Iustin Pop

5544 53c776b5 Iustin Pop
    """
5545 53c776b5 Iustin Pop
    instance = self.instance
5546 53c776b5 Iustin Pop
    target_node = self.target_node
5547 53c776b5 Iustin Pop
    source_node = self.source_node
5548 53c776b5 Iustin Pop
5549 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5550 53c776b5 Iustin Pop
    for dev in instance.disks:
5551 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5552 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5553 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5554 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5555 53c776b5 Iustin Pop
5556 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5557 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5558 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5559 6906a9d8 Guido Trotter
    if msg:
5560 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5561 0959c824 Iustin Pop
                 (source_node, msg))
5562 6906a9d8 Guido Trotter
      logging.error(log_err)
5563 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5564 6906a9d8 Guido Trotter
5565 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5566 6906a9d8 Guido Trotter
5567 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5568 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5569 53c776b5 Iustin Pop
    self._GoStandalone()
5570 53c776b5 Iustin Pop
    self._GoReconnect(True)
5571 53c776b5 Iustin Pop
    self._WaitUntilSync()
5572 53c776b5 Iustin Pop
5573 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5574 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5575 6906a9d8 Guido Trotter
                                           instance,
5576 6906a9d8 Guido Trotter
                                           migration_info,
5577 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5578 6906a9d8 Guido Trotter
5579 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5580 6906a9d8 Guido Trotter
    if msg:
5581 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5582 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5583 78212a5d Iustin Pop
      self.feedback_fn("Pre-migration failed, aborting")
5584 6906a9d8 Guido Trotter
      self._AbortMigration()
5585 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5586 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5587 6906a9d8 Guido Trotter
                               (instance.name, msg))
5588 6906a9d8 Guido Trotter
5589 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5590 53c776b5 Iustin Pop
    time.sleep(10)
5591 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5592 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5593 3e06e001 Michael Hanselmann
                                            self.live)
5594 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5595 53c776b5 Iustin Pop
    if msg:
5596 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5597 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5598 78212a5d Iustin Pop
      self.feedback_fn("Migration failed, aborting")
5599 6906a9d8 Guido Trotter
      self._AbortMigration()
5600 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5601 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5602 53c776b5 Iustin Pop
                               (instance.name, msg))
5603 53c776b5 Iustin Pop
    time.sleep(10)
5604 53c776b5 Iustin Pop
5605 53c776b5 Iustin Pop
    instance.primary_node = target_node
5606 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5607 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, self.feedback_fn)
5608 53c776b5 Iustin Pop
5609 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5610 6906a9d8 Guido Trotter
                                              instance,
5611 6906a9d8 Guido Trotter
                                              migration_info,
5612 6906a9d8 Guido Trotter
                                              True)
5613 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5614 6906a9d8 Guido Trotter
    if msg:
5615 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5616 099c52ad Iustin Pop
                    " %s", msg)
5617 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5618 6906a9d8 Guido Trotter
                               msg)
5619 6906a9d8 Guido Trotter
5620 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5621 53c776b5 Iustin Pop
    self._WaitUntilSync()
5622 53c776b5 Iustin Pop
    self._GoStandalone()
5623 53c776b5 Iustin Pop
    self._GoReconnect(False)
5624 53c776b5 Iustin Pop
    self._WaitUntilSync()
5625 53c776b5 Iustin Pop
5626 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5627 53c776b5 Iustin Pop
5628 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5629 53c776b5 Iustin Pop
    """Perform the migration.
5630 53c776b5 Iustin Pop

5631 53c776b5 Iustin Pop
    """
5632 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5633 80cb875c Michael Hanselmann
5634 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5635 53c776b5 Iustin Pop
5636 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5637 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5638 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5639 53c776b5 Iustin Pop
    self.nodes_ip = {
5640 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5641 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5642 53c776b5 Iustin Pop
      }
5643 3e06e001 Michael Hanselmann
5644 3e06e001 Michael Hanselmann
    if self.cleanup:
5645 53c776b5 Iustin Pop
      return self._ExecCleanup()
5646 53c776b5 Iustin Pop
    else:
5647 53c776b5 Iustin Pop
      return self._ExecMigration()
5648 53c776b5 Iustin Pop
5649 53c776b5 Iustin Pop
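# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# Exec() above derives everything it needs from the instance object: the
# primary node is the migration source, the first (and for DRBD8 only)
# secondary is the target, and the secondary IPs of both nodes feed the
# inter-node RPC calls.  The helper below mirrors that wiring on plain
# values; its name and the 'get_secondary_ip' callable are hypothetical and
# exist only for this sketch.
def _ExampleMigrationEndpoints(primary_node, secondary_nodes,
                               get_secondary_ip):
  """Return (source, target, nodes_ip) the way Exec() computes them."""
  source_node = primary_node
  target_node = secondary_nodes[0]
  nodes_ip = {
    source_node: get_secondary_ip(source_node),
    target_node: get_secondary_ip(target_node),
    }
  return source_node, target_node, nodes_ip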
5650 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5651 428958aa Iustin Pop
                    info, force_open):
5652 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5653 a8083063 Iustin Pop

5654 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5655 a8083063 Iustin Pop
  all its children.
5656 a8083063 Iustin Pop

5657 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5658 a8083063 Iustin Pop

5659 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5660 428958aa Iustin Pop
  @param node: the node on which to create the device
5661 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5662 428958aa Iustin Pop
  @param instance: the instance which owns the device
5663 428958aa Iustin Pop
  @type device: L{objects.Disk}
5664 428958aa Iustin Pop
  @param device: the device to create
5665 428958aa Iustin Pop
  @type force_create: boolean
5666 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5667 428958aa Iustin Pop
      will be changed to True whenever we find a device which has
5668 428958aa Iustin Pop
      CreateOnSecondary() attribute
5669 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5670 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5671 428958aa Iustin Pop
  @type force_open: boolean
5672 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5673 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5674 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5675 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5676 428958aa Iustin Pop

5677 a8083063 Iustin Pop
  """
5678 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5679 428958aa Iustin Pop
    force_create = True
5680 796cab27 Iustin Pop
5681 a8083063 Iustin Pop
  if device.children:
5682 a8083063 Iustin Pop
    for child in device.children:
5683 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5684 428958aa Iustin Pop
                      info, force_open)
5685 a8083063 Iustin Pop
5686 428958aa Iustin Pop
  if not force_create:
5687 796cab27 Iustin Pop
    return
5688 796cab27 Iustin Pop
5689 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5690 de12473a Iustin Pop
5691 de12473a Iustin Pop
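# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# _CreateBlockDev() above switches 'force_create' on as soon as it meets a
# device that reports CreateOnSecondary(), keeps it on for that device's
# whole subtree, and only calls _CreateSingleBlockDev() for devices where
# the flag ended up True.  The toy walk below reproduces just that decision
# on (create_on_secondary, children) tuples; everything here is hypothetical
# and only meant to show how the flag propagates (children first, then the
# device itself).
def _ExampleForceCreateWalk(dev, force_create=False, created=None):
  """Collect the devices that would actually be created."""
  if created is None:
    created = []
  create_on_secondary, children = dev
  if create_on_secondary:
    force_create = True
  for child in children:
    _ExampleForceCreateWalk(child, force_create, created)
  if force_create:
    created.append(dev)
  return created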
5692 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5693 de12473a Iustin Pop
  """Create a single block device on a given node.
5694 de12473a Iustin Pop

5695 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5696 de12473a Iustin Pop
  created in advance.
5697 de12473a Iustin Pop

5698 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5699 de12473a Iustin Pop
  @param node: the node on which to create the device
5700 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5701 de12473a Iustin Pop
  @param instance: the instance which owns the device
5702 de12473a Iustin Pop
  @type device: L{objects.Disk}
5703 de12473a Iustin Pop
  @param device: the device to create
5704 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5705 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5706 de12473a Iustin Pop
  @type force_open: boolean
5707 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5708 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5709 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5710 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5711 de12473a Iustin Pop

5712 de12473a Iustin Pop
  """
5713 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5714 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5715 428958aa Iustin Pop
                                       instance.name, force_open, info)
5716 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5717 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5718 a8083063 Iustin Pop
  if device.physical_id is None:
5719 0959c824 Iustin Pop
    device.physical_id = result.payload
5720 a8083063 Iustin Pop
5721 a8083063 Iustin Pop
5722 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5723 923b1523 Iustin Pop
  """Generate a suitable LV name.
5724 923b1523 Iustin Pop

5725 923b1523 Iustin Pop
  This will generate a logical volume name for each of the given extensions.
5726 923b1523 Iustin Pop

5727 923b1523 Iustin Pop
  """
5728 923b1523 Iustin Pop
  results = []
5729 923b1523 Iustin Pop
  for val in exts:
5730 4fae38c5 Guido Trotter
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
5731 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5732 923b1523 Iustin Pop
  return results
5733 923b1523 Iustin Pop
5734 923b1523 Iustin Pop
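# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# _GenerateUniqueNames() above simply prefixes every requested extension
# with a freshly reserved unique ID, so exts like [".disk0", ".disk1"]
# come back as ["<id1>.disk0", "<id2>.disk1"].  The generator argument
# below stands in for lu.cfg.GenerateUniqueID() and is hypothetical.
def _ExampleGenerateNames(exts, generate_unique_id):
  """Mirror the name construction without touching the configuration."""
  return ["%s%s" % (generate_unique_id(), ext) for ext in exts]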
5735 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5736 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5737 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5738 a1f445d3 Iustin Pop

5739 a1f445d3 Iustin Pop
  """
5740 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5741 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5742 afa1386e Guido Trotter
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
5743 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5744 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5745 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5746 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5747 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5748 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5749 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5750 f9518d38 Iustin Pop
                                      shared_secret),
5751 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5752 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5753 a1f445d3 Iustin Pop
  return drbd_dev
5754 a1f445d3 Iustin Pop
5755 7c0d6283 Michael Hanselmann
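# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# The branch built above is always the same three-node tree: an LD_DRBD8
# device whose logical_id carries (primary, secondary, port, p_minor,
# s_minor, shared_secret), backed by two LD_LV children -- the data LV of
# the requested size and a fixed 128 MiB metadata LV.  The plain-dict
# sketch below only restates that shape and creates no config objects.
def _ExampleDrbd8Shape(primary, secondary, port, p_minor, s_minor,
                       shared_secret, size, lv_names):
  data_lv = {"type": "lv", "size": size, "name": lv_names[0]}
  meta_lv = {"type": "lv", "size": 128, "name": lv_names[1]}
  return {
    "type": "drbd8",
    "size": size,
    "logical_id": (primary, secondary, port, p_minor, s_minor, shared_secret),
    "children": [data_lv, meta_lv],
    }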
5756 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5757 a8083063 Iustin Pop
                          instance_name, primary_node,
5758 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5759 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5760 e2a65344 Iustin Pop
                          base_index):
5761 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5762 a8083063 Iustin Pop

5763 a8083063 Iustin Pop
  """
5764 a8083063 Iustin Pop
  #TODO: compute space requirements
5765 a8083063 Iustin Pop
5766 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5767 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5768 08db7c5c Iustin Pop
  disks = []
5769 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5770 08db7c5c Iustin Pop
    pass
5771 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5772 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5773 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5774 923b1523 Iustin Pop
5775 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5776 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5777 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5778 e2a65344 Iustin Pop
      disk_index = idx + base_index
5779 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5780 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5781 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5782 6ec66eae Iustin Pop
                              mode=disk["mode"])
5783 08db7c5c Iustin Pop
      disks.append(disk_dev)
5784 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5785 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5786 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5787 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5788 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5789 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5790 08db7c5c Iustin Pop
5791 e6c1ff2f Iustin Pop
    names = []
5792 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5793 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5794 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5795 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5796 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5797 112050d9 Iustin Pop
      disk_index = idx + base_index
5798 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5799 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5800 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5801 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5802 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5803 08db7c5c Iustin Pop
      disks.append(disk_dev)
5804 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5805 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5806 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5807 0f1a06e3 Manuel Franceschini
5808 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5809 112050d9 Iustin Pop
      disk_index = idx + base_index
5810 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5811 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5812 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5813 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5814 43e99cff Guido Trotter
                                                         disk_index)),
5815 6ec66eae Iustin Pop
                              mode=disk["mode"])
5816 08db7c5c Iustin Pop
      disks.append(disk_dev)
5817 a8083063 Iustin Pop
  else:
5818 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5819 a8083063 Iustin Pop
  return disks
5820 a8083063 Iustin Pop
5821 a8083063 Iustin Pop
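# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# Whatever the template, _GenerateDiskTemplate() above names the
# instance-visible disks "disk/<base_index + idx>"; for DRBD8 every such
# disk additionally consumes one DRBD minor on each of the two nodes and
# two logical volumes (a data and a meta LV).  The hypothetical helper
# below shows only that naming/accounting, not real allocation.
def _ExampleDrbd8Accounting(disk_count, base_index=0):
  layout = []
  for idx in range(disk_count):
    disk_index = idx + base_index
    layout.append({
      "iv_name": "disk/%d" % disk_index,
      "minor_slots": (2 * idx, 2 * idx + 1),  # primary and secondary minors
      "lv_suffixes": ("_data", "_meta"),
      })
  return layout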
5822 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5823 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5824 3ecf6786 Iustin Pop

5825 3ecf6786 Iustin Pop
  """
5826 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5827 a0c3fea1 Michael Hanselmann
5828 a0c3fea1 Michael Hanselmann
5829 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5830 a8083063 Iustin Pop
  """Create all disks for an instance.
5831 a8083063 Iustin Pop

5832 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5833 a8083063 Iustin Pop

5834 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5835 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5836 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5837 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5838 bd315bfa Iustin Pop
  @type to_skip: list
5839 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5840 621b7678 Iustin Pop
  @type target_node: string
5841 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5842 e4376078 Iustin Pop
  @rtype: boolean
5843 e4376078 Iustin Pop
  @return: the success of the creation
5844 a8083063 Iustin Pop

5845 a8083063 Iustin Pop
  """
5846 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5847 621b7678 Iustin Pop
  if target_node is None:
5848 621b7678 Iustin Pop
    pnode = instance.primary_node
5849 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5850 621b7678 Iustin Pop
  else:
5851 621b7678 Iustin Pop
    pnode = target_node
5852 621b7678 Iustin Pop
    all_nodes = [pnode]
5853 a0c3fea1 Michael Hanselmann
5854 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5855 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5856 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5857 0f1a06e3 Manuel Franceschini
5858 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5859 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5860 0f1a06e3 Manuel Franceschini
5861 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5862 24991749 Iustin Pop
  # LUSetInstanceParams
5863 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5864 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5865 bd315bfa Iustin Pop
      continue
5866 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5867 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5868 a8083063 Iustin Pop
    #HARDCODE
5869 621b7678 Iustin Pop
    for node in all_nodes:
5870 428958aa Iustin Pop
      f_create = node == pnode
5871 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5872 a8083063 Iustin Pop
5873 a8083063 Iustin Pop
5874 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5875 a8083063 Iustin Pop
  """Remove all disks for an instance.
5876 a8083063 Iustin Pop

5877 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5878 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5879 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5880 a8083063 Iustin Pop
  with `_CreateDisks()`).
5881 a8083063 Iustin Pop

5882 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5883 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5884 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5885 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5886 621b7678 Iustin Pop
  @type target_node: string
5887 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5888 e4376078 Iustin Pop
  @rtype: boolean
5889 e4376078 Iustin Pop
  @return: the success of the removal
5890 a8083063 Iustin Pop

5891 a8083063 Iustin Pop
  """
5892 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5893 a8083063 Iustin Pop
5894 e1bc0878 Iustin Pop
  all_result = True
5895 a8083063 Iustin Pop
  for device in instance.disks:
5896 621b7678 Iustin Pop
    if target_node:
5897 621b7678 Iustin Pop
      edata = [(target_node, device)]
5898 621b7678 Iustin Pop
    else:
5899 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5900 621b7678 Iustin Pop
    for node, disk in edata:
5901 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5902 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5903 e1bc0878 Iustin Pop
      if msg:
5904 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5905 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5906 e1bc0878 Iustin Pop
        all_result = False
5907 0f1a06e3 Manuel Franceschini
5908 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5909 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5910 dfc2a24c Guido Trotter
    if target_node:
5911 dfc2a24c Guido Trotter
      tgt = target_node
5912 621b7678 Iustin Pop
    else:
5913 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5914 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5915 621b7678 Iustin Pop
    if result.fail_msg:
5916 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5917 621b7678 Iustin Pop
                    file_storage_dir, instance.primary_node, result.fail_msg)
5918 e1bc0878 Iustin Pop
      all_result = False
5919 0f1a06e3 Manuel Franceschini
5920 e1bc0878 Iustin Pop
  return all_result
5921 a8083063 Iustin Pop
5922 a8083063 Iustin Pop
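# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# _RemoveDisks() above is deliberately best-effort: every (node, disk) pair
# is attempted, a failure is only logged as a warning, and the caller gets
# one boolean that turns False as soon as any removal failed.  The sketch
# below shows that aggregation pattern; the 'remove' callable (returning an
# error message or None) is hypothetical.
def _ExampleBestEffortRemove(pairs, remove):
  all_result = True
  for node, disk in pairs:
    msg = remove(node, disk)
    if msg:
      logging.warning("Could not remove %s on node %s: %s", disk, node, msg)
      all_result = False
  return all_result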
5923 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5924 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5925 e2fe6369 Iustin Pop

5926 e2fe6369 Iustin Pop
  """
5927 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disk sizes
5928 e2fe6369 Iustin Pop
  req_size_dict = {
5929 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5930 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5931 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5932 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5933 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5934 e2fe6369 Iustin Pop
  }
5935 e2fe6369 Iustin Pop
5936 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5937 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5938 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5939 e2fe6369 Iustin Pop
5940 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
5941 e2fe6369 Iustin Pop
5942 e2fe6369 Iustin Pop
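# Worked example for the size rule above (not part of the original file):
# a DRBD8 instance with two disks of 1024 MiB and 2048 MiB needs
# (1024 + 128) + (2048 + 128) = 3328 MiB of volume group space, because
# every disk carries an extra 128 MiB DRBD metadata volume; the same disks
# as plain LVs need only 1024 + 2048 = 3072 MiB, while diskless and
# file-based instances need no VG space at all:
#
#   _ComputeDiskSize(constants.DT_DRBD8, [{"size": 1024}, {"size": 2048}])
#   => 3328
#   _ComputeDiskSize(constants.DT_PLAIN, [{"size": 1024}, {"size": 2048}])
#   => 3072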
5943 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5944 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5945 74409b12 Iustin Pop

5946 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
5947 74409b12 Iustin Pop
  used in both instance create and instance modify.
5948 74409b12 Iustin Pop

5949 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5950 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5951 74409b12 Iustin Pop
  @type nodenames: list
5952 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5953 74409b12 Iustin Pop
  @type hvname: string
5954 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5955 74409b12 Iustin Pop
  @type hvparams: dict
5956 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5957 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5958 74409b12 Iustin Pop

5959 74409b12 Iustin Pop
  """
5960 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5961 74409b12 Iustin Pop
                                                  hvname,
5962 74409b12 Iustin Pop
                                                  hvparams)
5963 74409b12 Iustin Pop
  for node in nodenames:
5964 781de953 Iustin Pop
    info = hvinfo[node]
5965 68c6f21c Iustin Pop
    if info.offline:
5966 68c6f21c Iustin Pop
      continue
5967 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5968 74409b12 Iustin Pop
5969 74409b12 Iustin Pop
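# --- Illustrative sketch (not part of the original cmdlib.py) ---------------
# _CheckHVParams() above fans the validation out to all candidate nodes in
# a single RPC and then walks the per-node answers, silently skipping nodes
# that are offline and aborting on the first real failure (the original
# delegates that last step to the RPC result's Raise() helper).  The
# hypothetical helper below shows the same walk over a plain
# {node: (offline, error_message_or_None)} mapping.
def _ExamplePerNodeValidation(results):
  for node, (offline, err) in results.items():
    if offline:
      continue
    if err:
      raise errors.OpPrereqError("Hypervisor parameter validation failed on"
                                 " node %s: %s" % (node, err),
                                 errors.ECODE_ENVIRON)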
5970 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5971 a8083063 Iustin Pop
  """Create an instance.
5972 a8083063 Iustin Pop

5973 a8083063 Iustin Pop
  """
5974 a8083063 Iustin Pop
  HPATH = "instance-add"
5975 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5976 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5977 08db7c5c Iustin Pop
              "mode", "start",
5978 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5979 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5980 7baf741d Guido Trotter
  REQ_BGL = False
5981 7baf741d Guido Trotter
5982 5f23e043 Iustin Pop
  def CheckArguments(self):
5983 5f23e043 Iustin Pop
    """Check arguments.
5984 5f23e043 Iustin Pop

5985 5f23e043 Iustin Pop
    """
5986 df4272e5 Iustin Pop
    # set optional parameters to none if they don't exist
5987 df4272e5 Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
5988 df4272e5 Iustin Pop
      if not hasattr(self.op, attr):
5989 df4272e5 Iustin Pop
        setattr(self.op, attr, None)
5990 df4272e5 Iustin Pop
5991 5f23e043 Iustin Pop
    # do not require name_check to ease forward/backward compatibility
5992 5f23e043 Iustin Pop
    # for tools
5993 5f23e043 Iustin Pop
    if not hasattr(self.op, "name_check"):
5994 5f23e043 Iustin Pop
      self.op.name_check = True
5995 25a8792c Iustin Pop
    if not hasattr(self.op, "no_install"):
5996 25a8792c Iustin Pop
      self.op.no_install = False
5997 25a8792c Iustin Pop
    if self.op.no_install and self.op.start:
5998 25a8792c Iustin Pop
      self.LogInfo("No-installation mode selected, disabling startup")
5999 25a8792c Iustin Pop
      self.op.start = False
6000 44caf5a8 Iustin Pop
    # validate/normalize the instance name
6001 44caf5a8 Iustin Pop
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
6002 5f23e043 Iustin Pop
    if self.op.ip_check and not self.op.name_check:
6003 5f23e043 Iustin Pop
      # TODO: make the ip check more flexible and not depend on the name check
6004 5f23e043 Iustin Pop
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
6005 5f23e043 Iustin Pop
                                 errors.ECODE_INVAL)
6006 cb7c0198 Iustin Pop
    if (self.op.disk_template == constants.DT_FILE and
6007 cb7c0198 Iustin Pop
        not constants.ENABLE_FILE_STORAGE):
6008 cb7c0198 Iustin Pop
      raise errors.OpPrereqError("File storage disabled at configure time",
6009 cb7c0198 Iustin Pop
                                 errors.ECODE_INVAL)
6010 c3589cf8 Iustin Pop
    # check disk information: either all adopt, or no adopt
6011 c3589cf8 Iustin Pop
    has_adopt = has_no_adopt = False
6012 c3589cf8 Iustin Pop
    for disk in self.op.disks:
6013 c3589cf8 Iustin Pop
      if "adopt" in disk:
6014 c3589cf8 Iustin Pop
        has_adopt = True
6015 c3589cf8 Iustin Pop
      else:
6016 c3589cf8 Iustin Pop
        has_no_adopt = True
6017 c3589cf8 Iustin Pop
    if has_adopt and has_no_adopt:
6018 c3589cf8 Iustin Pop
      raise errors.OpPrereqError("Either all disks have are adoped or none is",
6019 c3589cf8 Iustin Pop
                                 errors.ECODE_INVAL)
6020 c3589cf8 Iustin Pop
    if has_adopt:
6021 c3589cf8 Iustin Pop
      if self.op.disk_template != constants.DT_PLAIN:
6022 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption is only supported for the"
6023 c3589cf8 Iustin Pop
                                   " 'plain' disk template",
6024 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6025 c3589cf8 Iustin Pop
      if self.op.iallocator is not None:
6026 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed with an"
6027 c3589cf8 Iustin Pop
                                   " iallocator script", errors.ECODE_INVAL)
6028 c3589cf8 Iustin Pop
      if self.op.mode == constants.INSTANCE_IMPORT:
6029 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed for"
6030 c3589cf8 Iustin Pop
                                   " instance import", errors.ECODE_INVAL)
6031 c3589cf8 Iustin Pop
6032 c3589cf8 Iustin Pop
    self.adopt_disks = has_adopt
6033 5f23e043 Iustin Pop
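  # Illustrative sketch (not part of the original cmdlib.py): the adoption
  # check above is all-or-nothing -- every disk dict names an existing LV
  # under the "adopt" key, or none does -- and adoption is further limited
  # to the plain template, without an iallocator and outside of imports.
  # On bare disk dicts the all-or-nothing part boils down to:
  #
  #   adopting = ["adopt" in d for d in disks]
  #   if any(adopting) and not all(adopting):
  #     raise errors.OpPrereqError("Either all disks are adopted or none is",
  #                                errors.ECODE_INVAL)
  #   adopt_disks = any(adopting)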
6034 7baf741d Guido Trotter
  def ExpandNames(self):
6035 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
6036 7baf741d Guido Trotter

6037 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
6038 7baf741d Guido Trotter

6039 7baf741d Guido Trotter
    """
6040 7baf741d Guido Trotter
    self.needed_locks = {}
6041 7baf741d Guido Trotter
6042 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
6043 4b2f38dd Iustin Pop
6044 7baf741d Guido Trotter
    # verify creation mode
6045 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
6046 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
6047 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
6048 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
6049 4b2f38dd Iustin Pop
6050 7baf741d Guido Trotter
    # disk template and mirror node verification
6051 5d55819e Iustin Pop
    _CheckDiskTemplate(self.op.disk_template)
6052 7baf741d Guido Trotter
6053 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
6054 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
6055 4b2f38dd Iustin Pop
6056 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
6057 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
6058 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
6059 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6060 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
6061 5c983ee5 Iustin Pop
                                  ",".join(enabled_hvs)),
6062 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
6063 4b2f38dd Iustin Pop
6064 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
6065 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6066 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
6067 8705eb96 Iustin Pop
                                  self.op.hvparams)
6068 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6069 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
6070 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
6071 7736a5f2 Iustin Pop
    # check that we don't specify global parameters on an instance
6072 7736a5f2 Iustin Pop
    _CheckGlobalHvParams(self.op.hvparams)
6073 6785674e Iustin Pop
6074 338e51e8 Iustin Pop
    # fill and remember the beparams dict
6075 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6076 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
6077 338e51e8 Iustin Pop
                                    self.op.beparams)
6078 338e51e8 Iustin Pop
6079 7baf741d Guido Trotter
    #### instance parameters check
6080 7baf741d Guido Trotter
6081 7baf741d Guido Trotter
    # instance name verification
6082 5f23e043 Iustin Pop
    if self.op.name_check:
6083 5f23e043 Iustin Pop
      hostname1 = utils.GetHostInfo(self.op.instance_name)
6084 5f23e043 Iustin Pop
      self.op.instance_name = instance_name = hostname1.name
6085 5f23e043 Iustin Pop
      # used in CheckPrereq for ip ping check
6086 5f23e043 Iustin Pop
      self.check_ip = hostname1.ip
6087 5f23e043 Iustin Pop
    else:
6088 5f23e043 Iustin Pop
      instance_name = self.op.instance_name
6089 5f23e043 Iustin Pop
      self.check_ip = None
6090 7baf741d Guido Trotter
6091 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
6092 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
6093 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
6094 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6095 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
6096 7baf741d Guido Trotter
6097 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6098 7baf741d Guido Trotter
6099 08db7c5c Iustin Pop
    # NIC buildup
6100 08db7c5c Iustin Pop
    self.nics = []
6101 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
6102 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
6103 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
6104 9dce4771 Guido Trotter
      if nic_mode is None:
6105 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
6106 9dce4771 Guido Trotter
6107 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
6108 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
6109 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
6110 9dce4771 Guido Trotter
      else:
6111 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
6112 9dce4771 Guido Trotter
6113 08db7c5c Iustin Pop
      # ip validity checks
6114 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
6115 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
6116 08db7c5c Iustin Pop
        nic_ip = None
6117 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
6118 5f23e043 Iustin Pop
        if not self.op.name_check:
6119 5f23e043 Iustin Pop
          raise errors.OpPrereqError("IP address set to auto but name checks"
6120 5f23e043 Iustin Pop
                                     " have been skipped. Aborting.",
6121 5f23e043 Iustin Pop
                                     errors.ECODE_INVAL)
6122 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
6123 08db7c5c Iustin Pop
      else:
6124 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
6125 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
6126 5c983ee5 Iustin Pop
                                     " like a valid IP" % ip,
6127 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6128 08db7c5c Iustin Pop
        nic_ip = ip
6129 08db7c5c Iustin Pop
6130 b8716596 Michael Hanselmann
      # TODO: check the ip address for uniqueness
6131 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
6132 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
6133 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6134 9dce4771 Guido Trotter
6135 08db7c5c Iustin Pop
      # MAC address verification
6136 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
6137 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6138 82187135 René Nussbaumer
        mac = utils.NormalizeAndValidateMac(mac)
6139 82187135 René Nussbaumer
6140 82187135 Renรฉ Nussbaumer
        try:
6141 82187135 Renรฉ Nussbaumer
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
6142 82187135 Renรฉ Nussbaumer
        except errors.ReservationError:
6143 82187135 Renรฉ Nussbaumer
          raise errors.OpPrereqError("MAC address %s already in use"
6144 82187135 Renรฉ Nussbaumer
                                     " in cluster" % mac,
6145 82187135 Renรฉ Nussbaumer
                                     errors.ECODE_NOTUNIQUE)
6146 87e43988 Iustin Pop
6147 08db7c5c Iustin Pop
      # bridge verification
6148 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
6149 9dce4771 Guido Trotter
      link = nic.get("link", None)
6150 9dce4771 Guido Trotter
      if bridge and link:
6151 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6152 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
6153 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
6154 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
6155 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6156 9dce4771 Guido Trotter
      elif bridge:
6157 9dce4771 Guido Trotter
        link = bridge
6158 9dce4771 Guido Trotter
6159 9dce4771 Guido Trotter
      nicparams = {}
6160 9dce4771 Guido Trotter
      if nic_mode_req:
6161 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
6162 9dce4771 Guido Trotter
      if link:
6163 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
6164 9dce4771 Guido Trotter
6165 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
6166 9dce4771 Guido Trotter
                                      nicparams)
6167 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
6168 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
6169 08db7c5c Iustin Pop
6170 08db7c5c Iustin Pop
    # disk checks/pre-build
6171 08db7c5c Iustin Pop
    self.disks = []
6172 08db7c5c Iustin Pop
    for disk in self.op.disks:
6173 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
6174 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
6175 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
6176 5c983ee5 Iustin Pop
                                   mode, errors.ECODE_INVAL)
6177 08db7c5c Iustin Pop
      size = disk.get("size", None)
6178 08db7c5c Iustin Pop
      if size is None:
6179 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
6180 08db7c5c Iustin Pop
      try:
6181 08db7c5c Iustin Pop
        size = int(size)
6182 691744c4 Iustin Pop
      except (TypeError, ValueError):
6183 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
6184 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6185 c3589cf8 Iustin Pop
      new_disk = {"size": size, "mode": mode}
6186 c3589cf8 Iustin Pop
      if "adopt" in disk:
6187 c3589cf8 Iustin Pop
        new_disk["adopt"] = disk["adopt"]
6188 c3589cf8 Iustin Pop
      self.disks.append(new_disk)
6189 08db7c5c Iustin Pop
6190 7baf741d Guido Trotter
    # file storage checks
6191 7baf741d Guido Trotter
    if (self.op.file_driver and
6192 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
6193 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
6194 5c983ee5 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
6195 7baf741d Guido Trotter
6196 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6197 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
6198 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6199 7baf741d Guido Trotter
6200 7baf741d Guido Trotter
    ### Node/iallocator related checks
6201 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
6202 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
6203 5c983ee5 Iustin Pop
                                 " node must be given",
6204 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
6205 7baf741d Guido Trotter
6206 7baf741d Guido Trotter
    if self.op.iallocator:
6207 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6208 7baf741d Guido Trotter
    else:
6209 cf26a87a Iustin Pop
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6210 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
6211 7baf741d Guido Trotter
      if self.op.snode is not None:
6212 cf26a87a Iustin Pop
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6213 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
6214 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6215 7baf741d Guido Trotter
6216 7baf741d Guido Trotter
    # in case of import lock the source node too
6217 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6218 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
6219 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
6220 7baf741d Guido Trotter
6221 b9322a9f Guido Trotter
      if src_path is None:
6222 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
6223 b9322a9f Guido Trotter
6224 b9322a9f Guido Trotter
      if src_node is None:
6225 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6226 b9322a9f Guido Trotter
        self.op.src_node = None
6227 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
6228 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
6229 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
6230 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6231 b9322a9f Guido Trotter
      else:
6232 cf26a87a Iustin Pop
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6233 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6234 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6235 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
6236 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
6237 c4feafe8 Iustin Pop
            utils.PathJoin(constants.EXPORT_DIR, src_path)
6238 7baf741d Guido Trotter
6239 f2c05717 Guido Trotter
      # On import force_variant must be True, because if we forced it at
6240 f2c05717 Guido Trotter
      # initial install, our only chance when importing it back is that it
6241 f2c05717 Guido Trotter
      # works again!
6242 f2c05717 Guido Trotter
      self.op.force_variant = True
6243 f2c05717 Guido Trotter
6244 25a8792c Iustin Pop
      if self.op.no_install:
6245 25a8792c Iustin Pop
        self.LogInfo("No-installation mode has no effect during import")
6246 25a8792c Iustin Pop
6247 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
6248 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
6249 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
6250 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6251 f2c05717 Guido Trotter
      self.op.force_variant = getattr(self.op, "force_variant", False)
6252 a8083063 Iustin Pop
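  # Illustrative sketch (not part of the original cmdlib.py): the NIC
  # build-up above treats the legacy "bridge" argument as an alias for
  # "link" -- passing both is rejected, a bridge on a routed NIC is
  # rejected, and otherwise the bridge simply becomes the link.  For a
  # single NIC spec that normalisation is essentially:
  #
  #   if bridge and link:
  #     raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
  #                                " at the same time", errors.ECODE_INVAL)
  #   elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
  #     raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
  #                                errors.ECODE_INVAL)
  #   elif bridge:
  #     link = bridge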
6253 538475ca Iustin Pop
  def _RunAllocator(self):
6254 538475ca Iustin Pop
    """Run the allocator based on input opcode.
6255 538475ca Iustin Pop

6256 538475ca Iustin Pop
    """
6257 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
6258 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
6259 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
6260 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
6261 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
6262 d1c2dd75 Iustin Pop
                     tags=[],
6263 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
6264 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
6265 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
6266 08db7c5c Iustin Pop
                     disks=self.disks,
6267 d1c2dd75 Iustin Pop
                     nics=nics,
6268 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
6269 29859cb7 Iustin Pop
                     )
6270 d1c2dd75 Iustin Pop
6271 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
6272 d1c2dd75 Iustin Pop
6273 d1c2dd75 Iustin Pop
    if not ial.success:
6274 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
6275 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
6276 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
6277 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6278 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6279 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6280 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
6281 680f0a89 Iustin Pop
                                 (self.op.iallocator, len(ial.result),
6282 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
6283 680f0a89 Iustin Pop
    self.op.pnode = ial.result[0]
6284 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6285 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
6286 680f0a89 Iustin Pop
                 utils.CommaJoin(ial.result))
6287 27579978 Iustin Pop
    if ial.required_nodes == 2:
6288 680f0a89 Iustin Pop
      self.op.snode = ial.result[1]
6289 538475ca Iustin Pop
6290 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6291 a8083063 Iustin Pop
    """Build hooks env.
6292 a8083063 Iustin Pop

6293 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
6294 a8083063 Iustin Pop

6295 a8083063 Iustin Pop
    """
6296 a8083063 Iustin Pop
    env = {
6297 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
6298 a8083063 Iustin Pop
      }
6299 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6300 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
6301 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
6302 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
6303 396e1b78 Michael Hanselmann
6304 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
6305 2c2690c9 Iustin Pop
      name=self.op.instance_name,
6306 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
6307 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
6308 4978db17 Iustin Pop
      status=self.op.start,
6309 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
6310 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
6311 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
6312 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
6313 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
6314 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
6315 67fc3042 Iustin Pop
      bep=self.be_full,
6316 67fc3042 Iustin Pop
      hvp=self.hv_full,
6317 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
6318 396e1b78 Michael Hanselmann
    ))
6319 a8083063 Iustin Pop
6320 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6321 a8083063 Iustin Pop
          self.secondaries)
6322 a8083063 Iustin Pop
    return env, nl, nl
6323 a8083063 Iustin Pop
6324 a8083063 Iustin Pop
6325 a8083063 Iustin Pop
  def CheckPrereq(self):
6326 a8083063 Iustin Pop
    """Check prerequisites.
6327 a8083063 Iustin Pop

6328 a8083063 Iustin Pop
    """
6329 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
6330 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
6331 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6332 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
6333 eedc99de Manuel Franceschini
6334 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6335 7baf741d Guido Trotter
      src_node = self.op.src_node
6336 7baf741d Guido Trotter
      src_path = self.op.src_path
6337 a8083063 Iustin Pop
6338 c0cbdc67 Guido Trotter
      if src_node is None:
6339 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6340 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
6341 c0cbdc67 Guido Trotter
        found = False
6342 c0cbdc67 Guido Trotter
        for node in exp_list:
6343 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
6344 1b7bfbb7 Iustin Pop
            continue
6345 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
6346 c0cbdc67 Guido Trotter
            found = True
6347 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
6348 c4feafe8 Iustin Pop
            self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6349 c4feafe8 Iustin Pop
                                                         src_path)
6350 c0cbdc67 Guido Trotter
            break
6351 c0cbdc67 Guido Trotter
        if not found:
6352 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
6353 5c983ee5 Iustin Pop
                                      src_path, errors.ECODE_INVAL)
6354 c0cbdc67 Guido Trotter
6355 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
6356 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
6357 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
6358 a8083063 Iustin Pop
6359 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6360 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
6361 5c983ee5 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config",
6362 5c983ee5 Iustin Pop
                                     errors.ECODE_ENVIRON)
6363 a8083063 Iustin Pop
6364 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
6365 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
6366 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6367 5c983ee5 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION),
6368 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
6369 a8083063 Iustin Pop
6370 09acf207 Guido Trotter
      # Check that the new instance doesn't have fewer disks than the export
6371 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
6372 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6373 09acf207 Guido Trotter
      if instance_disks < export_disks:
6374 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
6375 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
6376 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
6377 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6378 a8083063 Iustin Pop
6379 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
6380 09acf207 Guido Trotter
      disk_images = []
6381 09acf207 Guido Trotter
      for idx in range(export_disks):
6382 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
6383 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
6384 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
6385 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
6386 c4feafe8 Iustin Pop
          image = utils.PathJoin(src_path, export_name)
6387 09acf207 Guido Trotter
          disk_images.append(image)
6388 09acf207 Guido Trotter
        else:
6389 09acf207 Guido Trotter
          disk_images.append(False)
6390 09acf207 Guido Trotter
6391 09acf207 Guido Trotter
      self.src_images = disk_images
6392 901a65c1 Iustin Pop
6393 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
6394 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
6395 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
6396 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
6397 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
6398 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
6399 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
6400 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6401 bc89efc3 Guido Trotter
6402 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
6403 901a65c1 Iustin Pop
6404 18c8f361 Iustin Pop
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6405 901a65c1 Iustin Pop
    if self.op.ip_check:
6406 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6407 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6408 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
6409 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
6410 901a65c1 Iustin Pop
6411 295728df Guido Trotter
    #### mac address generation
6412 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
6413 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
6414 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
6415 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
6416 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
6417 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
6418 295728df Guido Trotter
    # creation job will fail.
6419 295728df Guido Trotter
    for nic in self.nics:
6420 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6421 36b66e6e Guido Trotter
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
6422 295728df Guido Trotter
6423 538475ca Iustin Pop
    #### allocator run
6424 538475ca Iustin Pop
6425 538475ca Iustin Pop
    if self.op.iallocator is not None:
6426 538475ca Iustin Pop
      self._RunAllocator()
6427 0f1a06e3 Manuel Franceschini
6428 901a65c1 Iustin Pop
    #### node related checks
6429 901a65c1 Iustin Pop
6430 901a65c1 Iustin Pop
    # check primary node
6431 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6432 7baf741d Guido Trotter
    assert self.pnode is not None, \
6433 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
6434 7527a8a4 Iustin Pop
    if pnode.offline:
6435 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6436 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6437 733a2b6a Iustin Pop
    if pnode.drained:
6438 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6439 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6440 7527a8a4 Iustin Pop
6441 901a65c1 Iustin Pop
    self.secondaries = []
6442 901a65c1 Iustin Pop
6443 901a65c1 Iustin Pop
    # mirror node verification
6444 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6445 7baf741d Guido Trotter
      if self.op.snode is None:
6446 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
6447 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
6448 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
6449 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
6450 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
6451 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
6452 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
6453 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
6454 a8083063 Iustin Pop
6455 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
6456 6785674e Iustin Pop
6457 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
6458 08db7c5c Iustin Pop
                                self.disks)
6459 ed1ebc60 Guido Trotter
6460 c3589cf8 Iustin Pop
    # Check lv size requirements, if not adopting
6461 c3589cf8 Iustin Pop
    if req_size is not None and not self.adopt_disks:
6462 701384a9 Iustin Pop
      _CheckNodesFreeDisk(self, nodenames, req_size)
6463 ed1ebc60 Guido Trotter
6464 c3589cf8 Iustin Pop
    if self.adopt_disks: # instead, we must check the adoption data
6465 c3589cf8 Iustin Pop
      all_lvs = set([i["adopt"] for i in self.disks])
6466 c3589cf8 Iustin Pop
      if len(all_lvs) != len(self.disks):
6467 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
6468 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6469 c3589cf8 Iustin Pop
      for lv_name in all_lvs:
6470 c3589cf8 Iustin Pop
        try:
6471 c3589cf8 Iustin Pop
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
6472 c3589cf8 Iustin Pop
        except errors.ReservationError:
6473 c3589cf8 Iustin Pop
          raise errors.OpPrereqError("LV named %s used by another instance" %
6474 c3589cf8 Iustin Pop
                                     lv_name, errors.ECODE_NOTUNIQUE)
6475 c3589cf8 Iustin Pop
6476 c3589cf8 Iustin Pop
      node_lvs = self.rpc.call_lv_list([pnode.name],
6477 c3589cf8 Iustin Pop
                                       self.cfg.GetVGName())[pnode.name]
6478 c3589cf8 Iustin Pop
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
6479 c3589cf8 Iustin Pop
      node_lvs = node_lvs.payload
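      # the payload maps each LV name to a status tuple: index 0 is the
      # size and index 2 flags volumes that are currently online (in use)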
6480 c3589cf8 Iustin Pop
      delta = all_lvs.difference(node_lvs.keys())
6481 c3589cf8 Iustin Pop
      if delta:
6482 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
6483 c3589cf8 Iustin Pop
                                   utils.CommaJoin(delta),
6484 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6485 c3589cf8 Iustin Pop
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
6486 c3589cf8 Iustin Pop
      if online_lvs:
6487 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Online logical volumes found, cannot"
6488 c3589cf8 Iustin Pop
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
6489 c3589cf8 Iustin Pop
                                   errors.ECODE_STATE)
6490 c3589cf8 Iustin Pop
      # update the size of each disk based on what was found on the node
6491 c3589cf8 Iustin Pop
      for dsk in self.disks:
6492 c3589cf8 Iustin Pop
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
6493 c3589cf8 Iustin Pop
6494 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6495 6785674e Iustin Pop
6496 231cd901 Iustin Pop
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
6497 a8083063 Iustin Pop
6498 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6499 a8083063 Iustin Pop
6500 49ce1563 Iustin Pop
    # memory check on primary node
6501 49ce1563 Iustin Pop
    if self.op.start:
6502 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
6503 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
6504 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
6505 338e51e8 Iustin Pop
                           self.op.hypervisor)
6506 49ce1563 Iustin Pop
6507 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6508 08896026 Iustin Pop
6509 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6510 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6511 a8083063 Iustin Pop

6512 a8083063 Iustin Pop
    """
6513 a8083063 Iustin Pop
    instance = self.op.instance_name
6514 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6515 a8083063 Iustin Pop
6516 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
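    # hypervisors in HTS_REQ_PORT expose the instance console over the
    # network, so reserve a cluster-unique TCP port for this instance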
6517 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6518 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6519 2a6469d5 Alexander Schreiber
    else:
6520 2a6469d5 Alexander Schreiber
      network_port = None
6521 58acb49d Alexander Schreiber
6522 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
6523 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
6524 31a853d2 Iustin Pop
6525 2c313123 Manuel Franceschini
    # this is needed because utils.PathJoin does not accept None arguments
6526 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
6527 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
6528 2c313123 Manuel Franceschini
    else:
6529 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
6530 2c313123 Manuel Franceschini
6531 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
6532 c4feafe8 Iustin Pop
    file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6533 c4feafe8 Iustin Pop
                                      string_file_storage_dir, instance)
6534 0f1a06e3 Manuel Franceschini
6535 0f1a06e3 Manuel Franceschini
6536 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6537 a8083063 Iustin Pop
                                  self.op.disk_template,
6538 a8083063 Iustin Pop
                                  instance, pnode_name,
6539 08db7c5c Iustin Pop
                                  self.secondaries,
6540 08db7c5c Iustin Pop
                                  self.disks,
6541 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6542 e2a65344 Iustin Pop
                                  self.op.file_driver,
6543 e2a65344 Iustin Pop
                                  0)
6544 a8083063 Iustin Pop
6545 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6546 a8083063 Iustin Pop
                            primary_node=pnode_name,
6547 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6548 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6549 4978db17 Iustin Pop
                            admin_up=False,
6550 58acb49d Alexander Schreiber
                            network_port=network_port,
6551 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6552 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6553 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6554 a8083063 Iustin Pop
                            )
6555 a8083063 Iustin Pop
6556 c3589cf8 Iustin Pop
    if self.adopt_disks:
6557 c3589cf8 Iustin Pop
      # rename LVs to the newly-generated names; we need to construct
6558 c3589cf8 Iustin Pop
      # 'fake' LV disks with the old data, plus the new unique_id
6559 c3589cf8 Iustin Pop
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
6560 c3589cf8 Iustin Pop
      rename_to = []
6561 c3589cf8 Iustin Pop
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
6562 c3589cf8 Iustin Pop
        rename_to.append(t_dsk.logical_id)
6563 c3589cf8 Iustin Pop
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
6564 c3589cf8 Iustin Pop
        self.cfg.SetDiskID(t_dsk, pnode_name)
6565 c3589cf8 Iustin Pop
      result = self.rpc.call_blockdev_rename(pnode_name,
6566 c3589cf8 Iustin Pop
                                             zip(tmp_disks, rename_to))
6567 c3589cf8 Iustin Pop
      result.Raise("Failed to rename adoped LVs")
6568 c3589cf8 Iustin Pop
    else:
6569 c3589cf8 Iustin Pop
      feedback_fn("* creating instance disks...")
6570 796cab27 Iustin Pop
      try:
6571 c3589cf8 Iustin Pop
        _CreateDisks(self, iobj)
6572 c3589cf8 Iustin Pop
      except errors.OpExecError:
6573 c3589cf8 Iustin Pop
        self.LogWarning("Device creation failed, reverting...")
6574 c3589cf8 Iustin Pop
        try:
6575 c3589cf8 Iustin Pop
          _RemoveDisks(self, iobj)
6576 c3589cf8 Iustin Pop
        finally:
6577 c3589cf8 Iustin Pop
          self.cfg.ReleaseDRBDMinors(instance)
6578 c3589cf8 Iustin Pop
          raise
6579 a8083063 Iustin Pop
6580 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6581 a8083063 Iustin Pop
6582 0debfb35 Guido Trotter
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6583 0debfb35 Guido Trotter
6584 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6585 7baf741d Guido Trotter
    # added the instance to the config
6586 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6587 e36e96b4 Guido Trotter
    # Unlock all the nodes
6588 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
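      # keep the lock on the source node, as the OS import further down
      # still needs to read the export data from it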
6589 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6590 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6591 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6592 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6593 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6594 9c8971d7 Guido Trotter
    else:
6595 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6596 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6597 a8083063 Iustin Pop
6598 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6599 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6600 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6601 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6602 a8083063 Iustin Pop
      time.sleep(15)
6603 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6604 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6605 a8083063 Iustin Pop
    else:
6606 a8083063 Iustin Pop
      disk_abort = False
6607 a8083063 Iustin Pop
6608 a8083063 Iustin Pop
    if disk_abort:
6609 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6610 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6611 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6612 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6613 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6614 3ecf6786 Iustin Pop
                               " this instance")
6615 a8083063 Iustin Pop
6616 c3589cf8 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
6617 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6618 25a8792c Iustin Pop
        if not self.op.no_install:
6619 25a8792c Iustin Pop
          feedback_fn("* running the instance OS create scripts...")
6620 25a8792c Iustin Pop
          # FIXME: pass debug option from opcode to backend
6621 25a8792c Iustin Pop
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6622 25a8792c Iustin Pop
                                                 self.op.debug_level)
6623 25a8792c Iustin Pop
          result.Raise("Could not add os for instance %s"
6624 25a8792c Iustin Pop
                       " on node %s" % (instance, pnode_name))
6625 a8083063 Iustin Pop
6626 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6627 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6628 a8083063 Iustin Pop
        src_node = self.op.src_node
6629 09acf207 Guido Trotter
        src_images = self.src_images
6630 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6631 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6632 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6633 09acf207 Guido Trotter
                                                         src_node, src_images,
6634 dd713605 Iustin Pop
                                                         cluster_name,
6635 dd713605 Iustin Pop
                                                         self.op.debug_level)
6636 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6637 944bf548 Iustin Pop
        if msg:
6638 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6639 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6640 a8083063 Iustin Pop
      else:
6641 a8083063 Iustin Pop
        # also checked in the prereq part
6642 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6643 3ecf6786 Iustin Pop
                                     % self.op.mode)
6644 a8083063 Iustin Pop
6645 a8083063 Iustin Pop
    if self.op.start:
6646 4978db17 Iustin Pop
      iobj.admin_up = True
6647 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6648 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6649 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6650 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6651 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6652 a8083063 Iustin Pop
6653 08896026 Iustin Pop
    return list(iobj.all_nodes)
6654 08896026 Iustin Pop
6655 a8083063 Iustin Pop
6656 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6657 a8083063 Iustin Pop
  """Connect to an instance's console.
6658 a8083063 Iustin Pop

6659 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6660 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6661 a8083063 Iustin Pop
  console.
6662 a8083063 Iustin Pop

6663 a8083063 Iustin Pop
  """
6664 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6665 8659b73e Guido Trotter
  REQ_BGL = False
6666 8659b73e Guido Trotter
6667 8659b73e Guido Trotter
  def ExpandNames(self):
6668 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6669 a8083063 Iustin Pop
6670 a8083063 Iustin Pop
  def CheckPrereq(self):
6671 a8083063 Iustin Pop
    """Check prerequisites.
6672 a8083063 Iustin Pop

6673 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6674 a8083063 Iustin Pop

6675 a8083063 Iustin Pop
    """
6676 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6677 8659b73e Guido Trotter
    assert self.instance is not None, \
6678 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6679 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6680 a8083063 Iustin Pop
6681 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6682 a8083063 Iustin Pop
    """Connect to the console of an instance
6683 a8083063 Iustin Pop

6684 a8083063 Iustin Pop
    """
6685 a8083063 Iustin Pop
    instance = self.instance
6686 a8083063 Iustin Pop
    node = instance.primary_node
6687 a8083063 Iustin Pop
6688 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6689 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6690 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6691 a8083063 Iustin Pop
6692 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6693 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6694 a8083063 Iustin Pop
6695 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6696 a8083063 Iustin Pop
6697 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6698 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6699 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6700 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6701 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6702 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6703 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6704 b047857b Michael Hanselmann
6705 82122173 Iustin Pop
    # build ssh cmdline
6706 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6707 a8083063 Iustin Pop
6708 a8083063 Iustin Pop
6709 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6710 a8083063 Iustin Pop
  """Replace the disks of an instance.
6711 a8083063 Iustin Pop

6712 a8083063 Iustin Pop
  """
6713 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6714 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6715 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6716 efd990e4 Guido Trotter
  REQ_BGL = False
6717 efd990e4 Guido Trotter
6718 7e9366f7 Iustin Pop
  def CheckArguments(self):
6719 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6720 efd990e4 Guido Trotter
      self.op.remote_node = None
6721 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6722 7e9366f7 Iustin Pop
      self.op.iallocator = None
6723 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6724 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6725 7e9366f7 Iustin Pop
6726 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6727 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6728 7e9366f7 Iustin Pop
6729 7e9366f7 Iustin Pop
  def ExpandNames(self):
6730 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6731 7e9366f7 Iustin Pop
6732 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6733 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6734 2bb5c911 Michael Hanselmann
6735 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6736 cf26a87a Iustin Pop
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6737 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6738 2bb5c911 Michael Hanselmann
6739 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6740 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6741 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6742 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6743 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6744 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6745 2bb5c911 Michael Hanselmann
6746 efd990e4 Guido Trotter
    else:
6747 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6748 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6749 efd990e4 Guido Trotter
6750 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6751 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6752 7ea7bcf6 Iustin Pop
                                   self.op.disks, False, self.op.early_release)
6753 c68174b6 Michael Hanselmann
6754 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6755 2bb5c911 Michael Hanselmann
6756 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6757 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6758 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6759 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6760 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6761 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6762 a8083063 Iustin Pop
6763 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6764 a8083063 Iustin Pop
    """Build hooks env.
6765 a8083063 Iustin Pop

6766 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6767 a8083063 Iustin Pop

6768 a8083063 Iustin Pop
    """
6769 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6770 a8083063 Iustin Pop
    env = {
6771 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6772 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6773 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6774 a8083063 Iustin Pop
      }
6775 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6776 0834c866 Iustin Pop
    nl = [
6777 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6778 2bb5c911 Michael Hanselmann
      instance.primary_node,
6779 0834c866 Iustin Pop
      ]
6780 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6781 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6782 a8083063 Iustin Pop
    return env, nl, nl
6783 a8083063 Iustin Pop
6784 2bb5c911 Michael Hanselmann
6785 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6786 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6787 7ffc5a86 Michael Hanselmann

6788 7ffc5a86 Michael Hanselmann
  """
6789 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6790 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6791 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6792 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6793 7ffc5a86 Michael Hanselmann
6794 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6795 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6796 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6797 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6798 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6799 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6800 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6801 7ffc5a86 Michael Hanselmann
6802 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6803 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6804 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6805 7ffc5a86 Michael Hanselmann
6806 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6807 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6808 7ffc5a86 Michael Hanselmann
6809 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6810 7ffc5a86 Michael Hanselmann
6811 7ffc5a86 Michael Hanselmann
    # Declare node locks
6812 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6813 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6814 7ffc5a86 Michael Hanselmann
6815 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6816 cf26a87a Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6817 7ffc5a86 Michael Hanselmann
6818 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6819 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6820 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6821 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6822 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
6823 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6824 7ffc5a86 Michael Hanselmann
6825 7ffc5a86 Michael Hanselmann
    else:
6826 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6827 7ffc5a86 Michael Hanselmann
6828 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6829 7ffc5a86 Michael Hanselmann
    # node
6830 7ffc5a86 Michael Hanselmann
    names = []
6831 3a012b41 Michael Hanselmann
    tasklets = []
6832 7ffc5a86 Michael Hanselmann
6833 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6834 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6835 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6836 7ffc5a86 Michael Hanselmann
6837 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6838 94a1b377 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [],
6839 7ea7bcf6 Iustin Pop
                                True, self.op.early_release)
6840 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6841 7ffc5a86 Michael Hanselmann
6842 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6843 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6844 7ffc5a86 Michael Hanselmann
6845 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6846 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6847 7ffc5a86 Michael Hanselmann
6848 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6849 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6850 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6851 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6852 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6853 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6854 7ffc5a86 Michael Hanselmann
6855 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6856 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6857 7ffc5a86 Michael Hanselmann

6858 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6859 7ffc5a86 Michael Hanselmann

6860 7ffc5a86 Michael Hanselmann
    """
6861 7ffc5a86 Michael Hanselmann
    env = {
6862 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6863 7ffc5a86 Michael Hanselmann
      }
6864 7ffc5a86 Michael Hanselmann
6865 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6866 7ffc5a86 Michael Hanselmann
6867 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6868 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6869 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6870 7ffc5a86 Michael Hanselmann
6871 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
6872 7ffc5a86 Michael Hanselmann
6873 7ffc5a86 Michael Hanselmann
6874 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6875 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6876 2bb5c911 Michael Hanselmann

6877 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6878 2bb5c911 Michael Hanselmann

6879 2bb5c911 Michael Hanselmann
  """
6880 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
6881 7ea7bcf6 Iustin Pop
               disks, delay_iallocator, early_release):
6882 2bb5c911 Michael Hanselmann
    """Initializes this class.
6883 2bb5c911 Michael Hanselmann

6884 2bb5c911 Michael Hanselmann
    """
6885 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
6886 464243a7 Michael Hanselmann
6887 2bb5c911 Michael Hanselmann
    # Parameters
6888 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
6889 2bb5c911 Michael Hanselmann
    self.mode = mode
6890 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
6891 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
6892 2bb5c911 Michael Hanselmann
    self.disks = disks
6893 94a1b377 Michael Hanselmann
    self.delay_iallocator = delay_iallocator
6894 7ea7bcf6 Iustin Pop
    self.early_release = early_release
6895 2bb5c911 Michael Hanselmann
6896 2bb5c911 Michael Hanselmann
    # Runtime data
6897 2bb5c911 Michael Hanselmann
    self.instance = None
6898 2bb5c911 Michael Hanselmann
    self.new_node = None
6899 2bb5c911 Michael Hanselmann
    self.target_node = None
6900 2bb5c911 Michael Hanselmann
    self.other_node = None
6901 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
6902 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
6903 2bb5c911 Michael Hanselmann
6904 2bb5c911 Michael Hanselmann
  @staticmethod
6905 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
6906 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
6907 c68174b6 Michael Hanselmann

6908 c68174b6 Michael Hanselmann
    """
6909 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
6910 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
6911 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
6912 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
6913 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
6914 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
6915 02a00186 Michael Hanselmann
6916 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
6917 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
6918 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
6919 02a00186 Michael Hanselmann
6920 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
6921 02a00186 Michael Hanselmann
      # Not replacing the secondary
6922 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
6923 02a00186 Michael Hanselmann
                                 " only be used when changing the"
6924 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
6925 2bb5c911 Michael Hanselmann
6926 2bb5c911 Michael Hanselmann
  @staticmethod
6927 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
6928 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
6929 2bb5c911 Michael Hanselmann

6930 2bb5c911 Michael Hanselmann
    """
6931 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
6932 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
6933 2bb5c911 Michael Hanselmann
                     name=instance_name,
6934 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
6935 2bb5c911 Michael Hanselmann
6936 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
6937 2bb5c911 Michael Hanselmann
6938 2bb5c911 Michael Hanselmann
    if not ial.success:
6939 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
6940 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
6941 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6942 2bb5c911 Michael Hanselmann
6943 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6944 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6945 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
6946 d984846d Iustin Pop
                                 (iallocator_name,
6947 680f0a89 Iustin Pop
                                  len(ial.result), ial.required_nodes),
6948 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6949 2bb5c911 Michael Hanselmann
6950 680f0a89 Iustin Pop
    remote_node_name = ial.result[0]
6951 2bb5c911 Michael Hanselmann
6952 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
6953 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
6954 2bb5c911 Michael Hanselmann
6955 2bb5c911 Michael Hanselmann
    return remote_node_name
6956 2bb5c911 Michael Hanselmann
6957 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
6958 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
6959 2d9005d8 Michael Hanselmann
                                    node_name, True)
6960 942be002 Michael Hanselmann
6961 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
6962 2bb5c911 Michael Hanselmann
    """Check prerequisites.
6963 2bb5c911 Michael Hanselmann

6964 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
6965 2bb5c911 Michael Hanselmann

6966 2bb5c911 Michael Hanselmann
    """
6967 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
6968 e9022531 Iustin Pop
    assert instance is not None, \
6969 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
6970 2bb5c911 Michael Hanselmann
6971 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
6972 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
6973 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
6974 a8083063 Iustin Pop
6975 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
6976 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
6977 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
6978 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
6979 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
6980 a8083063 Iustin Pop
6981 94a1b377 Michael Hanselmann
    if not self.delay_iallocator:
6982 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
6983 94a1b377 Michael Hanselmann
6984 94a1b377 Michael Hanselmann
  def _CheckPrereq2(self):
6985 94a1b377 Michael Hanselmann
    """Check prerequisites, second part.
6986 94a1b377 Michael Hanselmann

6987 94a1b377 Michael Hanselmann
    This function should always be part of CheckPrereq. It was separated and is
6988 94a1b377 Michael Hanselmann
    now called from Exec because during node evacuation iallocator was only
6989 94a1b377 Michael Hanselmann
    called with an unmodified cluster model, not taking planned changes into
6990 94a1b377 Michael Hanselmann
    account.
6991 94a1b377 Michael Hanselmann

6992 94a1b377 Michael Hanselmann
    """
6993 94a1b377 Michael Hanselmann
    instance = self.instance
6994 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
6995 a9e0c397 Iustin Pop
6996 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
6997 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
6998 2bb5c911 Michael Hanselmann
    else:
6999 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7000 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
7001 b6e82a65 Iustin Pop
7002 a9e0c397 Iustin Pop
    if remote_node is not None:
7003 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7004 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
7005 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
7006 a9e0c397 Iustin Pop
    else:
7007 a9e0c397 Iustin Pop
      self.remote_node_info = None
7008 2bb5c911 Michael Hanselmann
7009 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
7010 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
7011 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
7012 2bb5c911 Michael Hanselmann
7013 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
7014 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
7015 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
7016 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7017 7e9366f7 Iustin Pop
7018 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7019 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
7020 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7021 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7022 942be002 Michael Hanselmann
7023 2945fd2d Michael Hanselmann
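    # automatic mode: find out which node (if any) has faulty disks and
    # arrange to replace just those, in place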
    if self.mode == constants.REPLACE_DISK_AUTO:
7024 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7025 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7026 942be002 Michael Hanselmann
7027 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
7028 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7029 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
7030 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
7031 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7032 942be002 Michael Hanselmann
7033 942be002 Michael Hanselmann
      if faulty_primary:
7034 942be002 Michael Hanselmann
        self.disks = faulty_primary
7035 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7036 942be002 Michael Hanselmann
        self.other_node = secondary_node
7037 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7038 942be002 Michael Hanselmann
      elif faulty_secondary:
7039 942be002 Michael Hanselmann
        self.disks = faulty_secondary
7040 942be002 Michael Hanselmann
        self.target_node = secondary_node
7041 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7042 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7043 942be002 Michael Hanselmann
      else:
7044 942be002 Michael Hanselmann
        self.disks = []
7045 942be002 Michael Hanselmann
        check_nodes = []
7046 942be002 Michael Hanselmann
7047 942be002 Michael Hanselmann
    else:
7048 942be002 Michael Hanselmann
      # Non-automatic modes
7049 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
7050 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7051 942be002 Michael Hanselmann
        self.other_node = secondary_node
7052 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7053 7e9366f7 Iustin Pop
7054 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
7055 942be002 Michael Hanselmann
        self.target_node = secondary_node
7056 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7057 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7058 a9e0c397 Iustin Pop
7059 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
7060 942be002 Michael Hanselmann
        self.new_node = remote_node
7061 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7062 942be002 Michael Hanselmann
        self.target_node = secondary_node
7063 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
7064 54155f52 Iustin Pop
7065 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
7066 a8083063 Iustin Pop
7067 9af0fa6a Iustin Pop
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7068 9af0fa6a Iustin Pop
        assert old_node_info is not None
7069 9af0fa6a Iustin Pop
        if old_node_info.offline and not self.early_release:
7070 9af0fa6a Iustin Pop
          # doesn't make sense to delay the release
7071 9af0fa6a Iustin Pop
          self.early_release = True
7072 9af0fa6a Iustin Pop
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7073 9af0fa6a Iustin Pop
                          " early-release mode", secondary_node)
7074 9af0fa6a Iustin Pop
7075 942be002 Michael Hanselmann
      else:
7076 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7077 942be002 Michael Hanselmann
                                     self.mode)
7078 942be002 Michael Hanselmann
7079 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
7080 942be002 Michael Hanselmann
      if not self.disks:
7081 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
7082 a9e0c397 Iustin Pop
7083 2bb5c911 Michael Hanselmann
    for node in check_nodes:
7084 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
7085 e4376078 Iustin Pop
7086 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
7087 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
7088 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
7089 e4376078 Iustin Pop
7090 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
7091 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
7092 e4376078 Iustin Pop
7093 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
7094 2bb5c911 Michael Hanselmann
      if node_name is not None:
7095 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7096 e4376078 Iustin Pop
7097 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
7098 a9e0c397 Iustin Pop
7099 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
7100 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
7101 2bb5c911 Michael Hanselmann

7102 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
7103 cff90b79 Iustin Pop

7104 a9e0c397 Iustin Pop
    """
7105 94a1b377 Michael Hanselmann
    if self.delay_iallocator:
7106 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7107 94a1b377 Michael Hanselmann
7108 942be002 Michael Hanselmann
    if not self.disks:
7109 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
7110 942be002 Michael Hanselmann
      return
7111 942be002 Michael Hanselmann
7112 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
7113 1f864b60 Iustin Pop
                (utils.CommaJoin(self.disks), self.instance.name))
7114 7ffc5a86 Michael Hanselmann
7115 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
7116 2bb5c911 Michael Hanselmann
7117 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
7118 2bb5c911 Michael Hanselmann
    if activate_disks:
7119 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
7120 2bb5c911 Michael Hanselmann
7121 2bb5c911 Michael Hanselmann
    try:
7122 942be002 Michael Hanselmann
      # Should we replace the secondary node?
7123 942be002 Michael Hanselmann
      if self.new_node is not None:
7124 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
7125 2bb5c911 Michael Hanselmann
      else:
7126 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
7127 a4eae71f Michael Hanselmann
7128 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
7129 2bb5c911 Michael Hanselmann
7130 2bb5c911 Michael Hanselmann
    finally:
7131 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
7132 5c983ee5 Iustin Pop
      # down instance
7133 2bb5c911 Michael Hanselmann
      if activate_disks:
7134 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7135 2bb5c911 Michael Hanselmann
7136 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
7137 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
7138 2bb5c911 Michael Hanselmann
7139 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
7140 cff90b79 Iustin Pop
7141 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
7142 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
7143 cff90b79 Iustin Pop
    if not results:
7144 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
7145 2bb5c911 Michael Hanselmann
7146 2bb5c911 Michael Hanselmann
    for node in nodes:
7147 781de953 Iustin Pop
      res = results[node]
7148 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
7149 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
7150 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7151 2bb5c911 Michael Hanselmann
                                 (vgname, node))
7152 2bb5c911 Michael Hanselmann
7153 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
7154 2bb5c911 Michael Hanselmann
    # Check disk existence
7155 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7156 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7157 cff90b79 Iustin Pop
        continue
7158 2bb5c911 Michael Hanselmann
7159 2bb5c911 Michael Hanselmann
      for node in nodes:
7160 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7161 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
7162 2bb5c911 Michael Hanselmann
7163 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
7164 2bb5c911 Michael Hanselmann
7165 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7166 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
7167 2bb5c911 Michael Hanselmann
          if not msg:
7168 2bb5c911 Michael Hanselmann
            msg = "disk not found"
7169 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7170 23829f6f Iustin Pop
                                   (idx, node, msg))
7171 cff90b79 Iustin Pop
7172 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7173 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7174 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7175 cff90b79 Iustin Pop
        continue
7176 cff90b79 Iustin Pop
7177 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7178 2bb5c911 Michael Hanselmann
                      (idx, node_name))
7179 2bb5c911 Michael Hanselmann
7180 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7181 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
7182 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7183 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
7184 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
7185 2bb5c911 Michael Hanselmann
7186 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
7187 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
7188 2bb5c911 Michael Hanselmann
    iv_names = {}
7189 2bb5c911 Michael Hanselmann
7190 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7191 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7192 a9e0c397 Iustin Pop
        continue
7193 2bb5c911 Michael Hanselmann
7194 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7195 2bb5c911 Michael Hanselmann
7196 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7197 2bb5c911 Michael Hanselmann
7198 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7199 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
7200 2bb5c911 Michael Hanselmann
7201 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7202 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
7203 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7204 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
7205 2bb5c911 Michael Hanselmann
7206 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
7207 a9e0c397 Iustin Pop
      old_lvs = dev.children
7208 a9e0c397 Iustin Pop
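      # remember (drbd device, old LVs, new LVs) per iv_name for the
      # detach/rename/attach and cleanup steps that follow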
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7209 2bb5c911 Michael Hanselmann
7210 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
7211 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
7212 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7213 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7214 2bb5c911 Michael Hanselmann
7215 2bb5c911 Michael Hanselmann
    return iv_names
7216 2bb5c911 Michael Hanselmann
7217 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
7218 1122eb25 Iustin Pop
    for name, (dev, _, _) in iv_names.iteritems():
7219 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7220 2bb5c911 Michael Hanselmann
7221 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
7222 2bb5c911 Michael Hanselmann
7223 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
7224 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
7225 2bb5c911 Michael Hanselmann
        if not msg:
7226 2bb5c911 Michael Hanselmann
          msg = "disk not found"
7227 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7228 2bb5c911 Michael Hanselmann
                                 (name, msg))
7229 2bb5c911 Michael Hanselmann
7230 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
7231 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7232 2bb5c911 Michael Hanselmann
7233 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
7234 1122eb25 Iustin Pop
    for name, (_, old_lvs, _) in iv_names.iteritems():
7235 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7236 2bb5c911 Michael Hanselmann
7237 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
7238 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
7239 2bb5c911 Michael Hanselmann
7240 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7241 2bb5c911 Michael Hanselmann
        if msg:
7242 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7243 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
7244 2bb5c911 Michael Hanselmann
7245 7ea7bcf6 Iustin Pop
  def _ReleaseNodeLock(self, node_name):
7246 7ea7bcf6 Iustin Pop
    """Releases the lock for a given node."""
7247 7ea7bcf6 Iustin Pop
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7248 7ea7bcf6 Iustin Pop
7249 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7250 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
7251 2bb5c911 Michael Hanselmann

7252 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
7253 2bb5c911 Michael Hanselmann

7254 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
7255 2bb5c911 Michael Hanselmann

7256 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
7257 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
7258 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
7259 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
7260 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
7261 2bb5c911 Michael Hanselmann

7262 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
7263 2bb5c911 Michael Hanselmann

7264 2bb5c911 Michael Hanselmann
      1. for each modified disk:
7265 2bb5c911 Michael Hanselmann

7266 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
7267 2bb5c911 Michael Hanselmann

7268 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
7269 2bb5c911 Michael Hanselmann

7270 2bb5c911 Michael Hanselmann
    """
7271 2bb5c911 Michael Hanselmann
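    # the procedure below is reported to the user as six numbered steps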
    steps_total = 6
7272 2bb5c911 Michael Hanselmann
7273 2bb5c911 Michael Hanselmann
    # Step: check device activation
7274 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7275 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
7276 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
7277 2bb5c911 Michael Hanselmann
7278 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
7279 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7280 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
7281 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
7282 2bb5c911 Michael Hanselmann
                                False)
7283 2bb5c911 Michael Hanselmann
7284 2bb5c911 Michael Hanselmann
    # Step: create new storage
7285 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7286 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
7287 a9e0c397 Iustin Pop
7288 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
7289 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7290 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7291 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7292 2bb5c911 Michael Hanselmann
7293 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7294 4d4a651d Michael Hanselmann
                                                     old_lvs)
7295 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
7296 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
7297 cff90b79 Iustin Pop
      #dev.children = []
7298 cff90b79 Iustin Pop
      #cfg.Update(instance)
7299 a9e0c397 Iustin Pop
7300 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
7301 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
7302 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7303 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
7304 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
7305 cff90b79 Iustin Pop
7306 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
7307 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
7308 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
7309 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
7310 2bb5c911 Michael Hanselmann
7311 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
7312 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
7313 cff90b79 Iustin Pop
      for to_ren in old_lvs:
7314 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7315 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
7316 23829f6f Iustin Pop
          # device exists
7317 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7318 cff90b79 Iustin Pop
7319 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
7320 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7321 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
7322 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7323 2bb5c911 Michael Hanselmann
7324 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
7325 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
7326 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
7327 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
7328 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7329 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
7330 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7331 cff90b79 Iustin Pop
7332 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
7333 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
7334 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
7335 a9e0c397 Iustin Pop
7336 cff90b79 Iustin Pop
      for disk in old_lvs:
7337 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
7338 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
7339 a9e0c397 Iustin Pop
7340 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
7341 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7342 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7343 4d4a651d Michael Hanselmann
                                                  new_lvs)
7344 4c4e4e1e Iustin Pop
      msg = result.fail_msg
7345 2cc1da8b Iustin Pop
      if msg:
7346 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
7347 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7348 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
7349 4c4e4e1e Iustin Pop
          if msg2:
7350 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7351 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
7352 2bb5c911 Michael Hanselmann
                                     "volumes"))
7353 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7354 a9e0c397 Iustin Pop
7355 a9e0c397 Iustin Pop
      dev.children = new_lvs
7356 a9e0c397 Iustin Pop
7357 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
7358 a9e0c397 Iustin Pop
7359 7ea7bcf6 Iustin Pop
    cstep = 5
7360 7ea7bcf6 Iustin Pop
    if self.early_release:
7361 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7362 7ea7bcf6 Iustin Pop
      cstep += 1
7363 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7364 d5cd389c Iustin Pop
      # WARNING: we release both node locks here, do not do other RPCs
7365 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7366 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.target_node, self.other_node])
7367 7ea7bcf6 Iustin Pop
7368 2bb5c911 Michael Hanselmann
    # Wait for sync
7369 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7370 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7371 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7372 7ea7bcf6 Iustin Pop
    cstep += 1
7373 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7374 a9e0c397 Iustin Pop
7375 2bb5c911 Michael Hanselmann
    # Check all devices manually
7376 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7377 a9e0c397 Iustin Pop
7378 cff90b79 Iustin Pop
    # Step: remove old storage
7379 7ea7bcf6 Iustin Pop
    if not self.early_release:
7380 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7381 7ea7bcf6 Iustin Pop
      cstep += 1
7382 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7383 a9e0c397 Iustin Pop
7384 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
7385 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
7386 a9e0c397 Iustin Pop

7387 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
7388 a9e0c397 Iustin Pop
      - for all disks of the instance:
7389 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
7390 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
7391 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
7392 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
7393 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
7394 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
7395 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
7396 a9e0c397 Iustin Pop
          not network enabled
7397 a9e0c397 Iustin Pop
      - wait for sync across all devices
7398 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
7399 a9e0c397 Iustin Pop

7400 a9e0c397 Iustin Pop
    Failures are not very well handled.
7401 0834c866 Iustin Pop

7402 a9e0c397 Iustin Pop
    """
7403 0834c866 Iustin Pop
    steps_total = 6
7404 0834c866 Iustin Pop
7405 0834c866 Iustin Pop
    # Step: check device activation
7406 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7407 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
7408 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
7409 0834c866 Iustin Pop
7410 0834c866 Iustin Pop
    # Step: check other node consistency
7411 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7412 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7413 0834c866 Iustin Pop
7414 0834c866 Iustin Pop
    # Step: create new storage
7415 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7416 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7417 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7418 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
7419 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
7420 a9e0c397 Iustin Pop
      for new_lv in dev.children:
7421 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7422 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7423 a9e0c397 Iustin Pop
7424 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
7425 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
7426 a1578d63 Iustin Pop
    # error and the success paths
7427 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7428 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7429 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
7430 2bb5c911 Michael Hanselmann
                                        self.instance.name)
7431 099c52ad Iustin Pop
    logging.debug("Allocated minors %r", minors)
7432 2bb5c911 Michael Hanselmann
7433 2bb5c911 Michael Hanselmann
    iv_names = {}
7434 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7435 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7436 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
7437 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
7438 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
7439 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
7440 a2d59d8b Iustin Pop
      # with network, for the later activation in step 4
7441 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7442 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
7443 a2d59d8b Iustin Pop
        p_minor = o_minor1
7444 ffa1c0dc Iustin Pop
      else:
7445 1122eb25 Iustin Pop
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7446 a2d59d8b Iustin Pop
        p_minor = o_minor2
7447 a2d59d8b Iustin Pop
7448 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7449 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
7450 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7451 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
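      # new_alone_id (port=None) lets the device be brought up standalone on
      # the new node; new_net_id keeps the original port and becomes the
      # disk's logical_id once the network attach has been done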
7452 a2d59d8b Iustin Pop
7453 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
7454 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7455 a2d59d8b Iustin Pop
                    new_net_id)
7456 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7457 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
7458 8a6c7011 Iustin Pop
                              children=dev.children,
7459 8a6c7011 Iustin Pop
                              size=dev.size)
7460 796cab27 Iustin Pop
      try:
7461 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7462 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
7463 82759cb1 Iustin Pop
      except errors.GenericError:
7464 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7465 796cab27 Iustin Pop
        raise
7466 a9e0c397 Iustin Pop
7467 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
7468 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7469 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7470 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
7471 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7472 cacfd1fd Iustin Pop
      if msg:
7473 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7474 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
7475 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
7476 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
7477 a9e0c397 Iustin Pop
7478 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7479 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7480 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
7481 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
7482 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
7483 642445d9 Iustin Pop
7484 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7485 a2d59d8b Iustin Pop
    if msg:
7486 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
7487 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7488 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
7489 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
7490 642445d9 Iustin Pop
7491 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
7492 642445d9 Iustin Pop
    # the instance to point to the new secondary
7493 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
7494 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
7495 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
7496 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7497 2bb5c911 Michael Hanselmann
7498 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
7499 a9e0c397 Iustin Pop
7500 642445d9 Iustin Pop
    # and now perform the drbd attach
7501 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7502 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
7503 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7504 4d4a651d Michael Hanselmann
                                            self.new_node],
7505 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
7506 4d4a651d Michael Hanselmann
                                           self.instance.disks,
7507 4d4a651d Michael Hanselmann
                                           self.instance.name,
7508 a2d59d8b Iustin Pop
                                           False)
7509 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
7510 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
7511 a2d59d8b Iustin Pop
      if msg:
7512 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7513 4d4a651d Michael Hanselmann
                           to_node, msg,
7514 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
7515 2bb5c911 Michael Hanselmann
                                 " status of disks"))
7516 7ea7bcf6 Iustin Pop
    cstep = 5
7517 7ea7bcf6 Iustin Pop
    if self.early_release:
7518 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7519 7ea7bcf6 Iustin Pop
      cstep += 1
7520 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7521 d5cd389c Iustin Pop
      # WARNING: we release all node locks here, do not do other RPCs
7522 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7523 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.instance.primary_node,
7524 d5cd389c Iustin Pop
                             self.target_node,
7525 d5cd389c Iustin Pop
                             self.new_node])
7526 a9e0c397 Iustin Pop
7527 2bb5c911 Michael Hanselmann
    # Wait for sync
7528 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7529 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7530 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7531 7ea7bcf6 Iustin Pop
    cstep += 1
7532 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7533 a9e0c397 Iustin Pop
7534 2bb5c911 Michael Hanselmann
    # Check all devices manually
7535 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7536 22985314 Guido Trotter
7537 2bb5c911 Michael Hanselmann
    # Step: remove old storage
7538 7ea7bcf6 Iustin Pop
    if not self.early_release:
7539 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7540 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7541 a9e0c397 Iustin Pop
7542 a8083063 Iustin Pop
7543 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
7544 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
7545 76aef8fc Michael Hanselmann

7546 76aef8fc Michael Hanselmann
  """
7547 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
7548 76aef8fc Michael Hanselmann
  REQ_BGL = False
7549 76aef8fc Michael Hanselmann
7550 76aef8fc Michael Hanselmann
  def CheckArguments(self):
7551 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7552 76aef8fc Michael Hanselmann
7553 76aef8fc Michael Hanselmann
  def ExpandNames(self):
7554 76aef8fc Michael Hanselmann
    self.needed_locks = {
7555 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
7556 76aef8fc Michael Hanselmann
      }
7557 76aef8fc Michael Hanselmann
7558 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
7559 7e9c6a78 Iustin Pop
    """Ensure faulty disks abort the opcode or at least warn."""
7560 7e9c6a78 Iustin Pop
    try:
7561 7e9c6a78 Iustin Pop
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7562 7e9c6a78 Iustin Pop
                                  node_name, True):
7563 7e9c6a78 Iustin Pop
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7564 7e9c6a78 Iustin Pop
                                   " node '%s'" % (instance.name, node_name),
7565 7e9c6a78 Iustin Pop
                                   errors.ECODE_STATE)
7566 7e9c6a78 Iustin Pop
    except errors.OpPrereqError, err:
7567 7e9c6a78 Iustin Pop
      if self.op.ignore_consistency:
7568 7e9c6a78 Iustin Pop
        self.proc.LogWarning(str(err.args[0]))
7569 7e9c6a78 Iustin Pop
      else:
7570 7e9c6a78 Iustin Pop
        raise
7571 76aef8fc Michael Hanselmann
7572 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
7573 76aef8fc Michael Hanselmann
    """Check prerequisites.
7574 76aef8fc Michael Hanselmann

7575 76aef8fc Michael Hanselmann
    """
7576 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7577 76aef8fc Michael Hanselmann
7578 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7579 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7580 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7581 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7582 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7583 76aef8fc Michael Hanselmann
7584 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7585 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7586 7e9c6a78 Iustin Pop
      if not inst.admin_up:
7587 7e9c6a78 Iustin Pop
        continue
7588 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7589 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7590 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7591 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7592 76aef8fc Michael Hanselmann
7593 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7594 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7595 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7596 76aef8fc Michael Hanselmann
7597 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7598 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7599 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7600 76aef8fc Michael Hanselmann
                                           self.op.name,
7601 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7602 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7603 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7604 76aef8fc Michael Hanselmann
7605 76aef8fc Michael Hanselmann
7606 f7e7689f Iustin Pop
class LUNodeEvacuationStrategy(NoHooksLU):
7607 f7e7689f Iustin Pop
  """Computes the node evacuation strategy.
7608 f7e7689f Iustin Pop

7609 f7e7689f Iustin Pop
  """
7610 f7e7689f Iustin Pop
  _OP_REQP = ["nodes"]
7611 f7e7689f Iustin Pop
  REQ_BGL = False
7612 f7e7689f Iustin Pop
7613 f7e7689f Iustin Pop
  def CheckArguments(self):
7614 f7e7689f Iustin Pop
    if not hasattr(self.op, "remote_node"):
7615 f7e7689f Iustin Pop
      self.op.remote_node = None
7616 f7e7689f Iustin Pop
    if not hasattr(self.op, "iallocator"):
7617 f7e7689f Iustin Pop
      self.op.iallocator = None
7618 f7e7689f Iustin Pop
    if self.op.remote_node is not None and self.op.iallocator is not None:
7619 f7e7689f Iustin Pop
      raise errors.OpPrereqError("Give either the iallocator or the new"
7620 f7e7689f Iustin Pop
                                 " secondary, not both", errors.ECODE_INVAL)
7621 f7e7689f Iustin Pop
7622 f7e7689f Iustin Pop
  def ExpandNames(self):
7623 f7e7689f Iustin Pop
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7624 f7e7689f Iustin Pop
    self.needed_locks = locks = {}
7625 f7e7689f Iustin Pop
    if self.op.remote_node is None:
7626 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7627 f7e7689f Iustin Pop
    else:
7628 f7e7689f Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7629 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7630 f7e7689f Iustin Pop
7631 f7e7689f Iustin Pop
  def CheckPrereq(self):
7632 f7e7689f Iustin Pop
    pass
7633 f7e7689f Iustin Pop
7634 f7e7689f Iustin Pop
  def Exec(self, feedback_fn):
7635 f7e7689f Iustin Pop
    if self.op.remote_node is not None:
7636 f7e7689f Iustin Pop
      instances = []
7637 f7e7689f Iustin Pop
      for node in self.op.nodes:
7638 f7e7689f Iustin Pop
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7639 f7e7689f Iustin Pop
      result = []
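      # manual mode: pair every affected instance with the requested new
      # secondary node; each result entry is [instance_name, new_node]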
7640 f7e7689f Iustin Pop
      for i in instances:
7641 f7e7689f Iustin Pop
        if i.primary_node == self.op.remote_node:
7642 f7e7689f Iustin Pop
          raise errors.OpPrereqError("Node %s is the primary node of"
7643 f7e7689f Iustin Pop
                                     " instance %s, cannot use it as"
7644 f7e7689f Iustin Pop
                                     " secondary" %
7645 f7e7689f Iustin Pop
                                     (self.op.remote_node, i.name),
7646 f7e7689f Iustin Pop
                                     errors.ECODE_INVAL)
7647 f7e7689f Iustin Pop
        result.append([i.name, self.op.remote_node])
7648 f7e7689f Iustin Pop
    else:
7649 f7e7689f Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
7650 f7e7689f Iustin Pop
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7651 f7e7689f Iustin Pop
                       evac_nodes=self.op.nodes)
7652 f7e7689f Iustin Pop
      ial.Run(self.op.iallocator, validate=True)
7653 f7e7689f Iustin Pop
      if not ial.success:
7654 f7e7689f Iustin Pop
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7655 f7e7689f Iustin Pop
                                 errors.ECODE_NORES)
7656 f7e7689f Iustin Pop
      result = ial.result
7657 f7e7689f Iustin Pop
    return result
7658 f7e7689f Iustin Pop
7659 f7e7689f Iustin Pop
7660 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7661 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7662 8729e0d7 Iustin Pop

7663 8729e0d7 Iustin Pop
  """
7664 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7665 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7666 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7667 31e63dbf Guido Trotter
  REQ_BGL = False
7668 31e63dbf Guido Trotter
7669 31e63dbf Guido Trotter
  def ExpandNames(self):
7670 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7671 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7672 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7673 31e63dbf Guido Trotter
7674 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7675 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7676 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7677 8729e0d7 Iustin Pop
7678 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7679 8729e0d7 Iustin Pop
    """Build hooks env.
7680 8729e0d7 Iustin Pop

7681 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7682 8729e0d7 Iustin Pop

7683 8729e0d7 Iustin Pop
    """
7684 8729e0d7 Iustin Pop
    env = {
7685 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7686 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7687 8729e0d7 Iustin Pop
      }
7688 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7689 abd8e836 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7690 8729e0d7 Iustin Pop
    return env, nl, nl
7691 8729e0d7 Iustin Pop
7692 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7693 8729e0d7 Iustin Pop
    """Check prerequisites.
7694 8729e0d7 Iustin Pop

7695 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7696 8729e0d7 Iustin Pop

7697 8729e0d7 Iustin Pop
    """
7698 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7699 31e63dbf Guido Trotter
    assert instance is not None, \
7700 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7701 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7702 6b12959c Iustin Pop
    for node in nodenames:
7703 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7704 7527a8a4 Iustin Pop
7705 31e63dbf Guido Trotter
7706 8729e0d7 Iustin Pop
    self.instance = instance
7707 8729e0d7 Iustin Pop
7708 728489a3 Guido Trotter
    if instance.disk_template not in constants.DTS_GROWABLE:
7709 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7710 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7711 8729e0d7 Iustin Pop
7712 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7713 8729e0d7 Iustin Pop
7714 2c42c5df Guido Trotter
    if instance.disk_template != constants.DT_FILE:
7715 2c42c5df Guido Trotter
      # TODO: check the free disk space for file, when that feature will be
7716 2c42c5df Guido Trotter
      # supported
7717 2c42c5df Guido Trotter
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)
7718 8729e0d7 Iustin Pop
7719 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7720 8729e0d7 Iustin Pop
    """Execute disk grow.
7721 8729e0d7 Iustin Pop

7722 8729e0d7 Iustin Pop
    """
7723 8729e0d7 Iustin Pop
    instance = self.instance
7724 ad24e046 Iustin Pop
    disk = self.disk
7725 6b12959c Iustin Pop
    for node in instance.all_nodes:
7726 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7727 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7728 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7729 5bc556dd Michael Hanselmann
7730 5bc556dd Michael Hanselmann
      # TODO: Rewrite code to work properly
7731 5bc556dd Michael Hanselmann
      # DRBD goes into sync mode for a short amount of time after executing the
7732 5bc556dd Michael Hanselmann
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7733 5bc556dd Michael Hanselmann
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7734 5bc556dd Michael Hanselmann
      # time is a work-around.
7735 5bc556dd Michael Hanselmann
      time.sleep(5)
7736 5bc556dd Michael Hanselmann
7737 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
7738 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7739 6605411d Iustin Pop
    if self.op.wait_for_sync:
7740 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7741 6605411d Iustin Pop
      if disk_abort:
7742 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7743 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7744 8729e0d7 Iustin Pop
7745 8729e0d7 Iustin Pop
7746 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7747 a8083063 Iustin Pop
  """Query runtime instance data.
7748 a8083063 Iustin Pop

7749 a8083063 Iustin Pop
  """
7750 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7751 a987fa48 Guido Trotter
  REQ_BGL = False
7752 ae5849b5 Michael Hanselmann
7753 a987fa48 Guido Trotter
  def ExpandNames(self):
7754 a987fa48 Guido Trotter
    self.needed_locks = {}
7755 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
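    # value 1 means all locks are acquired in shared mode: this is a
    # read-only query and doesn't need exclusive access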
7756 a987fa48 Guido Trotter
7757 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7758 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7759 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7760 a987fa48 Guido Trotter
7761 a987fa48 Guido Trotter
    if self.op.instances:
7762 a987fa48 Guido Trotter
      self.wanted_names = []
7763 a987fa48 Guido Trotter
      for name in self.op.instances:
7764 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
7765 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7766 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7767 a987fa48 Guido Trotter
    else:
7768 a987fa48 Guido Trotter
      self.wanted_names = None
7769 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7770 a987fa48 Guido Trotter
7771 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7772 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7773 a987fa48 Guido Trotter
7774 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7775 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7776 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7777 a8083063 Iustin Pop
7778 a8083063 Iustin Pop
  def CheckPrereq(self):
7779 a8083063 Iustin Pop
    """Check prerequisites.
7780 a8083063 Iustin Pop

7781 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7782 a8083063 Iustin Pop

7783 a8083063 Iustin Pop
    """
7784 a987fa48 Guido Trotter
    if self.wanted_names is None:
7785 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7786 a8083063 Iustin Pop
7787 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7788 a987fa48 Guido Trotter
                             in self.wanted_names]
7789 a987fa48 Guido Trotter
    return
7790 a8083063 Iustin Pop
7791 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7792 98825740 Michael Hanselmann
    """Returns the status of a block device
7793 98825740 Michael Hanselmann

7794 98825740 Michael Hanselmann
    """
7795 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7796 98825740 Michael Hanselmann
      return None
7797 98825740 Michael Hanselmann
7798 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7799 98825740 Michael Hanselmann
7800 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7801 98825740 Michael Hanselmann
    if result.offline:
7802 98825740 Michael Hanselmann
      return None
7803 98825740 Michael Hanselmann
7804 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7805 98825740 Michael Hanselmann
7806 98825740 Michael Hanselmann
    status = result.payload
7807 ddfe2228 Michael Hanselmann
    if status is None:
7808 ddfe2228 Michael Hanselmann
      return None
7809 98825740 Michael Hanselmann
7810 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7811 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7812 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
7813 98825740 Michael Hanselmann
7814 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7815 a8083063 Iustin Pop
    """Compute block device status.
7816 a8083063 Iustin Pop

7817 a8083063 Iustin Pop
    """
7818 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7819 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7820 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7821 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7822 a8083063 Iustin Pop
      else:
7823 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7824 a8083063 Iustin Pop
7825 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7826 98825740 Michael Hanselmann
                                              instance.name, dev)
7827 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7828 a8083063 Iustin Pop
7829 a8083063 Iustin Pop
    if dev.children:
7830 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7831 a8083063 Iustin Pop
                      for child in dev.children]
7832 a8083063 Iustin Pop
    else:
7833 a8083063 Iustin Pop
      dev_children = []
7834 a8083063 Iustin Pop
7835 a8083063 Iustin Pop
    data = {
7836 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
7837 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
7838 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
7839 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
7840 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
7841 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
7842 a8083063 Iustin Pop
      "children": dev_children,
7843 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
7844 c98162a7 Iustin Pop
      "size": dev.size,
7845 a8083063 Iustin Pop
      }
7846 a8083063 Iustin Pop
7847 a8083063 Iustin Pop
    return data
7848 a8083063 Iustin Pop
7849 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7850 a8083063 Iustin Pop
    """Gather and return data"""
7851 a8083063 Iustin Pop
    result = {}
7852 338e51e8 Iustin Pop
7853 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
7854 338e51e8 Iustin Pop
7855 a8083063 Iustin Pop
    for instance in self.wanted_instances:
7856 57821cac Iustin Pop
      if not self.op.static:
7857 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7858 57821cac Iustin Pop
                                                  instance.name,
7859 57821cac Iustin Pop
                                                  instance.hypervisor)
7860 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7861 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
7862 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
7863 57821cac Iustin Pop
          remote_state = "up"
7864 57821cac Iustin Pop
        else:
7865 57821cac Iustin Pop
          remote_state = "down"
7866 a8083063 Iustin Pop
      else:
7867 57821cac Iustin Pop
        remote_state = None
7868 0d68c45d Iustin Pop
      if instance.admin_up:
7869 a8083063 Iustin Pop
        config_state = "up"
7870 0d68c45d Iustin Pop
      else:
7871 0d68c45d Iustin Pop
        config_state = "down"
7872 a8083063 Iustin Pop
7873 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
7874 a8083063 Iustin Pop
               for device in instance.disks]
7875 a8083063 Iustin Pop
7876 a8083063 Iustin Pop
      idict = {
7877 a8083063 Iustin Pop
        "name": instance.name,
7878 a8083063 Iustin Pop
        "config_state": config_state,
7879 a8083063 Iustin Pop
        "run_state": remote_state,
7880 a8083063 Iustin Pop
        "pnode": instance.primary_node,
7881 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
7882 a8083063 Iustin Pop
        "os": instance.os,
7883 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
7884 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
7885 a8083063 Iustin Pop
        "disks": disks,
7886 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
7887 24838135 Iustin Pop
        "network_port": instance.network_port,
7888 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
7889 7736a5f2 Iustin Pop
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
7890 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
7891 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
7892 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
7893 90f72445 Iustin Pop
        "mtime": instance.mtime,
7894 90f72445 Iustin Pop
        "ctime": instance.ctime,
7895 033d58b0 Iustin Pop
        "uuid": instance.uuid,
7896 a8083063 Iustin Pop
        }
7897 a8083063 Iustin Pop
7898 a8083063 Iustin Pop
      result[instance.name] = idict
7899 a8083063 Iustin Pop
7900 a8083063 Iustin Pop
    return result
7901 a8083063 Iustin Pop
7902 a8083063 Iustin Pop
7903 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
7904 a8083063 Iustin Pop
  """Modifies an instances's parameters.
7905 a8083063 Iustin Pop

7906 a8083063 Iustin Pop
  """
7907 a8083063 Iustin Pop
  HPATH = "instance-modify"
7908 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7909 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
7910 1a5c7281 Guido Trotter
  REQ_BGL = False
7911 1a5c7281 Guido Trotter
7912 24991749 Iustin Pop
  def CheckArguments(self):
7913 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
7914 24991749 Iustin Pop
      self.op.nics = []
7915 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
7916 24991749 Iustin Pop
      self.op.disks = []
7917 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
7918 24991749 Iustin Pop
      self.op.beparams = {}
7919 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
7920 24991749 Iustin Pop
      self.op.hvparams = {}
7921 e29e9550 Iustin Pop
    if not hasattr(self.op, "disk_template"):
7922 e29e9550 Iustin Pop
      self.op.disk_template = None
7923 e29e9550 Iustin Pop
    if not hasattr(self.op, "remote_node"):
7924 e29e9550 Iustin Pop
      self.op.remote_node = None
7925 96b39bcc Iustin Pop
    if not hasattr(self.op, "os_name"):
7926 96b39bcc Iustin Pop
      self.op.os_name = None
7927 96b39bcc Iustin Pop
    if not hasattr(self.op, "force_variant"):
7928 96b39bcc Iustin Pop
      self.op.force_variant = False
7929 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
7930 e29e9550 Iustin Pop
    if not (self.op.nics or self.op.disks or self.op.disk_template or
7931 96b39bcc Iustin Pop
            self.op.hvparams or self.op.beparams or self.op.os_name):
7932 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
7933 24991749 Iustin Pop
7934 7736a5f2 Iustin Pop
    if self.op.hvparams:
7935 7736a5f2 Iustin Pop
      _CheckGlobalHvParams(self.op.hvparams)
7936 7736a5f2 Iustin Pop
7937 24991749 Iustin Pop
    # Disk validation
7938 24991749 Iustin Pop
    disk_addremove = 0
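    # self.op.disks is a list of (operation, parameters) pairs, where the
    # operation is constants.DDM_ADD, constants.DDM_REMOVE or the index of
    # an existing disk to modify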
7939 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
7940 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
7941 24991749 Iustin Pop
        disk_addremove += 1
7942 24991749 Iustin Pop
        continue
7943 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
7944 24991749 Iustin Pop
        disk_addremove += 1
7945 24991749 Iustin Pop
      else:
7946 24991749 Iustin Pop
        if not isinstance(disk_op, int):
7947 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
7948 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
7949 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
7950 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
7951 8b46606c Guido Trotter
7952 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
7953 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
7954 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
7955 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
7956 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7957 24991749 Iustin Pop
        size = disk_dict.get('size', None)
7958 24991749 Iustin Pop
        if size is None:
7959 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
7960 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
7961 24991749 Iustin Pop
        try:
7962 24991749 Iustin Pop
          size = int(size)
7963 691744c4 Iustin Pop
        except (TypeError, ValueError), err:
7964 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
7965 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
7966 24991749 Iustin Pop
        disk_dict['size'] = size
7967 24991749 Iustin Pop
      else:
7968 24991749 Iustin Pop
        # modification of disk
7969 24991749 Iustin Pop
        if 'size' in disk_dict:
7970 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
7971 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
7972 24991749 Iustin Pop
7973 24991749 Iustin Pop
    if disk_addremove > 1:
7974 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
7975 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
7976 24991749 Iustin Pop
7977 e29e9550 Iustin Pop
    if self.op.disks and self.op.disk_template is not None:
7978 e29e9550 Iustin Pop
      raise errors.OpPrereqError("Disk template conversion and other disk"
7979 e29e9550 Iustin Pop
                                 " changes not supported at the same time",
7980 e29e9550 Iustin Pop
                                 errors.ECODE_INVAL)
7981 e29e9550 Iustin Pop
7982 e29e9550 Iustin Pop
    if self.op.disk_template:
7983 e29e9550 Iustin Pop
      _CheckDiskTemplate(self.op.disk_template)
7984 e29e9550 Iustin Pop
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
7985 e29e9550 Iustin Pop
          self.op.remote_node is None):
7986 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
7987 e29e9550 Iustin Pop
                                   " one requires specifying a secondary node",
7988 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
7989 e29e9550 Iustin Pop
7990 24991749 Iustin Pop
    # NIC validation
7991 24991749 Iustin Pop
    nic_addremove = 0
7992 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
7993 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
7994 24991749 Iustin Pop
        nic_addremove += 1
7995 24991749 Iustin Pop
        continue
7996 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
7997 24991749 Iustin Pop
        nic_addremove += 1
7998 24991749 Iustin Pop
      else:
7999 24991749 Iustin Pop
        if not isinstance(nic_op, int):
8000 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
8001 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
8002 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
8003 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8004 24991749 Iustin Pop
8005 24991749 Iustin Pop
      # nic_dict should be a dict
8006 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
8007 24991749 Iustin Pop
      if nic_ip is not None:
8008 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
8009 24991749 Iustin Pop
          nic_dict['ip'] = None
8010 24991749 Iustin Pop
        else:
8011 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
8012 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8013 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
8014 5c44da6a Guido Trotter
8015 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
8016 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
8017 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
8018 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8019 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
8020 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8021 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
8022 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8023 cd098c41 Guido Trotter
        nic_dict['link'] = None
8024 cd098c41 Guido Trotter
8025 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
8026 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
8027 5c44da6a Guido Trotter
        if nic_mac is None:
8028 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
8029 5c44da6a Guido Trotter
8030 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8031 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8032 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8033 82187135 Renรฉ Nussbaumer
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8034 82187135 Renรฉ Nussbaumer
8035 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8036 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8037 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
8038 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8039 5c44da6a Guido Trotter
8040 24991749 Iustin Pop
    if nic_addremove > 1:
8041 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
8042 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8043 24991749 Iustin Pop
8044 1a5c7281 Guido Trotter
  def ExpandNames(self):
8045 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
8046 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
8047 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8048 74409b12 Iustin Pop
8049 74409b12 Iustin Pop
  def DeclareLocks(self, level):
8050 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
8051 74409b12 Iustin Pop
      self._LockInstancesNodes()
8052 e29e9550 Iustin Pop
      if self.op.disk_template and self.op.remote_node:
8053 e29e9550 Iustin Pop
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8054 e29e9550 Iustin Pop
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8055 a8083063 Iustin Pop
8056 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8057 a8083063 Iustin Pop
    """Build hooks env.
8058 a8083063 Iustin Pop

8059 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
8060 a8083063 Iustin Pop

8061 a8083063 Iustin Pop
    """
8062 396e1b78 Michael Hanselmann
    args = dict()
8063 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
8064 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
8065 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
8066 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
8067 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8068 d8dcf3c9 Guido Trotter
    # information at all.
8069 d8dcf3c9 Guido Trotter
    if self.op.nics:
8070 d8dcf3c9 Guido Trotter
      args['nics'] = []
8071 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
8072 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
8073 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
8074 d8dcf3c9 Guido Trotter
        if idx in nic_override:
8075 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
8076 d8dcf3c9 Guido Trotter
        else:
8077 d8dcf3c9 Guido Trotter
          this_nic_override = {}
8078 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
8079 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
8080 d8dcf3c9 Guido Trotter
        else:
8081 d8dcf3c9 Guido Trotter
          ip = nic.ip
8082 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
8083 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
8084 d8dcf3c9 Guido Trotter
        else:
8085 d8dcf3c9 Guido Trotter
          mac = nic.mac
8086 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
8087 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
8088 62f0dd02 Guido Trotter
        else:
8089 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
8090 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8091 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8092 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8093 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
8094 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
8095 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
8096 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
8097 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8098 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8099 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8100 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
8101 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
8102 d8dcf3c9 Guido Trotter
8103 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8104 e29e9550 Iustin Pop
    if self.op.disk_template:
8105 e29e9550 Iustin Pop
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8106 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8107 a8083063 Iustin Pop
    return env, nl, nl
8108 a8083063 Iustin Pop
8109 7e950d31 Iustin Pop
  @staticmethod
8110 7e950d31 Iustin Pop
  def _GetUpdatedParams(old_params, update_dict,
8111 0329617a Guido Trotter
                        default_values, parameter_types):
8112 0329617a Guido Trotter
    """Return the new params dict for the given params.
8113 0329617a Guido Trotter

8114 0329617a Guido Trotter
    @type old_params: dict
8115 f2fd87d7 Iustin Pop
    @param old_params: old parameters
8116 0329617a Guido Trotter
    @type update_dict: dict
8117 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
8118 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
8119 f2fd87d7 Iustin Pop
                        parameter to its default value
8120 0329617a Guido Trotter
    @type default_values: dict
8121 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
8122 0329617a Guido Trotter
    @type parameter_types: dict
8123 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
8124 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
8125 0329617a Guido Trotter
    @rtype: (dict, dict)
8126 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
8127 0329617a Guido Trotter

8128 0329617a Guido Trotter
    """
8129 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
8130 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
8131 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
8132 0329617a Guido Trotter
        try:
8133 0329617a Guido Trotter
          del params_copy[key]
8134 0329617a Guido Trotter
        except KeyError:
8135 0329617a Guido Trotter
          pass
8136 0329617a Guido Trotter
      else:
8137 0329617a Guido Trotter
        params_copy[key] = val
8138 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
8139 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
8140 0329617a Guido Trotter
    return (params_copy, params_filled)
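  # Illustrative example (keys made up): with old_params={"a": 1} and
  # update_dict={"a": constants.VALUE_DEFAULT, "b": 2}, the first returned
  # dict is {"b": 2} (the reset key is dropped) and the second one is
  # default_values overridden with {"b": 2}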
8141 0329617a Guido Trotter
8142 a8083063 Iustin Pop
  def CheckPrereq(self):
8143 a8083063 Iustin Pop
    """Check prerequisites.
8144 a8083063 Iustin Pop

8145 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
8146 a8083063 Iustin Pop

8147 a8083063 Iustin Pop
    """
8148 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
8149 a8083063 Iustin Pop
8150 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
8151 31a853d2 Iustin Pop
8152 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8153 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
8154 1a5c7281 Guido Trotter
    assert self.instance is not None, \
8155 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
8156 6b12959c Iustin Pop
    pnode = instance.primary_node
8157 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
8158 74409b12 Iustin Pop
8159 e29e9550 Iustin Pop
    if self.op.disk_template:
8160 e29e9550 Iustin Pop
      if instance.disk_template == self.op.disk_template:
8161 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Instance already has disk template %s" %
8162 e29e9550 Iustin Pop
                                   instance.disk_template, errors.ECODE_INVAL)
8163 e29e9550 Iustin Pop
8164 e29e9550 Iustin Pop
      if (instance.disk_template,
8165 e29e9550 Iustin Pop
          self.op.disk_template) not in self._DISK_CONVERSIONS:
8166 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Unsupported disk template conversion from"
8167 e29e9550 Iustin Pop
                                   " %s to %s" % (instance.disk_template,
8168 e29e9550 Iustin Pop
                                                  self.op.disk_template),
8169 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8170 e29e9550 Iustin Pop
      if self.op.disk_template in constants.DTS_NET_MIRROR:
8171 e29e9550 Iustin Pop
        _CheckNodeOnline(self, self.op.remote_node)
8172 e29e9550 Iustin Pop
        _CheckNodeNotDrained(self, self.op.remote_node)
8173 e29e9550 Iustin Pop
        disks = [{"size": d.size} for d in instance.disks]
8174 e29e9550 Iustin Pop
        required = _ComputeDiskSize(self.op.disk_template, disks)
8175 e29e9550 Iustin Pop
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8176 e29e9550 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot change disk template")
8177 e29e9550 Iustin Pop
8178 338e51e8 Iustin Pop
    # hvparams processing
8179 74409b12 Iustin Pop
    if self.op.hvparams:
8180 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
8181 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
8182 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
8183 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
8184 74409b12 Iustin Pop
      # local check
8185 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
8186 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
8187 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8188 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
8189 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
8190 338e51e8 Iustin Pop
    else:
8191 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
8192 338e51e8 Iustin Pop
8193 338e51e8 Iustin Pop
    # beparams processing
8194 338e51e8 Iustin Pop
    if self.op.beparams:
8195 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
8196 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
8197 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
8198 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
8199 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
8200 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
8201 338e51e8 Iustin Pop
    else:
8202 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
8203 74409b12 Iustin Pop
8204 cfefe007 Guido Trotter
    self.warn = []
8205 647a5d80 Iustin Pop
8206 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
8207 647a5d80 Iustin Pop
      mem_check_list = [pnode]
8208 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8209 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
8210 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
8211 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
8212 72737a7f Iustin Pop
                                                  instance.hypervisor)
8213 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8214 72737a7f Iustin Pop
                                         instance.hypervisor)
8215 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
8216 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
8217 070e998b Iustin Pop
      if msg:
8218 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
8219 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
8220 070e998b Iustin Pop
                         (pnode, msg))
8221 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
8222 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
8223 070e998b Iustin Pop
                         " free memory information" % pnode)
8224 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
8225 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
8226 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
8227 cfefe007 Guido Trotter
      else:
8228 7ad1af4a Iustin Pop
        if instance_info.payload:
8229 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
8230 cfefe007 Guido Trotter
        else:
8231 cfefe007 Guido Trotter
          # Assume instance not running
8232 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
8233 cfefe007 Guido Trotter
          # and we have no other way to check)
8234 cfefe007 Guido Trotter
          current_mem = 0
8235 338e51e8 Iustin Pop
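        # memory shortfall = requested memory - memory currently used by the
        # instance - free memory reported by the primary node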
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8236 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
8237 cfefe007 Guido Trotter
        if miss_mem > 0:
8238 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
8239 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
8240 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
8241 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
8242 cfefe007 Guido Trotter
8243 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8244 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
8245 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
8246 ea33068f Iustin Pop
            continue
8247 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
8248 070e998b Iustin Pop
          if msg:
8249 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
8250 070e998b Iustin Pop
                             (node, msg))
8251 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
8252 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
8253 070e998b Iustin Pop
                             " memory information" % node)
8254 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8255 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
8256 647a5d80 Iustin Pop
                             " secondary node %s" % node)
8257 5bc84f33 Alexander Schreiber
8258 24991749 Iustin Pop
    # NIC processing
8259 cd098c41 Guido Trotter
    self.nic_pnew = {}
8260 cd098c41 Guido Trotter
    self.nic_pinst = {}
8261 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8262 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8263 24991749 Iustin Pop
        if not instance.nics:
8264 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8265 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8266 24991749 Iustin Pop
        continue
8267 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
8268 24991749 Iustin Pop
        # an existing nic
8269 21bcb9aa Michael Hanselmann
        if not instance.nics:
8270 21bcb9aa Michael Hanselmann
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8271 21bcb9aa Michael Hanselmann
                                     " no NICs" % nic_op,
8272 21bcb9aa Michael Hanselmann
                                     errors.ECODE_INVAL)
8273 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
8274 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8275 24991749 Iustin Pop
                                     " are 0 to %d" %
8276 21bcb9aa Michael Hanselmann
                                     (nic_op, len(instance.nics) - 1),
8277 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8278 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
8279 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
8280 cd098c41 Guido Trotter
      else:
8281 cd098c41 Guido Trotter
        old_nic_params = {}
8282 cd098c41 Guido Trotter
        old_nic_ip = None
8283 cd098c41 Guido Trotter
8284 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
8285 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
8286 cd098c41 Guido Trotter
                                 if key in nic_dict])
8287 cd098c41 Guido Trotter
8288 5c44da6a Guido Trotter
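      # the old-style 'bridge' argument, if given, is treated as an alias
      # for the generic 'link' nic parameter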
      if 'bridge' in nic_dict:
8289 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8290 cd098c41 Guido Trotter
8291 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
8292 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
8293 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
8294 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
8295 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8296 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
8297 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
8298 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8299 cd098c41 Guido Trotter
8300 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
8301 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8302 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8303 35c0c8da Iustin Pop
        if msg:
8304 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8305 24991749 Iustin Pop
          if self.force:
8306 24991749 Iustin Pop
            self.warn.append(msg)
8307 24991749 Iustin Pop
          else:
8308 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8309 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
8310 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
8311 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
8312 cd098c41 Guido Trotter
        else:
8313 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
8314 cd098c41 Guido Trotter
        if nic_ip is None:
8315 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
8316 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
8317 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8318 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8319 5c44da6a Guido Trotter
        if nic_mac is None:
8320 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
8321 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8322 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8323 5c44da6a Guido Trotter
          # otherwise generate the mac
8324 36b66e6e Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
8325 5c44da6a Guido Trotter
        else:
8326 5c44da6a Guido Trotter
          # or validate/reserve the current one
8327 36b66e6e Guido Trotter
          try:
8328 36b66e6e Guido Trotter
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
8329 36b66e6e Guido Trotter
          except errors.ReservationError:
8330 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
8331 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
8332 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
8333 24991749 Iustin Pop
8334 24991749 Iustin Pop
    # DISK processing
8335 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
8336 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
8337 5c983ee5 Iustin Pop
                                 " diskless instances",
8338 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8339 1122eb25 Iustin Pop
    for disk_op, _ in self.op.disks:
8340 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8341 24991749 Iustin Pop
        if len(instance.disks) == 1:
8342 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
8343 31624382 Iustin Pop
                                     " an instance", errors.ECODE_INVAL)
8344 31624382 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot remove disks")
8345 24991749 Iustin Pop
8346 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
8347 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
8348 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
8349 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
8350 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
8351 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
8352 24991749 Iustin Pop
        # an existing disk
8353 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
8354 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
8355 24991749 Iustin Pop
                                     " are 0 to %d" %
8356 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
8357 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8358 24991749 Iustin Pop
8359 96b39bcc Iustin Pop
    # OS change
8360 96b39bcc Iustin Pop
    if self.op.os_name and not self.op.force:
8361 96b39bcc Iustin Pop
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8362 96b39bcc Iustin Pop
                      self.op.force_variant)
8363 96b39bcc Iustin Pop
8364 a8083063 Iustin Pop
    return
8365 a8083063 Iustin Pop
8366 e29e9550 Iustin Pop
  def _ConvertPlainToDrbd(self, feedback_fn):
8367 e29e9550 Iustin Pop
    """Converts an instance from plain to drbd.
8368 e29e9550 Iustin Pop

8369 e29e9550 Iustin Pop
    """
8370 e29e9550 Iustin Pop
    feedback_fn("Converting template to drbd")
8371 e29e9550 Iustin Pop
    instance = self.instance
8372 e29e9550 Iustin Pop
    pnode = instance.primary_node
8373 e29e9550 Iustin Pop
    snode = self.op.remote_node
8374 e29e9550 Iustin Pop
8375 e29e9550 Iustin Pop
    # create a fake disk info for _GenerateDiskTemplate
8376 e29e9550 Iustin Pop
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
8377 e29e9550 Iustin Pop
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
8378 e29e9550 Iustin Pop
                                      instance.name, pnode, [snode],
8379 e29e9550 Iustin Pop
                                      disk_info, None, None, 0)
8380 e29e9550 Iustin Pop
    info = _GetInstanceInfoText(instance)
8381 e29e9550 Iustin Pop
    feedback_fn("Creating aditional volumes...")
8382 e29e9550 Iustin Pop
    # first, create the missing data and meta devices
8383 e29e9550 Iustin Pop
    for disk in new_disks:
8384 e29e9550 Iustin Pop
      # unfortunately this is... not too nice
8385 e29e9550 Iustin Pop
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
8386 e29e9550 Iustin Pop
                            info, True)
8387 e29e9550 Iustin Pop
      for child in disk.children:
8388 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
8389 e29e9550 Iustin Pop
    # at this stage, all new LVs have been created, we can rename the
8390 e29e9550 Iustin Pop
    # old ones
8391 e29e9550 Iustin Pop
    feedback_fn("Renaming original volumes...")
8392 e29e9550 Iustin Pop
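    # each original LV is renamed to the logical_id expected by the data
    # child (children[0]) of the corresponding new DRBD disk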
    rename_list = [(o, n.children[0].logical_id)
8393 e29e9550 Iustin Pop
                   for (o, n) in zip(instance.disks, new_disks)]
8394 e29e9550 Iustin Pop
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
8395 e29e9550 Iustin Pop
    result.Raise("Failed to rename original LVs")
8396 e29e9550 Iustin Pop
8397 e29e9550 Iustin Pop
    feedback_fn("Initializing DRBD devices...")
8398 e29e9550 Iustin Pop
    # all child devices are in place, we can now create the DRBD devices
8399 e29e9550 Iustin Pop
    for disk in new_disks:
8400 e29e9550 Iustin Pop
      for node in [pnode, snode]:
8401 e29e9550 Iustin Pop
        f_create = node == pnode
8402 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
8403 e29e9550 Iustin Pop
8404 e29e9550 Iustin Pop
    # at this point, the instance has been modified
8405 e29e9550 Iustin Pop
    instance.disk_template = constants.DT_DRBD8
8406 e29e9550 Iustin Pop
    instance.disks = new_disks
8407 e29e9550 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8408 e29e9550 Iustin Pop
8409 e29e9550 Iustin Pop
    # disks are created, waiting for sync
8410 e29e9550 Iustin Pop
    disk_abort = not _WaitForSync(self, instance)
8411 e29e9550 Iustin Pop
    if disk_abort:
8412 e29e9550 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
8413 e29e9550 Iustin Pop
                               " this instance, please cleanup manually")
8414 e29e9550 Iustin Pop
8415 2f414c48 Iustin Pop
  def _ConvertDrbdToPlain(self, feedback_fn):
8416 2f414c48 Iustin Pop
    """Converts an instance from drbd to plain.
8417 2f414c48 Iustin Pop

8418 2f414c48 Iustin Pop
    """
8419 2f414c48 Iustin Pop
    instance = self.instance
8420 2f414c48 Iustin Pop
    assert len(instance.secondary_nodes) == 1
8421 2f414c48 Iustin Pop
    pnode = instance.primary_node
8422 2f414c48 Iustin Pop
    snode = instance.secondary_nodes[0]
8423 2f414c48 Iustin Pop
    feedback_fn("Converting template to plain")
8424 2f414c48 Iustin Pop
8425 2f414c48 Iustin Pop
    old_disks = instance.disks
8426 2f414c48 Iustin Pop
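    # the data LV (first child) of each DRBD disk simply becomes the new
    # plain disk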
    new_disks = [d.children[0] for d in old_disks]
8427 2f414c48 Iustin Pop
8428 2f414c48 Iustin Pop
    # copy over size and mode
8429 2f414c48 Iustin Pop
    for parent, child in zip(old_disks, new_disks):
8430 2f414c48 Iustin Pop
      child.size = parent.size
8431 2f414c48 Iustin Pop
      child.mode = parent.mode
8432 2f414c48 Iustin Pop
8433 2f414c48 Iustin Pop
    # update instance structure
8434 2f414c48 Iustin Pop
    instance.disks = new_disks
8435 2f414c48 Iustin Pop
    instance.disk_template = constants.DT_PLAIN
8436 2f414c48 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8437 2f414c48 Iustin Pop
8438 2f414c48 Iustin Pop
    feedback_fn("Removing volumes on the secondary node...")
8439 2f414c48 Iustin Pop
    for disk in old_disks:
8440 2f414c48 Iustin Pop
      self.cfg.SetDiskID(disk, snode)
8441 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
8442 2f414c48 Iustin Pop
      if msg:
8443 2f414c48 Iustin Pop
        self.LogWarning("Could not remove block device %s on node %s,"
8444 2f414c48 Iustin Pop
                        " continuing anyway: %s", disk.iv_name, snode, msg)
8445 2f414c48 Iustin Pop
8446 2f414c48 Iustin Pop
    feedback_fn("Removing unneeded volumes on the primary node...")
8447 2f414c48 Iustin Pop
    for idx, disk in enumerate(old_disks):
8448 2f414c48 Iustin Pop
      meta = disk.children[1]
8449 2f414c48 Iustin Pop
      self.cfg.SetDiskID(meta, pnode)
8450 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
8451 2f414c48 Iustin Pop
      if msg:
8452 2f414c48 Iustin Pop
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
8453 2f414c48 Iustin Pop
                        " continuing anyway: %s", idx, pnode, msg)
8454 2f414c48 Iustin Pop
8455 2f414c48 Iustin Pop
8456 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8457 a8083063 Iustin Pop
    """Modifies an instance.
8458 a8083063 Iustin Pop

8459 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
8460 24991749 Iustin Pop
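    The result of the operation is a list of (changed parameter, new value)
    pairs, for example [("disk/1", "remove"), ("be/memory", 512)].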

8461 a8083063 Iustin Pop
    """
8462 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
8463 cfefe007 Guido Trotter
    # feedback_fn there.
8464 cfefe007 Guido Trotter
    for warn in self.warn:
8465 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
8466 cfefe007 Guido Trotter
8467 a8083063 Iustin Pop
    result = []
8468 a8083063 Iustin Pop
    instance = self.instance
8469 24991749 Iustin Pop
    # disk changes
8470 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8471 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8472 24991749 Iustin Pop
        # remove the last disk
8473 24991749 Iustin Pop
        device = instance.disks.pop()
8474 24991749 Iustin Pop
        device_idx = len(instance.disks)
8475 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8476 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
8477 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8478 e1bc0878 Iustin Pop
          if msg:
8479 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8480 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
8481 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
8482 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8483 24991749 Iustin Pop
        # add a new disk
8484 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
8485 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
8486 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
8487 24991749 Iustin Pop
        else:
8488 24991749 Iustin Pop
          file_driver = file_path = None
8489 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
8490 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
8491 24991749 Iustin Pop
                                         instance.disk_template,
8492 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
8493 24991749 Iustin Pop
                                         instance.secondary_nodes,
8494 24991749 Iustin Pop
                                         [disk_dict],
8495 24991749 Iustin Pop
                                         file_path,
8496 24991749 Iustin Pop
                                         file_driver,
8497 24991749 Iustin Pop
                                         disk_idx_base)[0]
8498 24991749 Iustin Pop
        instance.disks.append(new_disk)
8499 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
8500 24991749 Iustin Pop
8501 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
8502 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
8503 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
8504 24991749 Iustin Pop
        #HARDCODE
8505 428958aa Iustin Pop
        for node in instance.all_nodes:
8506 428958aa Iustin Pop
          f_create = node == instance.primary_node
8507 796cab27 Iustin Pop
          try:
8508 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
8509 428958aa Iustin Pop
                            f_create, info, f_create)
8510 1492cca7 Iustin Pop
          except errors.OpExecError, err:
8511 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
8512 428958aa Iustin Pop
                            " node %s: %s",
8513 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
8514 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8515 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
8516 24991749 Iustin Pop
      else:
8517 24991749 Iustin Pop
        # change a given disk
8518 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
8519 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8520 e29e9550 Iustin Pop
8521 e29e9550 Iustin Pop
    if self.op.disk_template:
8522 e29e9550 Iustin Pop
      r_shut = _ShutdownInstanceDisks(self, instance)
8523 e29e9550 Iustin Pop
      if not r_shut:
8524 e29e9550 Iustin Pop
        raise errors.OpExecError("Cannot shutdow instance disks, unable to"
8525 e29e9550 Iustin Pop
                                 " proceed with disk template conversion")
8526 e29e9550 Iustin Pop
      mode = (instance.disk_template, self.op.disk_template)
8527 e29e9550 Iustin Pop
      try:
8528 e29e9550 Iustin Pop
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
8529 e29e9550 Iustin Pop
      except:
8530 e29e9550 Iustin Pop
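        # the conversion may have reserved new DRBD minors in the config;
        # give them back if it failed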
        self.cfg.ReleaseDRBDMinors(instance.name)
8531 e29e9550 Iustin Pop
        raise
8532 e29e9550 Iustin Pop
      result.append(("disk_template", self.op.disk_template))
8533 e29e9550 Iustin Pop
8534 24991749 Iustin Pop
    # NIC changes
8535 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8536 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8537 24991749 Iustin Pop
        # remove the last nic
8538 24991749 Iustin Pop
        del instance.nics[-1]
8539 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
8540 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8541 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
8542 5c44da6a Guido Trotter
        mac = nic_dict['mac']
8543 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
8544 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
8545 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8546 24991749 Iustin Pop
        instance.nics.append(new_nic)
8547 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
8548 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8549 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
8550 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8551 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8552 cd098c41 Guido Trotter
                       )))
8553 24991749 Iustin Pop
      else:
8554 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
8555 24991749 Iustin Pop
          if key in nic_dict:
8556 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
8557 beabf067 Guido Trotter
        if nic_op in self.nic_pinst:
8558 beabf067 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8559 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
8560 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
8561 24991749 Iustin Pop
8562 24991749 Iustin Pop
    # hvparams changes
8563 74409b12 Iustin Pop
    if self.op.hvparams:
8564 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
8565 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
8566 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
8567 24991749 Iustin Pop
8568 24991749 Iustin Pop
    # beparams changes
8569 338e51e8 Iustin Pop
    if self.op.beparams:
8570 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
8571 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
8572 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
8573 a8083063 Iustin Pop
8574 96b39bcc Iustin Pop
    # OS change
8575 96b39bcc Iustin Pop
    if self.op.os_name:
8576 96b39bcc Iustin Pop
      instance.os = self.op.os_name
8577 96b39bcc Iustin Pop
8578 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
8579 a8083063 Iustin Pop
8580 a8083063 Iustin Pop
    return result
8581 a8083063 Iustin Pop
8582 e29e9550 Iustin Pop
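  # map of (current template, requested template) to the conversion method;
  # Exec dispatches via self._DISK_CONVERSIONS[(old, new)](self, feedback_fn)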
  _DISK_CONVERSIONS = {
8583 e29e9550 Iustin Pop
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
8584 2f414c48 Iustin Pop
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
8585 e29e9550 Iustin Pop
    }
8586 a8083063 Iustin Pop
8587 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
8588 a8083063 Iustin Pop
  """Query the exports list
8589 a8083063 Iustin Pop

8590 a8083063 Iustin Pop
  """
8591 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
8592 21a15682 Guido Trotter
  REQ_BGL = False
8593 21a15682 Guido Trotter
8594 21a15682 Guido Trotter
  def ExpandNames(self):
8595 21a15682 Guido Trotter
    self.needed_locks = {}
8596 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
8597 21a15682 Guido Trotter
    if not self.op.nodes:
8598 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8599 21a15682 Guido Trotter
    else:
8600 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
8601 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
8602 a8083063 Iustin Pop
8603 a8083063 Iustin Pop
  def CheckPrereq(self):
8604 21a15682 Guido Trotter
    """Check prerequisites.
8605 a8083063 Iustin Pop

8606 a8083063 Iustin Pop
    """
8607 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8608 a8083063 Iustin Pop
8609 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8610 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
8611 a8083063 Iustin Pop

8612 e4376078 Iustin Pop
    @rtype: dict
8613 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
8614 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
8615 e4376078 Iustin Pop
        that node.
8616 a8083063 Iustin Pop

8617 a8083063 Iustin Pop
    """
8618 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
8619 b04285f2 Guido Trotter
    result = {}
8620 b04285f2 Guido Trotter
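    # nodes that failed to return their export list are marked with False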
    for node in rpcresult:
8621 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
8622 b04285f2 Guido Trotter
        result[node] = False
8623 b04285f2 Guido Trotter
      else:
8624 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
8625 b04285f2 Guido Trotter
8626 b04285f2 Guido Trotter
    return result
8627 a8083063 Iustin Pop
8628 a8083063 Iustin Pop
8629 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
8630 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
8631 a8083063 Iustin Pop

8632 a8083063 Iustin Pop
  """
8633 a8083063 Iustin Pop
  HPATH = "instance-export"
8634 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8635 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8636 6657590e Guido Trotter
  REQ_BGL = False
8637 6657590e Guido Trotter
8638 17c3f802 Guido Trotter
  def CheckArguments(self):
8639 17c3f802 Guido Trotter
    """Check the arguments.
8640 17c3f802 Guido Trotter

8641 17c3f802 Guido Trotter
    """
8642 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8643 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8644 17c3f802 Guido Trotter
8645 6657590e Guido Trotter
  def ExpandNames(self):
8646 6657590e Guido Trotter
    self._ExpandAndLockInstance()
8647 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
8648 6657590e Guido Trotter
    #
8649 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
8650 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
8651 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
8652 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
8653 6657590e Guido Trotter
    #    then one to remove, after
8654 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
8655 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8656 6657590e Guido Trotter
8657 6657590e Guido Trotter
  def DeclareLocks(self, level):
8658 6657590e Guido Trotter
    """Last minute lock declaration."""
8659 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
8660 a8083063 Iustin Pop
8661 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8662 a8083063 Iustin Pop
    """Build hooks env.
8663 a8083063 Iustin Pop

8664 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
8665 a8083063 Iustin Pop

8666 a8083063 Iustin Pop
    """
8667 a8083063 Iustin Pop
    env = {
8668 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
8669 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8670 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8671 a8083063 Iustin Pop
      }
8672 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8673 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8674 a8083063 Iustin Pop
          self.op.target_node]
8675 a8083063 Iustin Pop
    return env, nl, nl
8676 a8083063 Iustin Pop
8677 a8083063 Iustin Pop
  def CheckPrereq(self):
8678 a8083063 Iustin Pop
    """Check prerequisites.
8679 a8083063 Iustin Pop

8680 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
8681 a8083063 Iustin Pop

8682 a8083063 Iustin Pop
    """
8683 6657590e Guido Trotter
    instance_name = self.op.instance_name
8684 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
8685 6657590e Guido Trotter
    assert self.instance is not None, \
8686 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
8687 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
8688 a8083063 Iustin Pop
8689 cf26a87a Iustin Pop
    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8690 cf26a87a Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
8691 cf26a87a Iustin Pop
    assert self.dst_node is not None
8692 a8083063 Iustin Pop
8693 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
8694 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
8695 a8083063 Iustin Pop
8696 b6023d6c Manuel Franceschini
    # instance disk type verification
8697 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
8698 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
8699 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
8700 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
8701 b6023d6c Manuel Franceschini
8702 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8703 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
8704 a8083063 Iustin Pop

8705 a8083063 Iustin Pop
    """
8706 a8083063 Iustin Pop
    instance = self.instance
8707 a8083063 Iustin Pop
    dst_node = self.dst_node
8708 a8083063 Iustin Pop
    src_node = instance.primary_node
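    # outline of the export: optionally shut the instance down, snapshot
    # every disk on the source node, restart the instance if it was running,
    # copy each snapshot to the target node and remove it, finalize the
    # export on the target, and finally clean up older exports of this
    # instance on the other nodes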
8709 37972df0 Michael Hanselmann
8710 a8083063 Iustin Pop
    if self.op.shutdown:
8711 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
8712 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
8713 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
8714 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
8715 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
8716 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
8717 a8083063 Iustin Pop
8718 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
8719 a8083063 Iustin Pop
8720 a8083063 Iustin Pop
    snap_disks = []
8721 a8083063 Iustin Pop
8722 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
8723 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
8724 998c712c Iustin Pop
    for disk in instance.disks:
8725 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
8726 998c712c Iustin Pop
8727 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
8728 3e53a60b Michael Hanselmann
8729 3e53a60b Michael Hanselmann
    if activate_disks:
8730 3e53a60b Michael Hanselmann
      # Activate the instance disks if we're exporting a stopped instance
8731 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
8732 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
8733 3e53a60b Michael Hanselmann
8734 a8083063 Iustin Pop
    try:
8735 3e53a60b Michael Hanselmann
      # per-disk results
8736 3e53a60b Michael Hanselmann
      dresults = []
8737 3e53a60b Michael Hanselmann
      try:
8738 3e53a60b Michael Hanselmann
        for idx, disk in enumerate(instance.disks):
8739 3e53a60b Michael Hanselmann
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
8740 3e53a60b Michael Hanselmann
                      (idx, src_node))
8741 3e53a60b Michael Hanselmann
8742 3e53a60b Michael Hanselmann
          # result.payload will be a snapshot of an lvm leaf of the one we
8743 3e53a60b Michael Hanselmann
          # passed
8744 3e53a60b Michael Hanselmann
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
8745 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8746 3e53a60b Michael Hanselmann
          if msg:
8747 3e53a60b Michael Hanselmann
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
8748 3e53a60b Michael Hanselmann
                            idx, src_node, msg)
8749 3e53a60b Michael Hanselmann
            snap_disks.append(False)
8750 3e53a60b Michael Hanselmann
          else:
8751 3e53a60b Michael Hanselmann
            disk_id = (vgname, result.payload)
8752 3e53a60b Michael Hanselmann
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
8753 3e53a60b Michael Hanselmann
                                   logical_id=disk_id, physical_id=disk_id,
8754 3e53a60b Michael Hanselmann
                                   iv_name=disk.iv_name)
8755 3e53a60b Michael Hanselmann
            snap_disks.append(new_dev)
8756 37972df0 Michael Hanselmann
8757 3e53a60b Michael Hanselmann
      finally:
8758 3e53a60b Michael Hanselmann
        if self.op.shutdown and instance.admin_up:
8759 3e53a60b Michael Hanselmann
          feedback_fn("Starting instance %s" % instance.name)
8760 3e53a60b Michael Hanselmann
          result = self.rpc.call_instance_start(src_node, instance, None, None)
8761 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8762 3e53a60b Michael Hanselmann
          if msg:
8763 3e53a60b Michael Hanselmann
            _ShutdownInstanceDisks(self, instance)
8764 3e53a60b Michael Hanselmann
            raise errors.OpExecError("Could not start instance: %s" % msg)
8765 3e53a60b Michael Hanselmann
8766 3e53a60b Michael Hanselmann
      # TODO: check for size
8767 3e53a60b Michael Hanselmann
8768 3e53a60b Michael Hanselmann
      cluster_name = self.cfg.GetClusterName()
8769 3e53a60b Michael Hanselmann
      for idx, dev in enumerate(snap_disks):
8770 3e53a60b Michael Hanselmann
        feedback_fn("Exporting snapshot %s from %s to %s" %
8771 3e53a60b Michael Hanselmann
                    (idx, src_node, dst_node.name))
8772 3e53a60b Michael Hanselmann
        if dev:
8773 4a0e011f Iustin Pop
          # FIXME: pass debug from opcode to backend
8774 3e53a60b Michael Hanselmann
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
8775 4a0e011f Iustin Pop
                                                 instance, cluster_name,
8776 dd713605 Iustin Pop
                                                 idx, self.op.debug_level)
8777 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8778 3e53a60b Michael Hanselmann
          if msg:
8779 3e53a60b Michael Hanselmann
            self.LogWarning("Could not export disk/%s from node %s to"
8780 3e53a60b Michael Hanselmann
                            " node %s: %s", idx, src_node, dst_node.name, msg)
8781 3e53a60b Michael Hanselmann
            dresults.append(False)
8782 3e53a60b Michael Hanselmann
          else:
8783 3e53a60b Michael Hanselmann
            dresults.append(True)
8784 3e53a60b Michael Hanselmann
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
8785 3e53a60b Michael Hanselmann
          if msg:
8786 3e53a60b Michael Hanselmann
            self.LogWarning("Could not remove snapshot for disk/%d from node"
8787 3e53a60b Michael Hanselmann
                            " %s: %s", idx, src_node, msg)
8788 19d7f90a Guido Trotter
        else:
8789 084f05a5 Iustin Pop
          dresults.append(False)
8790 a8083063 Iustin Pop
8791 3e53a60b Michael Hanselmann
      feedback_fn("Finalizing export on %s" % dst_node.name)
8792 3e53a60b Michael Hanselmann
      result = self.rpc.call_finalize_export(dst_node.name, instance,
8793 3e53a60b Michael Hanselmann
                                             snap_disks)
8794 3e53a60b Michael Hanselmann
      fin_resu = True
8795 3e53a60b Michael Hanselmann
      msg = result.fail_msg
8796 3e53a60b Michael Hanselmann
      if msg:
8797 3e53a60b Michael Hanselmann
        self.LogWarning("Could not finalize export for instance %s"
8798 3e53a60b Michael Hanselmann
                        " on node %s: %s", instance.name, dst_node.name, msg)
8799 3e53a60b Michael Hanselmann
        fin_resu = False
8800 3e53a60b Michael Hanselmann
8801 3e53a60b Michael Hanselmann
    finally:
8802 3e53a60b Michael Hanselmann
      if activate_disks:
8803 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
8804 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
8805 a8083063 Iustin Pop
8806 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
8807 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
8808 a8083063 Iustin Pop
8809 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
8810 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
8811 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
8812 35fbcd11 Iustin Pop
    iname = instance.name
8813 a8083063 Iustin Pop
    if nodelist:
8814 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8815 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
8816 a8083063 Iustin Pop
      for node in exportlist:
8817 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
8818 781de953 Iustin Pop
          continue
8819 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
8820 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8821 35fbcd11 Iustin Pop
          if msg:
8822 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
8823 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
8824 084f05a5 Iustin Pop
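    # the return value is (status of the finalize step, list of per-disk
    # export results)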
    return fin_resu, dresults
8825 5c947f38 Iustin Pop
8826 5c947f38 Iustin Pop
8827 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
8828 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
8829 9ac99fda Guido Trotter

8830 9ac99fda Guido Trotter
  """
8831 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
8832 3656b3af Guido Trotter
  REQ_BGL = False
8833 3656b3af Guido Trotter
8834 3656b3af Guido Trotter
  def ExpandNames(self):
8835 3656b3af Guido Trotter
    self.needed_locks = {}
8836 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
8837 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
8838 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
8839 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8840 9ac99fda Guido Trotter
8841 9ac99fda Guido Trotter
  def CheckPrereq(self):
8842 9ac99fda Guido Trotter
    """Check prerequisites.
8843 9ac99fda Guido Trotter
    """
8844 9ac99fda Guido Trotter
    pass
8845 9ac99fda Guido Trotter
8846 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
8847 9ac99fda Guido Trotter
    """Remove any export.
8848 9ac99fda Guido Trotter

8849 9ac99fda Guido Trotter
    """
8850 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
8851 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
8852 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
8853 9ac99fda Guido Trotter
    fqdn_warn = False
8854 9ac99fda Guido Trotter
    if not instance_name:
8855 9ac99fda Guido Trotter
      fqdn_warn = True
8856 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
8857 9ac99fda Guido Trotter
8858 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
8859 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
8860 9ac99fda Guido Trotter
    found = False
8861 9ac99fda Guido Trotter
    for node in exportlist:
8862 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
8863 1b7bfbb7 Iustin Pop
      if msg:
8864 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
8865 781de953 Iustin Pop
        continue
8866 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
8867 9ac99fda Guido Trotter
        found = True
8868 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
8869 4c4e4e1e Iustin Pop
        msg = result.fail_msg
8870 35fbcd11 Iustin Pop
        if msg:
8871 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
8872 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
8873 9ac99fda Guido Trotter
8874 9ac99fda Guido Trotter
    if fqdn_warn and not found:
8875 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
8876 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
8877 9ac99fda Guido Trotter
                  " Domain Name.")
8878 9ac99fda Guido Trotter
8879 9ac99fda Guido Trotter
8880 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
8881 5c947f38 Iustin Pop
  """Generic tags LU.
8882 5c947f38 Iustin Pop

8883 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
8884 5c947f38 Iustin Pop

8885 5c947f38 Iustin Pop
  """
8886 5c947f38 Iustin Pop
8887 8646adce Guido Trotter
  def ExpandNames(self):
8888 8646adce Guido Trotter
    self.needed_locks = {}
8889 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
8890 cf26a87a Iustin Pop
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
8891 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
8892 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
8893 cf26a87a Iustin Pop
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
8894 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
8895 8646adce Guido Trotter
8896 8646adce Guido Trotter
  def CheckPrereq(self):
8897 8646adce Guido Trotter
    """Check prerequisites.
8898 8646adce Guido Trotter

8899 8646adce Guido Trotter
    """
8900 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
8901 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
8902 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
8903 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
8904 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
8905 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
8906 5c947f38 Iustin Pop
    else:
8907 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
8908 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
8909 5c947f38 Iustin Pop
8910 5c947f38 Iustin Pop
8911 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
8912 5c947f38 Iustin Pop
  """Returns the tags of a given object.
8913 5c947f38 Iustin Pop

8914 5c947f38 Iustin Pop
  """
8915 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
8916 8646adce Guido Trotter
  REQ_BGL = False
8917 5c947f38 Iustin Pop
8918 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8919 5c947f38 Iustin Pop
    """Returns the tag list.
8920 5c947f38 Iustin Pop

8921 5c947f38 Iustin Pop
    """
8922 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
8923 5c947f38 Iustin Pop
8924 5c947f38 Iustin Pop
8925 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
8926 73415719 Iustin Pop
  """Searches the tags for a given pattern.
8927 73415719 Iustin Pop

8928 73415719 Iustin Pop
  """
8929 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
8930 8646adce Guido Trotter
  REQ_BGL = False
8931 8646adce Guido Trotter
8932 8646adce Guido Trotter
  def ExpandNames(self):
8933 8646adce Guido Trotter
    self.needed_locks = {}
8934 73415719 Iustin Pop
8935 73415719 Iustin Pop
  def CheckPrereq(self):
8936 73415719 Iustin Pop
    """Check prerequisites.
8937 73415719 Iustin Pop

8938 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
8939 73415719 Iustin Pop

8940 73415719 Iustin Pop
    """
8941 73415719 Iustin Pop
    try:
8942 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
8943 73415719 Iustin Pop
    except re.error, err:
8944 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
8945 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
8946 73415719 Iustin Pop
8947 73415719 Iustin Pop
  def Exec(self, feedback_fn):
8948 73415719 Iustin Pop
    """Returns the tag list.
8949 73415719 Iustin Pop

8950 73415719 Iustin Pop
    """
8951 73415719 Iustin Pop
    cfg = self.cfg
8952 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
8953 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
8954 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
8955 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
8956 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
8957 73415719 Iustin Pop
    results = []
8958 73415719 Iustin Pop
    for path, target in tgts:
8959 73415719 Iustin Pop
      for tag in target.GetTags():
8960 73415719 Iustin Pop
        if self.re.search(tag):
8961 73415719 Iustin Pop
          results.append((path, tag))
8962 73415719 Iustin Pop
    return results
8963 73415719 Iustin Pop
8964 73415719 Iustin Pop
8965 f27302fa Iustin Pop
class LUAddTags(TagsLU):
8966 5c947f38 Iustin Pop
  """Sets a tag on a given object.
8967 5c947f38 Iustin Pop

8968 5c947f38 Iustin Pop
  """
8969 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8970 8646adce Guido Trotter
  REQ_BGL = False
8971 5c947f38 Iustin Pop
8972 5c947f38 Iustin Pop
  def CheckPrereq(self):
8973 5c947f38 Iustin Pop
    """Check prerequisites.
8974 5c947f38 Iustin Pop

8975 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
8976 5c947f38 Iustin Pop

8977 5c947f38 Iustin Pop
    """
8978 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8979 f27302fa Iustin Pop
    for tag in self.op.tags:
8980 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8981 5c947f38 Iustin Pop
8982 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8983 5c947f38 Iustin Pop
    """Sets the tag.
8984 5c947f38 Iustin Pop

8985 5c947f38 Iustin Pop
    """
8986 5c947f38 Iustin Pop
    try:
8987 f27302fa Iustin Pop
      for tag in self.op.tags:
8988 f27302fa Iustin Pop
        self.target.AddTag(tag)
8989 5c947f38 Iustin Pop
    except errors.TagError, err:
8990 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8991 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8992 5c947f38 Iustin Pop
8993 5c947f38 Iustin Pop
8994 f27302fa Iustin Pop
class LUDelTags(TagsLU):
8995 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
8996 5c947f38 Iustin Pop

8997 5c947f38 Iustin Pop
  """
8998 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8999 8646adce Guido Trotter
  REQ_BGL = False
9000 5c947f38 Iustin Pop
9001 5c947f38 Iustin Pop
  def CheckPrereq(self):
9002 5c947f38 Iustin Pop
    """Check prerequisites.
9003 5c947f38 Iustin Pop

9004 5c947f38 Iustin Pop
    This checks that we have the given tag.
9005 5c947f38 Iustin Pop

9006 5c947f38 Iustin Pop
    """
9007 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
9008 f27302fa Iustin Pop
    for tag in self.op.tags:
9009 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
9010 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
9011 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
9012 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
9013 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
9014 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
9015 f27302fa Iustin Pop
      diff_names.sort()
9016 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
9017 5c983ee5 Iustin Pop
                                 (",".join(diff_names)), errors.ECODE_NOENT)
9018 5c947f38 Iustin Pop
9019 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9020 5c947f38 Iustin Pop
    """Remove the tag from the object.
9021 5c947f38 Iustin Pop

9022 5c947f38 Iustin Pop
    """
9023 f27302fa Iustin Pop
    for tag in self.op.tags:
9024 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
9025 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
9026 06009e27 Iustin Pop
9027 0eed6e61 Guido Trotter
9028 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
9029 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
9030 06009e27 Iustin Pop

9031 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
9032 06009e27 Iustin Pop
  time.
9033 06009e27 Iustin Pop

9034 06009e27 Iustin Pop
  """
9035 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
9036 fbe9022f Guido Trotter
  REQ_BGL = False
9037 06009e27 Iustin Pop
9038 fbe9022f Guido Trotter
  def ExpandNames(self):
9039 fbe9022f Guido Trotter
    """Expand names and set required locks.
9040 06009e27 Iustin Pop

9041 fbe9022f Guido Trotter
    This expands the node list, if any.
9042 06009e27 Iustin Pop

9043 06009e27 Iustin Pop
    """
9044 fbe9022f Guido Trotter
    self.needed_locks = {}
9045 06009e27 Iustin Pop
    if self.op.on_nodes:
9046 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
9047 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
9048 fbe9022f Guido Trotter
      # more information.
9049 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
9050 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
9051 fbe9022f Guido Trotter
9052 fbe9022f Guido Trotter
  def CheckPrereq(self):
9053 fbe9022f Guido Trotter
    """Check prerequisites.
9054 fbe9022f Guido Trotter

9055 fbe9022f Guido Trotter
    """
9056 06009e27 Iustin Pop
9057 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
9058 06009e27 Iustin Pop
    """Do the actual sleep.
9059 06009e27 Iustin Pop

9060 06009e27 Iustin Pop
    """
9061 06009e27 Iustin Pop
    if self.op.on_master:
9062 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
9063 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
9064 06009e27 Iustin Pop
    if self.op.on_nodes:
9065 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
9066 06009e27 Iustin Pop
      for node, node_result in result.items():
9067 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
9068 d61df03e Iustin Pop
9069 d61df03e Iustin Pop
9070 d1c2dd75 Iustin Pop
class IAllocator(object):
9071 d1c2dd75 Iustin Pop
  """IAllocator framework.
9072 d61df03e Iustin Pop

9073 d1c2dd75 Iustin Pop
  An IAllocator instance has several sets of attributes:
9074 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
9075 d1c2dd75 Iustin Pop
    - input data (all members of the mode's _*_KEYS attribute are required)
9076 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
9077 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
9078 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
9079 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, result) for
9080 d1c2dd75 Iustin Pop
      easy usage
9081 d61df03e Iustin Pop

9082 d61df03e Iustin Pop
  """
9083 7260cfbe Iustin Pop
  # pylint: disable-msg=R0902
9084 7260cfbe Iustin Pop
  # lots of instance attributes
9085 29859cb7 Iustin Pop
  _ALLO_KEYS = [
9086 8d3f86a0 Iustin Pop
    "name", "mem_size", "disks", "disk_template",
9087 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
9088 d1c2dd75 Iustin Pop
    ]
9089 29859cb7 Iustin Pop
  _RELO_KEYS = [
9090 8d3f86a0 Iustin Pop
    "name", "relocate_from",
9091 29859cb7 Iustin Pop
    ]
9092 7f60a422 Iustin Pop
  _EVAC_KEYS = [
9093 7f60a422 Iustin Pop
    "evac_nodes",
9094 7f60a422 Iustin Pop
    ]
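  # the lists above define the keyword arguments each mode expects in
  # __init__; e.g. an allocation request would be built roughly as
  #   IAllocator(cfg, rpc, constants.IALLOCATOR_MODE_ALLOC, name=...,
  #              mem_size=..., disks=..., disk_template=..., os=..., tags=...,
  #              nics=..., vcpus=..., hypervisor=...)
  # while a relocation only needs name and relocate_from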

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
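    # For orientation only, an abridged sketch of the structure built above
    # (the keys mirror the code; concrete values are hypothetical):
    #
    #   self.in_data = {
    #     "version": constants.IALLOCATOR_VERSION,
    #     "cluster_name": "cluster.example.com",
    #     "cluster_tags": [],
    #     "enabled_hypervisors": ["xen-pvm"],
    #     "nodes": {"node1.example.com": {"total_memory": 4096, ...}},
    #     "instances": {"inst1.example.com": {"memory": 512, ...}},
    #     }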

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request
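    # Illustrative allocation request only (values are hypothetical and kept
    # self-consistent for a "plain" template); the "type" key is added later
    # by _BuildInputData:
    #
    #   {"name": "inst1.example.com", "disk_template": "plain",
    #    "tags": [], "os": "debian-image", "vcpus": 1, "memory": 512,
    #    "disks": [{"size": 1024, "mode": "w"}], "disk_space_total": 1024,
    #    "nics": [{"mac": "aa:00:00:11:22:33", "ip": None,
    #              "bridge": "xen-br0"}],
    #    "required_nodes": 1}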

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
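    # The serialized input therefore looks roughly as follows (abridged,
    # hypothetical values), with the mode recorded under "type":
    #
    #   {"version": ..., "cluster_name": ..., "nodes": {...},
    #    "instances": {...},
    #    "request": {"type": "allocate", "name": "inst1.example.com", ...}}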

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
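    # A reply that passes the checks above would look, in its JSON form,
    # roughly like this (node names are hypothetical):
    #
    #   {"success": true, "info": "allocation successful",
    #    "result": ["node2.example.com", "node4.example.com"]}
    #
    # After serializer.Load() the values are native Python types, so
    # self.success is True and self.result is a plain list.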


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
                                     " parameter", errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
                                   errors.ECODE_INVAL)
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode '%s' in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result